Add RTC_ prefix to (D)CHECKs and related macros.

We must remove dependency on Chromium, i.e. we can't use Chromium's base/logging.h. That means we need to define these macros in WebRTC also when doing Chromium builds. And this causes redefinition.

Alternative solutions:
* Check if we already have defined e.g. CHECK, and don't define them in that case. This makes us depend on include order in Chromium, which is not acceptable.
* Don't allow using the macros in WebRTC headers. Error prone since if someone adds it there by mistake it may compile fine, but later break if a header is added or order is changed in Chromium. That will be confusing and hard to enforce.
* Ensure that headers that are included by an embedder don't include our macros. This would require some heavy refactoring to be maintainable and enforceable.
* Changes in Chromium for this is obviously not an option.

BUG=chromium:468375
NOTRY=true

Review URL: https://codereview.webrtc.org/1335923002

Cr-Commit-Position: refs/heads/master@{#9964}
diff --git a/talk/app/webrtc/androidvideocapturer.cc b/talk/app/webrtc/androidvideocapturer.cc
index 747dd43..0312cd3 100644
--- a/talk/app/webrtc/androidvideocapturer.cc
+++ b/talk/app/webrtc/androidvideocapturer.cc
@@ -82,7 +82,7 @@
       int dst_width,
       int dst_height) const override {
     // Check that captured_frame is actually our frame.
-    CHECK(captured_frame == &captured_frame_);
+    RTC_CHECK(captured_frame == &captured_frame_);
     rtc::scoped_ptr<cricket::VideoFrame> frame(new cricket::WebRtcVideoFrame(
         ShallowCenterCrop(buffer_, dst_width, dst_height),
         captured_frame->elapsed_time, captured_frame->time_stamp,
@@ -119,8 +119,9 @@
   std::vector<cricket::VideoFormat> formats;
   for (Json::ArrayIndex i = 0; i < json_values.size(); ++i) {
       const Json::Value& json_value = json_values[i];
-      CHECK(!json_value["width"].isNull() && !json_value["height"].isNull() &&
-             !json_value["framerate"].isNull());
+      RTC_CHECK(!json_value["width"].isNull() &&
+                !json_value["height"].isNull() &&
+                !json_value["framerate"].isNull());
       cricket::VideoFormat format(
           json_value["width"].asInt(),
           json_value["height"].asInt(),
@@ -134,13 +135,13 @@
 }
 
 AndroidVideoCapturer::~AndroidVideoCapturer() {
-  CHECK(!running_);
+  RTC_CHECK(!running_);
 }
 
 cricket::CaptureState AndroidVideoCapturer::Start(
     const cricket::VideoFormat& capture_format) {
-  CHECK(thread_checker_.CalledOnValidThread());
-  CHECK(!running_);
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(!running_);
   const int fps = cricket::VideoFormat::IntervalToFps(capture_format.interval);
   LOG(LS_INFO) << " AndroidVideoCapturer::Start " << capture_format.width << "x"
                << capture_format.height << "@" << fps;
@@ -157,8 +158,8 @@
 
 void AndroidVideoCapturer::Stop() {
   LOG(LS_INFO) << " AndroidVideoCapturer::Stop ";
-  CHECK(thread_checker_.CalledOnValidThread());
-  CHECK(running_);
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(running_);
   running_ = false;
   SetCaptureFormat(NULL);
 
@@ -168,18 +169,18 @@
 }
 
 bool AndroidVideoCapturer::IsRunning() {
-  CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
   return running_;
 }
 
 bool AndroidVideoCapturer::GetPreferredFourccs(std::vector<uint32>* fourccs) {
-  CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
   fourccs->push_back(cricket::FOURCC_YV12);
   return true;
 }
 
 void AndroidVideoCapturer::OnCapturerStarted(bool success) {
-  CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
   cricket::CaptureState new_state =
       success ? cricket::CS_RUNNING : cricket::CS_FAILED;
   if (new_state == current_state_)
@@ -196,7 +197,7 @@
     rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer,
     int rotation,
     int64 time_stamp) {
-  CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
   frame_factory_->UpdateCapturedFrame(buffer, rotation, time_stamp);
   SignalFrameCaptured(this, frame_factory_->GetCapturedFrame());
   frame_factory_->ClearCapturedFrame();
@@ -204,7 +205,7 @@
 
 void AndroidVideoCapturer::OnOutputFormatRequest(
     int width, int height, int fps) {
-  CHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(thread_checker_.CalledOnValidThread());
   const cricket::VideoFormat& current = video_adapter()->output_format();
   cricket::VideoFormat format(
       width, height, cricket::VideoFormat::FpsToInterval(fps), current.fourcc);
diff --git a/talk/app/webrtc/datachannelinterface.h b/talk/app/webrtc/datachannelinterface.h
index 90573eb..9d2cd44 100644
--- a/talk/app/webrtc/datachannelinterface.h
+++ b/talk/app/webrtc/datachannelinterface.h
@@ -120,7 +120,7 @@
       case kClosed:
         return "closed";
     }
-    CHECK(false) << "Unknown DataChannel state: " << state;
+    RTC_CHECK(false) << "Unknown DataChannel state: " << state;
     return "";
   }
 
diff --git a/talk/app/webrtc/dtlsidentitystore.cc b/talk/app/webrtc/dtlsidentitystore.cc
index fa330af..2758779 100644
--- a/talk/app/webrtc/dtlsidentitystore.cc
+++ b/talk/app/webrtc/dtlsidentitystore.cc
@@ -61,7 +61,7 @@
     store_->SignalDestroyed.connect(this, &WorkerTask::OnStoreDestroyed);
   }
 
-  virtual ~WorkerTask() { DCHECK(signaling_thread_->IsCurrent()); }
+  virtual ~WorkerTask() { RTC_DCHECK(signaling_thread_->IsCurrent()); }
 
  private:
   void GenerateIdentity_w() {
@@ -87,7 +87,7 @@
         signaling_thread_->Post(this, MSG_DESTROY, msg->pdata);
         break;
       case MSG_GENERATE_IDENTITY_RESULT:
-        DCHECK(signaling_thread_->IsCurrent());
+        RTC_DCHECK(signaling_thread_->IsCurrent());
         {
           rtc::scoped_ptr<IdentityResultMessageData> pdata(
               static_cast<IdentityResultMessageData*>(msg->pdata));
@@ -98,17 +98,17 @@
         }
         break;
       case MSG_DESTROY:
-        DCHECK(signaling_thread_->IsCurrent());
+        RTC_DCHECK(signaling_thread_->IsCurrent());
         delete msg->pdata;
         // |this| has now been deleted. Don't touch member variables.
         break;
       default:
-        CHECK(false) << "Unexpected message type";
+        RTC_CHECK(false) << "Unexpected message type";
     }
   }
 
   void OnStoreDestroyed() {
-    DCHECK(signaling_thread_->IsCurrent());
+    RTC_DCHECK(signaling_thread_->IsCurrent());
     store_ = nullptr;
   }
 
@@ -122,7 +122,7 @@
     : signaling_thread_(signaling_thread),
       worker_thread_(worker_thread),
       request_info_() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   // Preemptively generate identities unless the worker thread and signaling
   // thread are the same (only do preemptive work in the background).
   if (worker_thread_ != signaling_thread_) {
@@ -132,21 +132,21 @@
 }
 
 DtlsIdentityStoreImpl::~DtlsIdentityStoreImpl() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   SignalDestroyed();
 }
 
 void DtlsIdentityStoreImpl::RequestIdentity(
     rtc::KeyType key_type,
     const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>& observer) {
-  DCHECK(signaling_thread_->IsCurrent());
-  DCHECK(observer);
+  RTC_DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(observer);
 
   GenerateIdentity(key_type, observer);
 }
 
 void DtlsIdentityStoreImpl::OnMessage(rtc::Message* msg) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   switch (msg->message_id) {
     case MSG_GENERATE_IDENTITY_RESULT: {
       rtc::scoped_ptr<IdentityResultMessageData> pdata(
@@ -160,14 +160,14 @@
 
 bool DtlsIdentityStoreImpl::HasFreeIdentityForTesting(
     rtc::KeyType key_type) const {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   return request_info_[key_type].free_identity_.get() != nullptr;
 }
 
 void DtlsIdentityStoreImpl::GenerateIdentity(
     rtc::KeyType key_type,
     const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>& observer) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
 
   // Enqueue observer to be informed when generation of |key_type| is completed.
   if (observer.get()) {
@@ -205,9 +205,9 @@
 
 void DtlsIdentityStoreImpl::OnIdentityGenerated(
     rtc::KeyType key_type, rtc::scoped_ptr<rtc::SSLIdentity> identity) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
 
-  DCHECK(request_info_[key_type].gen_in_progress_counts_);
+  RTC_DCHECK(request_info_[key_type].gen_in_progress_counts_);
   --request_info_[key_type].gen_in_progress_counts_;
 
   rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver> observer;
@@ -218,7 +218,7 @@
 
   if (observer.get() == nullptr) {
     // No observer - store result in |free_identities_|.
-    DCHECK(!request_info_[key_type].free_identity_.get());
+    RTC_DCHECK(!request_info_[key_type].free_identity_.get());
     request_info_[key_type].free_identity_.swap(identity);
     if (request_info_[key_type].free_identity_.get())
       LOG(LS_VERBOSE) << "A free DTLS identity was saved.";
diff --git a/talk/app/webrtc/dtlsidentitystore_unittest.cc b/talk/app/webrtc/dtlsidentitystore_unittest.cc
index 3e21a47..e924221 100644
--- a/talk/app/webrtc/dtlsidentitystore_unittest.cc
+++ b/talk/app/webrtc/dtlsidentitystore_unittest.cc
@@ -83,7 +83,7 @@
                                          worker_thread_.get())),
         observer_(
             new rtc::RefCountedObject<MockDtlsIdentityRequestObserver>()) {
-    CHECK(worker_thread_->Start());
+    RTC_CHECK(worker_thread_->Start());
   }
   ~DtlsIdentityStoreTest() {}
 
diff --git a/talk/app/webrtc/fakemetricsobserver.cc b/talk/app/webrtc/fakemetricsobserver.cc
index c275311..9c300cc 100644
--- a/talk/app/webrtc/fakemetricsobserver.cc
+++ b/talk/app/webrtc/fakemetricsobserver.cc
@@ -35,7 +35,7 @@
 }
 
 void FakeMetricsObserver::Reset() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   counters_.clear();
   memset(int_histogram_samples_, 0, sizeof(int_histogram_samples_));
   for (std::string& type : string_histogram_samples_) {
@@ -47,7 +47,7 @@
     PeerConnectionEnumCounterType type,
     int counter,
     int counter_max) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (counters_.size() <= static_cast<size_t>(type)) {
     counters_.resize(type + 1);
   }
@@ -60,34 +60,34 @@
 
 void FakeMetricsObserver::AddHistogramSample(PeerConnectionMetricsName type,
     int value) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK_EQ(int_histogram_samples_[type], 0);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(int_histogram_samples_[type], 0);
   int_histogram_samples_[type] = value;
 }
 
 void FakeMetricsObserver::AddHistogramSample(PeerConnectionMetricsName type,
     const std::string& value) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   string_histogram_samples_[type].assign(value);
 }
 
 int FakeMetricsObserver::GetEnumCounter(PeerConnectionEnumCounterType type,
                                         int counter) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  CHECK(counters_.size() > static_cast<size_t>(type) &&
-        counters_[type].size() > static_cast<size_t>(counter));
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(counters_.size() > static_cast<size_t>(type) &&
+            counters_[type].size() > static_cast<size_t>(counter));
   return counters_[type][counter];
 }
 
 int FakeMetricsObserver::GetIntHistogramSample(
     PeerConnectionMetricsName type) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return int_histogram_samples_[type];
 }
 
 const std::string& FakeMetricsObserver::GetStringHistogramSample(
     PeerConnectionMetricsName type) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return string_histogram_samples_[type];
 }
 
diff --git a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
index a6f7da3..a67dd50 100644
--- a/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediadecoder_jni.cc
@@ -183,7 +183,7 @@
                               "()V"))) {
   ScopedLocalRefFrame local_ref_frame(jni);
   codec_thread_->SetName("MediaCodecVideoDecoder", NULL);
-  CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoDecoder";
+  RTC_CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoDecoder";
 
   j_init_decode_method_ = GetMethodID(
       jni, *j_media_codec_video_decoder_class_, "initDecode",
@@ -262,8 +262,8 @@
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   // Factory should guard against other codecs being used with us.
-  CHECK(inst->codecType == codecType_) << "Unsupported codec " <<
-      inst->codecType << " for " << codecType_;
+  RTC_CHECK(inst->codecType == codecType_)
+      << "Unsupported codec " << inst->codecType << " for " << codecType_;
 
   if (sw_fallback_required_) {
     ALOGE("InitDecode() - fallback to SW decoder");
@@ -394,7 +394,7 @@
 }
 
 void MediaCodecVideoDecoder::CheckOnCodecThread() {
-  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
+  RTC_CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
       << "Running on wrong thread!";
 }
 
@@ -514,7 +514,7 @@
   jobject j_input_buffer = input_buffers_[j_input_buffer_index];
   uint8* buffer =
       reinterpret_cast<uint8*>(jni->GetDirectBufferAddress(j_input_buffer));
-  CHECK(buffer) << "Indirect buffer??";
+  RTC_CHECK(buffer) << "Indirect buffer??";
   int64 buffer_capacity = jni->GetDirectBufferCapacity(j_input_buffer);
   if (CheckException(jni) || buffer_capacity < inputImage._length) {
     ALOGE("Input frame size %d is bigger than buffer size %d.",
@@ -731,8 +731,8 @@
   }
   // We only ever send one message to |this| directly (not through a Bind()'d
   // functor), so expect no ID/data.
-  CHECK(!msg->message_id) << "Unexpected message!";
-  CHECK(!msg->pdata) << "Unexpected message!";
+  RTC_CHECK(!msg->message_id) << "Unexpected message!";
+  RTC_CHECK(!msg->pdata) << "Unexpected message!";
   CheckOnCodecThread();
 
   if (!DeliverPendingOutputs(jni, 0)) {
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
index 8c00bc3..bd94562 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
@@ -236,7 +236,7 @@
   // in the bug, we have a problem.  For now work around that with a dedicated
   // thread.
   codec_thread_->SetName("MediaCodecVideoEncoder", NULL);
-  CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoEncoder";
+  RTC_CHECK(codec_thread_->Start()) << "Failed to start MediaCodecVideoEncoder";
 
   jclass j_output_buffer_info_class =
       FindClass(jni, "org/webrtc/MediaCodecVideoEncoder$OutputBufferInfo");
@@ -292,8 +292,9 @@
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   // Factory should guard against other codecs being used with us.
-  CHECK(codec_settings->codecType == codecType_) << "Unsupported codec " <<
-      codec_settings->codecType << " for " << codecType_;
+  RTC_CHECK(codec_settings->codecType == codecType_)
+      << "Unsupported codec " << codec_settings->codecType << " for "
+      << codecType_;
 
   ALOGD("InitEncode request");
   scale_ = false;
@@ -359,8 +360,8 @@
 
   // We only ever send one message to |this| directly (not through a Bind()'d
   // functor), so expect no ID/data.
-  CHECK(!msg->message_id) << "Unexpected message!";
-  CHECK(!msg->pdata) << "Unexpected message!";
+  RTC_CHECK(!msg->message_id) << "Unexpected message!";
+  RTC_CHECK(!msg->pdata) << "Unexpected message!";
   CheckOnCodecThread();
   if (!inited_) {
     return;
@@ -374,7 +375,7 @@
 }
 
 void MediaCodecVideoEncoder::CheckOnCodecThread() {
-  CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
+  RTC_CHECK(codec_thread_ == ThreadManager::Instance()->CurrentThread())
       << "Running on wrong thread!";
 }
 
@@ -460,7 +461,7 @@
       return WEBRTC_VIDEO_CODEC_ERROR;
   }
   size_t num_input_buffers = jni->GetArrayLength(input_buffers);
-  CHECK(input_buffers_.empty())
+  RTC_CHECK(input_buffers_.empty())
       << "Unexpected double InitEncode without Release";
   input_buffers_.resize(num_input_buffers);
   for (size_t i = 0; i < num_input_buffers; ++i) {
@@ -469,7 +470,7 @@
     int64 yuv_buffer_capacity =
         jni->GetDirectBufferCapacity(input_buffers_[i]);
     CHECK_EXCEPTION(jni);
-    CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
+    RTC_CHECK(yuv_buffer_capacity >= yuv_size_) << "Insufficient capacity";
   }
   CHECK_EXCEPTION(jni);
 
@@ -499,7 +500,7 @@
     return WEBRTC_VIDEO_CODEC_OK;
   }
 
-  CHECK(frame_types->size() == 1) << "Unexpected stream count";
+  RTC_CHECK(frame_types->size() == 1) << "Unexpected stream count";
   // Check framerate before spatial resolution change.
   if (scale_ && codecType_ == kVideoCodecVP8) {
     quality_scaler_->OnEncodeFrame(frame);
@@ -555,17 +556,12 @@
   uint8* yuv_buffer =
       reinterpret_cast<uint8*>(jni->GetDirectBufferAddress(j_input_buffer));
   CHECK_EXCEPTION(jni);
-  CHECK(yuv_buffer) << "Indirect buffer??";
-  CHECK(!libyuv::ConvertFromI420(
-          input_frame.buffer(webrtc::kYPlane),
-          input_frame.stride(webrtc::kYPlane),
-          input_frame.buffer(webrtc::kUPlane),
-          input_frame.stride(webrtc::kUPlane),
-          input_frame.buffer(webrtc::kVPlane),
-          input_frame.stride(webrtc::kVPlane),
-          yuv_buffer, width_,
-          width_, height_,
-          encoder_fourcc_))
+  RTC_CHECK(yuv_buffer) << "Indirect buffer??";
+  RTC_CHECK(!libyuv::ConvertFromI420(
+      input_frame.buffer(webrtc::kYPlane), input_frame.stride(webrtc::kYPlane),
+      input_frame.buffer(webrtc::kUPlane), input_frame.stride(webrtc::kUPlane),
+      input_frame.buffer(webrtc::kVPlane), input_frame.stride(webrtc::kVPlane),
+      yuv_buffer, width_, width_, height_, encoder_fourcc_))
       << "ConvertFromI420 failed";
   last_input_timestamp_ms_ = current_timestamp_us_ / 1000;
   frames_in_queue_++;
diff --git a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
index 43a60c3..69c350a 100644
--- a/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
+++ b/talk/app/webrtc/java/jni/androidvideocapturer_jni.cc
@@ -93,11 +93,11 @@
 void AndroidVideoCapturerJni::Start(int width, int height, int framerate,
                                     webrtc::AndroidVideoCapturer* capturer) {
   LOG(LS_INFO) << "AndroidVideoCapturerJni start";
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   {
     rtc::CritScope cs(&capturer_lock_);
-    CHECK(capturer_ == nullptr);
-    CHECK(invoker_.get() == nullptr);
+    RTC_CHECK(capturer_ == nullptr);
+    RTC_CHECK(invoker_.get() == nullptr);
     capturer_ = capturer;
     invoker_.reset(new rtc::GuardedAsyncInvoker());
   }
@@ -121,7 +121,7 @@
 
 void AndroidVideoCapturerJni::Stop() {
   LOG(LS_INFO) << "AndroidVideoCapturerJni stop";
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   {
     rtc::CritScope cs(&capturer_lock_);
     // Destroying |invoker_| will cancel all pending calls to |capturer_|.
@@ -220,7 +220,8 @@
   // that the memory is valid when we have released |j_frame|.
   // TODO(magjed): Move ReleaseByteArrayElements() into ReturnBuffer() and
   // remove this check.
-  CHECK(!is_copy) << "NativeObserver_nativeOnFrameCaptured: frame is a copy";
+  RTC_CHECK(!is_copy)
+      << "NativeObserver_nativeOnFrameCaptured: frame is a copy";
   reinterpret_cast<AndroidVideoCapturerJni*>(j_capturer)
       ->OnIncomingFrame(bytes, length, width, height, rotation, ts);
   jni->ReleaseByteArrayElements(j_frame, bytes, JNI_ABORT);
diff --git a/talk/app/webrtc/java/jni/classreferenceholder.cc b/talk/app/webrtc/java/jni/classreferenceholder.cc
index fd37838..0ac7e5e 100644
--- a/talk/app/webrtc/java/jni/classreferenceholder.cc
+++ b/talk/app/webrtc/java/jni/classreferenceholder.cc
@@ -51,7 +51,7 @@
 static ClassReferenceHolder* g_class_reference_holder = nullptr;
 
 void LoadGlobalClassReferenceHolder() {
-  CHECK(g_class_reference_holder == nullptr);
+  RTC_CHECK(g_class_reference_holder == nullptr);
   g_class_reference_holder = new ClassReferenceHolder(GetEnv());
 }
 
@@ -114,7 +114,7 @@
 }
 
 ClassReferenceHolder::~ClassReferenceHolder() {
-  CHECK(classes_.empty()) << "Must call FreeReferences() before dtor!";
+  RTC_CHECK(classes_.empty()) << "Must call FreeReferences() before dtor!";
 }
 
 void ClassReferenceHolder::FreeReferences(JNIEnv* jni) {
@@ -127,19 +127,19 @@
 
 jclass ClassReferenceHolder::GetClass(const std::string& name) {
   std::map<std::string, jclass>::iterator it = classes_.find(name);
-  CHECK(it != classes_.end()) << "Unexpected GetClass() call for: " << name;
+  RTC_CHECK(it != classes_.end()) << "Unexpected GetClass() call for: " << name;
   return it->second;
 }
 
 void ClassReferenceHolder::LoadClass(JNIEnv* jni, const std::string& name) {
   jclass localRef = jni->FindClass(name.c_str());
   CHECK_EXCEPTION(jni) << "error during FindClass: " << name;
-  CHECK(localRef) << name;
+  RTC_CHECK(localRef) << name;
   jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
   CHECK_EXCEPTION(jni) << "error during NewGlobalRef: " << name;
-  CHECK(globalRef) << name;
+  RTC_CHECK(globalRef) << name;
   bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
-  CHECK(inserted) << "Duplicate class name: " << name;
+  RTC_CHECK(inserted) << "Duplicate class name: " << name;
 }
 
 // Returns a global reference guaranteed to be valid for the lifetime of the
diff --git a/talk/app/webrtc/java/jni/jni_helpers.cc b/talk/app/webrtc/java/jni/jni_helpers.cc
index ecad5df..755698e 100644
--- a/talk/app/webrtc/java/jni/jni_helpers.cc
+++ b/talk/app/webrtc/java/jni/jni_helpers.cc
@@ -49,7 +49,7 @@
 using icu::UnicodeString;
 
 JavaVM *GetJVM() {
-  CHECK(g_jvm) << "JNI_OnLoad failed to run?";
+  RTC_CHECK(g_jvm) << "JNI_OnLoad failed to run?";
   return g_jvm;
 }
 
@@ -57,8 +57,8 @@
 JNIEnv* GetEnv() {
   void* env = NULL;
   jint status = g_jvm->GetEnv(&env, JNI_VERSION_1_6);
-  CHECK(((env != NULL) && (status == JNI_OK)) ||
-        ((env == NULL) && (status == JNI_EDETACHED)))
+  RTC_CHECK(((env != NULL) && (status == JNI_OK)) ||
+            ((env == NULL) && (status == JNI_EDETACHED)))
       << "Unexpected GetEnv return: " << status << ":" << env;
   return reinterpret_cast<JNIEnv*>(env);
 }
@@ -74,24 +74,24 @@
   if (!GetEnv())
     return;
 
-  CHECK(GetEnv() == prev_jni_ptr)
+  RTC_CHECK(GetEnv() == prev_jni_ptr)
       << "Detaching from another thread: " << prev_jni_ptr << ":" << GetEnv();
   jint status = g_jvm->DetachCurrentThread();
-  CHECK(status == JNI_OK) << "Failed to detach thread: " << status;
-  CHECK(!GetEnv()) << "Detaching was a successful no-op???";
+  RTC_CHECK(status == JNI_OK) << "Failed to detach thread: " << status;
+  RTC_CHECK(!GetEnv()) << "Detaching was a successful no-op???";
 }
 
 static void CreateJNIPtrKey() {
-  CHECK(!pthread_key_create(&g_jni_ptr, &ThreadDestructor))
+  RTC_CHECK(!pthread_key_create(&g_jni_ptr, &ThreadDestructor))
       << "pthread_key_create";
 }
 
 jint InitGlobalJniVariables(JavaVM *jvm) {
-  CHECK(!g_jvm) << "InitGlobalJniVariables!";
+  RTC_CHECK(!g_jvm) << "InitGlobalJniVariables!";
   g_jvm = jvm;
-  CHECK(g_jvm) << "InitGlobalJniVariables handed NULL?";
+  RTC_CHECK(g_jvm) << "InitGlobalJniVariables handed NULL?";
 
-  CHECK(!pthread_once(&g_jni_ptr_once, &CreateJNIPtrKey)) << "pthread_once";
+  RTC_CHECK(!pthread_once(&g_jni_ptr_once, &CreateJNIPtrKey)) << "pthread_once";
 
   JNIEnv* jni = nullptr;
   if (jvm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6) != JNI_OK)
@@ -103,9 +103,9 @@
 // Return thread ID as a string.
 static std::string GetThreadId() {
   char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
-  CHECK_LT(snprintf(buf, sizeof(buf), "%ld",
-           static_cast<long>(syscall(__NR_gettid))),
-           sizeof(buf))
+  RTC_CHECK_LT(snprintf(buf, sizeof(buf), "%ld",
+                        static_cast<long>(syscall(__NR_gettid))),
+               sizeof(buf))
       << "Thread id is bigger than uint64??";
   return std::string(buf);
 }
@@ -123,7 +123,7 @@
   JNIEnv* jni = GetEnv();
   if (jni)
     return jni;
-  CHECK(!pthread_getspecific(g_jni_ptr))
+  RTC_CHECK(!pthread_getspecific(g_jni_ptr))
       << "TLS has a JNIEnv* but not attached?";
 
   std::string name(GetThreadName() + " - " + GetThreadId());
@@ -137,10 +137,11 @@
 #else
   JNIEnv* env = NULL;
 #endif
-  CHECK(!g_jvm->AttachCurrentThread(&env, &args)) << "Failed to attach thread";
-  CHECK(env) << "AttachCurrentThread handed back NULL!";
+  RTC_CHECK(!g_jvm->AttachCurrentThread(&env, &args))
+      << "Failed to attach thread";
+  RTC_CHECK(env) << "AttachCurrentThread handed back NULL!";
   jni = reinterpret_cast<JNIEnv*>(env);
-  CHECK(!pthread_setspecific(g_jni_ptr, jni)) << "pthread_setspecific";
+  RTC_CHECK(!pthread_setspecific(g_jni_ptr, jni)) << "pthread_setspecific";
   return jni;
 }
 
@@ -154,18 +155,18 @@
   // conversion from pointer to integral type.  intptr_t to jlong is a standard
   // widening by the static_assert above.
   jlong ret = reinterpret_cast<intptr_t>(ptr);
-  DCHECK(reinterpret_cast<void*>(ret) == ptr);
+  RTC_DCHECK(reinterpret_cast<void*>(ret) == ptr);
   return ret;
 }
 
-// JNIEnv-helper methods that CHECK success: no Java exception thrown and found
-// object/class/method/field is non-null.
+// JNIEnv-helper methods that RTC_CHECK success: no Java exception thrown and
+// found object/class/method/field is non-null.
 jmethodID GetMethodID(
     JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
   jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
   CHECK_EXCEPTION(jni) << "error during GetMethodID: " << name << ", "
                        << signature;
-  CHECK(m) << name << ", " << signature;
+  RTC_CHECK(m) << name << ", " << signature;
   return m;
 }
 
@@ -174,7 +175,7 @@
   jmethodID m = jni->GetStaticMethodID(c, name, signature);
   CHECK_EXCEPTION(jni) << "error during GetStaticMethodID: " << name << ", "
                        << signature;
-  CHECK(m) << name << ", " << signature;
+  RTC_CHECK(m) << name << ", " << signature;
   return m;
 }
 
@@ -182,21 +183,21 @@
     JNIEnv* jni, jclass c, const char* name, const char* signature) {
   jfieldID f = jni->GetFieldID(c, name, signature);
   CHECK_EXCEPTION(jni) << "error during GetFieldID";
-  CHECK(f) << name << ", " << signature;
+  RTC_CHECK(f) << name << ", " << signature;
   return f;
 }
 
 jclass GetObjectClass(JNIEnv* jni, jobject object) {
   jclass c = jni->GetObjectClass(object);
   CHECK_EXCEPTION(jni) << "error during GetObjectClass";
-  CHECK(c) << "GetObjectClass returned NULL";
+  RTC_CHECK(c) << "GetObjectClass returned NULL";
   return c;
 }
 
 jobject GetObjectField(JNIEnv* jni, jobject object, jfieldID id) {
   jobject o = jni->GetObjectField(object, id);
   CHECK_EXCEPTION(jni) << "error during GetObjectField";
-  CHECK(o) << "GetObjectField returned NULL";
+  RTC_CHECK(o) << "GetObjectField returned NULL";
   return o;
 }
 
@@ -265,7 +266,7 @@
 jobject NewGlobalRef(JNIEnv* jni, jobject o) {
   jobject ret = jni->NewGlobalRef(o);
   CHECK_EXCEPTION(jni) << "error during NewGlobalRef";
-  CHECK(ret);
+  RTC_CHECK(ret);
   return ret;
 }
 
@@ -278,7 +279,7 @@
 // callbacks (i.e. entry points that don't originate in a Java callstack
 // through a "native" method call).
 ScopedLocalRefFrame::ScopedLocalRefFrame(JNIEnv* jni) : jni_(jni) {
-  CHECK(!jni_->PushLocalFrame(0)) << "Failed to PushLocalFrame";
+  RTC_CHECK(!jni_->PushLocalFrame(0)) << "Failed to PushLocalFrame";
 }
 ScopedLocalRefFrame::~ScopedLocalRefFrame() {
   jni_->PopLocalFrame(NULL);
diff --git a/talk/app/webrtc/java/jni/jni_helpers.h b/talk/app/webrtc/java/jni/jni_helpers.h
index dde7137..7072ee8 100644
--- a/talk/app/webrtc/java/jni/jni_helpers.h
+++ b/talk/app/webrtc/java/jni/jni_helpers.h
@@ -41,14 +41,14 @@
 // This macros uses the comma operator to execute ExceptionDescribe
 // and ExceptionClear ignoring their return values and sending ""
 // to the error stream.
-#define CHECK_EXCEPTION(jni)    \
-  CHECK(!jni->ExceptionCheck()) \
+#define CHECK_EXCEPTION(jni)        \
+  RTC_CHECK(!jni->ExceptionCheck()) \
       << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
 
 // Helper that calls ptr->Release() and aborts the process with a useful
 // message if that didn't actually delete *ptr because of extra refcounts.
 #define CHECK_RELEASE(ptr) \
-  CHECK_EQ(0, (ptr)->Release()) << "Unexpected refcount."
+  RTC_CHECK_EQ(0, (ptr)->Release()) << "Unexpected refcount."
 
 namespace webrtc_jni {
 
@@ -67,8 +67,8 @@
 // function expecting a 64-bit param) picks up garbage in the high 32 bits.
 jlong jlongFromPointer(void* ptr);
 
-// JNIEnv-helper methods that CHECK success: no Java exception thrown and found
-// object/class/method/field is non-null.
+// JNIEnv-helper methods that RTC_CHECK success: no Java exception thrown and
+// found object/class/method/field is non-null.
 jmethodID GetMethodID(
     JNIEnv* jni, jclass c, const std::string& name, const char* signature);
 
diff --git a/talk/app/webrtc/java/jni/native_handle_impl.h b/talk/app/webrtc/java/jni/native_handle_impl.h
index cdb72ff..68b213b 100644
--- a/talk/app/webrtc/java/jni/native_handle_impl.h
+++ b/talk/app/webrtc/java/jni/native_handle_impl.h
@@ -66,7 +66,7 @@
  private:
   rtc::scoped_refptr<VideoFrameBuffer> NativeToI420Buffer() override {
     // TODO(pbos): Implement before using this in the encoder pipeline (or
-    // remove the CHECK() in VideoCapture).
+    // remove the RTC_CHECK() in VideoCapture).
     RTC_NOTREACHED();
     return nullptr;
   }
diff --git a/talk/app/webrtc/java/jni/peerconnection_jni.cc b/talk/app/webrtc/java/jni/peerconnection_jni.cc
index 35406f5..5761d86 100644
--- a/talk/app/webrtc/java/jni/peerconnection_jni.cc
+++ b/talk/app/webrtc/java/jni/peerconnection_jni.cc
@@ -140,7 +140,7 @@
   if (ret < 0)
     return -1;
 
-  CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
+  RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
   LoadGlobalClassReferenceHolder();
 
   return ret;
@@ -148,7 +148,7 @@
 
 extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM *jvm, void *reserved) {
   FreeGlobalClassReferenceHolder();
-  CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
+  RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
 }
 
 // Return the (singleton) Java Enum object corresponding to |index|;
@@ -219,7 +219,7 @@
   void OnIceCandidate(const IceCandidateInterface* candidate) override {
     ScopedLocalRefFrame local_ref_frame(jni());
     std::string sdp;
-    CHECK(candidate->ToString(&sdp)) << "got so far: " << sdp;
+    RTC_CHECK(candidate->ToString(&sdp)) << "got so far: " << sdp;
     jclass candidate_class = FindClass(jni(), "org/webrtc/IceCandidate");
     jmethodID ctor = GetMethodID(jni(), candidate_class,
         "<init>", "(Ljava/lang/String;ILjava/lang/String;)V");
@@ -308,7 +308,7 @@
                                   "(Ljava/lang/Object;)Z");
       jboolean added = jni()->CallBooleanMethod(audio_tracks, add, j_track);
       CHECK_EXCEPTION(jni()) << "error during CallBooleanMethod";
-      CHECK(added);
+      RTC_CHECK(added);
     }
 
     for (const auto& track : stream->GetVideoTracks()) {
@@ -331,7 +331,7 @@
                                   "(Ljava/lang/Object;)Z");
       jboolean added = jni()->CallBooleanMethod(video_tracks, add, j_track);
       CHECK_EXCEPTION(jni()) << "error during CallBooleanMethod";
-      CHECK(added);
+      RTC_CHECK(added);
     }
     remote_streams_[stream] = NewGlobalRef(jni(), j_stream);
 
@@ -344,8 +344,8 @@
   void OnRemoveStream(MediaStreamInterface* stream) override {
     ScopedLocalRefFrame local_ref_frame(jni());
     NativeToJavaStreamsMap::iterator it = remote_streams_.find(stream);
-    CHECK(it != remote_streams_.end()) << "unexpected stream: " << std::hex
-                                       << stream;
+    RTC_CHECK(it != remote_streams_.end()) << "unexpected stream: " << std::hex
+                                           << stream;
     jobject j_stream = it->second;
     jmethodID m = GetMethodID(jni(), *j_observer_class_, "onRemoveStream",
                               "(Lorg/webrtc/MediaStream;)V");
@@ -369,7 +369,7 @@
     // CallVoidMethod above as Java code might call back into native code and be
     // surprised to see a refcount of 2.
     int bumped_count = channel->AddRef();
-    CHECK(bumped_count == 2) << "Unexpected refcount OnDataChannel";
+    RTC_CHECK(bumped_count == 2) << "Unexpected refcount OnDataChannel";
 
     CHECK_EXCEPTION(jni()) << "error during CallVoidMethod";
   }
@@ -383,7 +383,7 @@
   }
 
   void SetConstraints(ConstraintsWrapper* constraints) {
-    CHECK(!constraints_.get()) << "constraints already set!";
+    RTC_CHECK(!constraints_.get()) << "constraints already set!";
     constraints_.reset(constraints);
   }
 
@@ -482,7 +482,7 @@
 static jobject JavaSdpFromNativeSdp(
     JNIEnv* jni, const SessionDescriptionInterface* desc) {
   std::string sdp;
-  CHECK(desc->ToString(&sdp)) << "got so far: " << sdp;
+  RTC_CHECK(desc->ToString(&sdp)) << "got so far: " << sdp;
   jstring j_description = JavaStringFromStdString(jni, sdp);
 
   jclass j_type_class = FindClass(
@@ -871,7 +871,7 @@
 
 JOW(jlong, DataChannel_bufferedAmount)(JNIEnv* jni, jobject j_dc) {
   uint64 buffered_amount = ExtractNativeDC(jni, j_dc)->buffered_amount();
-  CHECK_LE(buffered_amount, std::numeric_limits<int64>::max())
+  RTC_CHECK_LE(buffered_amount, std::numeric_limits<int64>::max())
       << "buffered_amount overflowed jlong!";
   return static_cast<jlong>(buffered_amount);
 }
@@ -903,7 +903,7 @@
 #if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
     if (path != "logcat:") {
 #endif
-      CHECK_EQ(0, webrtc::Trace::SetTraceFile(path.c_str(), false))
+      RTC_CHECK_EQ(0, webrtc::Trace::SetTraceFile(path.c_str(), false))
           << "SetTraceFile failed";
 #if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
     } else {
@@ -1087,7 +1087,7 @@
   worker_thread->SetName("worker_thread", NULL);
   Thread* signaling_thread = new Thread();
   signaling_thread->SetName("signaling_thread", NULL);
-  CHECK(worker_thread->Start() && signaling_thread->Start())
+  RTC_CHECK(worker_thread->Start() && signaling_thread->Start())
       << "Failed to start threads";
   WebRtcVideoEncoderFactory* encoder_factory = nullptr;
   WebRtcVideoDecoderFactory* decoder_factory = nullptr;
@@ -1251,7 +1251,7 @@
   if (enum_name == "NONE")
     return PeerConnectionInterface::kNone;
 
-  CHECK(false) << "Unexpected IceTransportsType enum_name " << enum_name;
+  RTC_CHECK(false) << "Unexpected IceTransportsType enum_name " << enum_name;
   return PeerConnectionInterface::kAll;
 }
 
@@ -1270,7 +1270,7 @@
   if (enum_name == "MAXCOMPAT")
     return PeerConnectionInterface::kBundlePolicyMaxCompat;
 
-  CHECK(false) << "Unexpected BundlePolicy enum_name " << enum_name;
+  RTC_CHECK(false) << "Unexpected BundlePolicy enum_name " << enum_name;
   return PeerConnectionInterface::kBundlePolicyBalanced;
 }
 
@@ -1286,7 +1286,7 @@
   if (enum_name == "REQUIRE")
     return PeerConnectionInterface::kRtcpMuxPolicyRequire;
 
-  CHECK(false) << "Unexpected RtcpMuxPolicy enum_name " << enum_name;
+  RTC_CHECK(false) << "Unexpected RtcpMuxPolicy enum_name " << enum_name;
   return PeerConnectionInterface::kRtcpMuxPolicyNegotiate;
 }
 
@@ -1303,7 +1303,7 @@
   if (enum_name == "DISABLED")
     return PeerConnectionInterface::kTcpCandidatePolicyDisabled;
 
-  CHECK(false) << "Unexpected TcpCandidatePolicy enum_name " << enum_name;
+  RTC_CHECK(false) << "Unexpected TcpCandidatePolicy enum_name " << enum_name;
   return PeerConnectionInterface::kTcpCandidatePolicyEnabled;
 }
 
@@ -1316,7 +1316,7 @@
   if (enum_name == "ECDSA")
     return rtc::KT_ECDSA;
 
-  CHECK(false) << "Unexpected KeyType enum_name " << enum_name;
+  RTC_CHECK(false) << "Unexpected KeyType enum_name " << enum_name;
   return rtc::KT_ECDSA;
 }
 
@@ -1477,7 +1477,7 @@
   // vararg parameter as 64-bit and reading memory that doesn't belong to the
   // 32-bit parameter.
   jlong nativeChannelPtr = jlongFromPointer(channel.get());
-  CHECK(nativeChannelPtr) << "Failed to create DataChannel";
+  RTC_CHECK(nativeChannelPtr) << "Failed to create DataChannel";
   jclass j_data_channel_class = FindClass(jni, "org/webrtc/DataChannel");
   jmethodID j_data_channel_ctor = GetMethodID(
       jni, j_data_channel_class, "<init>", "(J)V");
@@ -1486,7 +1486,7 @@
   CHECK_EXCEPTION(jni) << "error during NewObject";
   // Channel is now owned by Java object, and will be freed from there.
   int bumped_count = channel->AddRef();
-  CHECK(bumped_count == 2) << "Unexpected refcount";
+  RTC_CHECK(bumped_count == 2) << "Unexpected refcount";
   return j_channel;
 }
 
@@ -1648,7 +1648,7 @@
   std::string device_name = JavaToStdString(jni, j_device_name);
   scoped_ptr<cricket::DeviceManagerInterface> device_manager(
       cricket::DeviceManagerFactory::Create());
-  CHECK(device_manager->Init()) << "DeviceManager::Init() failed";
+  RTC_CHECK(device_manager->Init()) << "DeviceManager::Init() failed";
   cricket::Device device;
   if (!device_manager->GetVideoCaptureDevice(device_name, &device)) {
     LOG(LS_ERROR) << "GetVideoCaptureDevice failed for " << device_name;
@@ -1695,11 +1695,11 @@
     jint src_stride, jobject j_dst_buffer, jint dst_stride) {
   size_t src_size = jni->GetDirectBufferCapacity(j_src_buffer);
   size_t dst_size = jni->GetDirectBufferCapacity(j_dst_buffer);
-  CHECK(src_stride >= width) << "Wrong source stride " << src_stride;
-  CHECK(dst_stride >= width) << "Wrong destination stride " << dst_stride;
-  CHECK(src_size >= src_stride * height)
+  RTC_CHECK(src_stride >= width) << "Wrong source stride " << src_stride;
+  RTC_CHECK(dst_stride >= width) << "Wrong destination stride " << dst_stride;
+  RTC_CHECK(src_size >= src_stride * height)
       << "Insufficient source buffer capacity " << src_size;
-  CHECK(dst_size >= dst_stride * height)
+  RTC_CHECK(dst_size >= dst_stride * height)
       << "Isufficient destination buffer capacity " << dst_size;
   uint8_t *src =
       reinterpret_cast<uint8_t*>(jni->GetDirectBufferAddress(j_src_buffer));
diff --git a/talk/app/webrtc/mediacontroller.cc b/talk/app/webrtc/mediacontroller.cc
index ff21314..28b007e 100644
--- a/talk/app/webrtc/mediacontroller.cc
+++ b/talk/app/webrtc/mediacontroller.cc
@@ -42,7 +42,7 @@
   MediaController(rtc::Thread* worker_thread,
                   webrtc::VoiceEngine* voice_engine)
       : worker_thread_(worker_thread) {
-    DCHECK(nullptr != worker_thread);
+    RTC_DCHECK(nullptr != worker_thread);
     worker_thread_->Invoke<void>(
         rtc::Bind(&MediaController::Construct_w, this, voice_engine));
   }
@@ -52,13 +52,13 @@
   }
 
   webrtc::Call* call_w() override {
-    DCHECK(worker_thread_->IsCurrent());
+    RTC_DCHECK(worker_thread_->IsCurrent());
     return call_.get();
   }
 
  private:
   void Construct_w(webrtc::VoiceEngine* voice_engine)  {
-    DCHECK(worker_thread_->IsCurrent());
+    RTC_DCHECK(worker_thread_->IsCurrent());
     webrtc::Call::Config config;
     config.voice_engine = voice_engine;
     config.bitrate_config.min_bitrate_bps = kMinBandwidthBps;
@@ -67,7 +67,7 @@
     call_.reset(webrtc::Call::Create(config));
   }
   void Destruct_w() {
-    DCHECK(worker_thread_->IsCurrent());
+    RTC_DCHECK(worker_thread_->IsCurrent());
     call_.reset(nullptr);
   }
 
diff --git a/talk/app/webrtc/objc/RTCFileLogger.mm b/talk/app/webrtc/objc/RTCFileLogger.mm
index 3080ebc..c4e4696 100644
--- a/talk/app/webrtc/objc/RTCFileLogger.mm
+++ b/talk/app/webrtc/objc/RTCFileLogger.mm
@@ -109,7 +109,7 @@
   if (!_hasStarted) {
     return;
   }
-  DCHECK(_logSink);
+  RTC_DCHECK(_logSink);
   rtc::LogMessage::RemoveLogToStream(_logSink.get());
   _hasStarted = NO;
   _logSink.reset();
diff --git a/talk/app/webrtc/objc/avfoundationvideocapturer.mm b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
index d68fdff..c47e36d 100644
--- a/talk/app/webrtc/objc/avfoundationvideocapturer.mm
+++ b/talk/app/webrtc/objc/avfoundationvideocapturer.mm
@@ -336,7 +336,7 @@
 
   // Keep track of which thread capture started on. This is the thread that
   // frames need to be sent to.
-  DCHECK(!_startThread);
+  RTC_DCHECK(!_startThread);
   _startThread = rtc::Thread::Current();
 
   SetCaptureFormat(&format);
@@ -412,7 +412,8 @@
   // Sanity check assumption that planar bytes are contiguous.
   uint8_t* uvPlaneAddress =
       (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, kUVPlaneIndex);
-  DCHECK(uvPlaneAddress == yPlaneAddress + yPlaneHeight * yPlaneBytesPerRow);
+  RTC_DCHECK(
+      uvPlaneAddress == yPlaneAddress + yPlaneHeight * yPlaneBytesPerRow);
 
   // Stuff data into a cricket::CapturedFrame.
   int64 currentTime = rtc::TimeNanos();
@@ -439,7 +440,7 @@
 
 void AVFoundationVideoCapturer::SignalFrameCapturedOnStartThread(
     const cricket::CapturedFrame* frame) {
-  DCHECK(_startThread->IsCurrent());
+  RTC_DCHECK(_startThread->IsCurrent());
   // This will call a superclass method that will perform the frame conversion
   // to I420.
   SignalFrameCaptured(this, frame);
diff --git a/talk/app/webrtc/peerconnectionfactory.cc b/talk/app/webrtc/peerconnectionfactory.cc
index 26765d2..98c5c85 100644
--- a/talk/app/webrtc/peerconnectionfactory.cc
+++ b/talk/app/webrtc/peerconnectionfactory.cc
@@ -55,7 +55,7 @@
   DtlsIdentityStoreWrapper(
       const rtc::scoped_refptr<RefCountedDtlsIdentityStore>& store)
       : store_(store) {
-    DCHECK(store_);
+    RTC_DCHECK(store_);
   }
 
   void RequestIdentity(
@@ -151,7 +151,7 @@
 }
 
 PeerConnectionFactory::~PeerConnectionFactory() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   channel_manager_.reset(nullptr);
   default_allocator_factory_ = nullptr;
 
@@ -167,7 +167,7 @@
 }
 
 bool PeerConnectionFactory::Initialize() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   rtc::InitRandom(rtc::Time());
 
   default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
@@ -200,7 +200,7 @@
 rtc::scoped_refptr<AudioSourceInterface>
 PeerConnectionFactory::CreateAudioSource(
     const MediaConstraintsInterface* constraints) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   rtc::scoped_refptr<LocalAudioSource> source(
       LocalAudioSource::Create(options_, constraints));
   return source;
@@ -210,14 +210,14 @@
 PeerConnectionFactory::CreateVideoSource(
     cricket::VideoCapturer* capturer,
     const MediaConstraintsInterface* constraints) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   rtc::scoped_refptr<VideoSource> source(
       VideoSource::Create(channel_manager_.get(), capturer, constraints));
   return VideoSourceProxy::Create(signaling_thread_, source);
 }
 
 bool PeerConnectionFactory::StartAecDump(rtc::PlatformFile file) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   return channel_manager_->StartAecDump(file);
 }
 
@@ -228,8 +228,8 @@
     PortAllocatorFactoryInterface* allocator_factory,
     rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
     PeerConnectionObserver* observer) {
-  DCHECK(signaling_thread_->IsCurrent());
-  DCHECK(allocator_factory || default_allocator_factory_);
+  RTC_DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(allocator_factory || default_allocator_factory_);
 
   if (!dtls_identity_store.get()) {
     // Because |pc|->Initialize takes ownership of the store we need a new
@@ -258,7 +258,7 @@
 
 rtc::scoped_refptr<MediaStreamInterface>
 PeerConnectionFactory::CreateLocalMediaStream(const std::string& label) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   return MediaStreamProxy::Create(signaling_thread_,
                                   MediaStream::Create(label));
 }
@@ -267,7 +267,7 @@
 PeerConnectionFactory::CreateVideoTrack(
     const std::string& id,
     VideoSourceInterface* source) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   rtc::scoped_refptr<VideoTrackInterface> track(
       VideoTrack::Create(id, source));
   return VideoTrackProxy::Create(signaling_thread_, track);
@@ -276,14 +276,14 @@
 rtc::scoped_refptr<AudioTrackInterface>
 PeerConnectionFactory::CreateAudioTrack(const std::string& id,
                                         AudioSourceInterface* source) {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   rtc::scoped_refptr<AudioTrackInterface> track(
       AudioTrack::Create(id, source));
   return AudioTrackProxy::Create(signaling_thread_, track);
 }
 
 cricket::ChannelManager* PeerConnectionFactory::channel_manager() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   return channel_manager_.get();
 }
 
@@ -294,7 +294,7 @@
 }
 
 rtc::Thread* PeerConnectionFactory::worker_thread() {
-  DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(signaling_thread_->IsCurrent());
   return worker_thread_;
 }
 
diff --git a/talk/app/webrtc/statscollector.cc b/talk/app/webrtc/statscollector.cc
index a634521..6327445 100644
--- a/talk/app/webrtc/statscollector.cc
+++ b/talk/app/webrtc/statscollector.cc
@@ -71,7 +71,7 @@
 
 StatsReport::Id GetTransportIdFromProxy(const cricket::ProxyTransportMap& map,
                                         const std::string& proxy) {
-  DCHECK(!proxy.empty());
+  RTC_DCHECK(!proxy.empty());
   cricket::ProxyTransportMap::const_iterator found = map.find(proxy);
   if (found == map.end())
     return StatsReport::Id();
@@ -96,7 +96,7 @@
   for (const auto& track : tracks) {
     const std::string& track_id = track->id();
     StatsReport* report = AddTrackReport(reports, track_id);
-    DCHECK(report != nullptr);
+    RTC_DCHECK(report != nullptr);
     track_ids[track_id] = report;
   }
 }
@@ -261,7 +261,7 @@
                   double stats_gathering_started,
                   PeerConnectionInterface::StatsOutputLevel level,
                   StatsReport* report) {
-  DCHECK(report->type() == StatsReport::kStatsReportTypeBwe);
+  RTC_DCHECK(report->type() == StatsReport::kStatsReportTypeBwe);
 
   report->set_timestamp(stats_gathering_started);
   const IntForAdd ints[] = {
@@ -332,7 +332,7 @@
   if (candidate_type == cricket::RELAY_PORT_TYPE) {
     return STATSREPORT_RELAY_PORT_TYPE;
   }
-  DCHECK(false);
+  RTC_DCHECK(false);
   return "unknown";
 }
 
@@ -351,7 +351,7 @@
     case rtc::ADAPTER_TYPE_LOOPBACK:
       return STATSREPORT_ADAPTER_TYPE_LOOPBACK;
     default:
-      DCHECK(false);
+      RTC_DCHECK(false);
       return "";
   }
 }
@@ -359,11 +359,11 @@
 StatsCollector::StatsCollector(WebRtcSession* session)
     : session_(session),
       stats_gathering_started_(0) {
-  DCHECK(session_);
+  RTC_DCHECK(session_);
 }
 
 StatsCollector::~StatsCollector() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 }
 
 double StatsCollector::GetTimeNow() {
@@ -373,8 +373,8 @@
 // Adds a MediaStream with tracks that can be used as a |selector| in a call
 // to GetStats.
 void StatsCollector::AddStream(MediaStreamInterface* stream) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
-  DCHECK(stream != NULL);
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(stream != NULL);
 
   CreateTrackReports<AudioTrackVector>(stream->GetAudioTracks(),
                                        &reports_, track_ids_);
@@ -384,11 +384,11 @@
 
 void StatsCollector::AddLocalAudioTrack(AudioTrackInterface* audio_track,
                                         uint32 ssrc) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
-  DCHECK(audio_track != NULL);
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(audio_track != NULL);
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
   for (const auto& track : local_audio_tracks_)
-    DCHECK(track.first != audio_track || track.second != ssrc);
+    RTC_DCHECK(track.first != audio_track || track.second != ssrc);
 #endif
 
   local_audio_tracks_.push_back(std::make_pair(audio_track, ssrc));
@@ -406,7 +406,7 @@
 
 void StatsCollector::RemoveLocalAudioTrack(AudioTrackInterface* audio_track,
                                            uint32 ssrc) {
-  DCHECK(audio_track != NULL);
+  RTC_DCHECK(audio_track != NULL);
   local_audio_tracks_.erase(std::remove_if(local_audio_tracks_.begin(),
       local_audio_tracks_.end(),
       [audio_track, ssrc](const LocalAudioTrackVector::value_type& track) {
@@ -416,9 +416,9 @@
 
 void StatsCollector::GetStats(MediaStreamTrackInterface* track,
                               StatsReports* reports) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
-  DCHECK(reports != NULL);
-  DCHECK(reports->empty());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(reports != NULL);
+  RTC_DCHECK(reports->empty());
 
   rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls;
 
@@ -456,7 +456,7 @@
 
 void
 StatsCollector::UpdateStats(PeerConnectionInterface::StatsOutputLevel level) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
   double time_now = GetTimeNow();
   // Calls to UpdateStats() that occur less than kMinGatherStatsPeriod number of
   // ms apart will be ignored.
@@ -487,7 +487,7 @@
     uint32 ssrc,
     const StatsReport::Id& transport_id,
     StatsReport::Direction direction) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
   StatsReport::Id id(StatsReport::NewIdWithDirection(
       local ? StatsReport::kStatsReportTypeSsrc :
               StatsReport::kStatsReportTypeRemoteSsrc,
@@ -526,7 +526,7 @@
 
 StatsReport* StatsCollector::AddOneCertificateReport(
     const rtc::SSLCertificate* cert, const StatsReport* issuer) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   // TODO(bemasc): Move this computation to a helper class that caches these
   // values to reduce CPU use in GetStats.  This will require adding a fast
@@ -569,13 +569,13 @@
 
 StatsReport* StatsCollector::AddCertificateReports(
     const rtc::SSLCertificate* cert) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
   // Produces a chain of StatsReports representing this certificate and the rest
   // of its chain, and adds those reports to |reports_|.  The return value is
   // the id of the leaf report.  The provided cert must be non-null, so at least
   // one report will always be provided and the returned string will never be
   // empty.
-  DCHECK(cert != NULL);
+  RTC_DCHECK(cert != NULL);
 
   StatsReport* issuer = nullptr;
   rtc::scoped_ptr<rtc::SSLCertChain> chain;
@@ -669,7 +669,7 @@
 }
 
 void StatsCollector::ExtractSessionInfo() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   // Extract information from the base session.
   StatsReport::Id id(StatsReport::NewTypedId(
@@ -763,7 +763,7 @@
 }
 
 void StatsCollector::ExtractVoiceInfo() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   if (!session_->voice_channel()) {
     return;
@@ -796,7 +796,7 @@
 
 void StatsCollector::ExtractVideoInfo(
     PeerConnectionInterface::StatsOutputLevel level) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   if (!session_->video_channel())
     return;
@@ -833,7 +833,7 @@
 }
 
 void StatsCollector::ExtractDataInfo() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls;
 
@@ -854,14 +854,14 @@
 StatsReport* StatsCollector::GetReport(const StatsReport::StatsType& type,
                                        const std::string& id,
                                        StatsReport::Direction direction) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
-  DCHECK(type == StatsReport::kStatsReportTypeSsrc ||
-         type == StatsReport::kStatsReportTypeRemoteSsrc);
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(type == StatsReport::kStatsReportTypeSsrc ||
+             type == StatsReport::kStatsReportTypeRemoteSsrc);
   return reports_.Find(StatsReport::NewIdWithDirection(type, id, direction));
 }
 
 void StatsCollector::UpdateStatsFromExistingLocalAudioTracks() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
   // Loop through the existing local audio tracks.
   for (const auto& it : local_audio_tracks_) {
     AudioTrackInterface* track = it.first;
@@ -889,8 +889,8 @@
 
 void StatsCollector::UpdateReportFromAudioTrack(AudioTrackInterface* track,
                                                 StatsReport* report) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
-  DCHECK(track != NULL);
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(track != NULL);
 
   int signal_level = 0;
   if (!track->GetSignalLevel(&signal_level))
@@ -911,7 +911,7 @@
 
 bool StatsCollector::GetTrackIdBySsrc(uint32 ssrc, std::string* track_id,
                                       StatsReport::Direction direction) {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
   if (direction == StatsReport::kSend) {
     if (!session_->GetLocalTrackIdBySsrc(ssrc, track_id)) {
       LOG(LS_WARNING) << "The SSRC " << ssrc
@@ -919,7 +919,7 @@
       return false;
     }
   } else {
-    DCHECK(direction == StatsReport::kReceive);
+    RTC_DCHECK(direction == StatsReport::kReceive);
     if (!session_->GetRemoteTrackIdBySsrc(ssrc, track_id)) {
       LOG(LS_WARNING) << "The SSRC " << ssrc
                       << " is not associated with a receiving track";
@@ -931,7 +931,7 @@
 }
 
 void StatsCollector::UpdateTrackReports() {
-  DCHECK(session_->signaling_thread()->IsCurrent());
+  RTC_DCHECK(session_->signaling_thread()->IsCurrent());
 
   rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls;
 
diff --git a/talk/app/webrtc/statstypes.cc b/talk/app/webrtc/statstypes.cc
index a23b959..56d705e 100644
--- a/talk/app/webrtc/statstypes.cc
+++ b/talk/app/webrtc/statstypes.cc
@@ -32,7 +32,7 @@
 #include "webrtc/base/checks.h"
 
 // TODO(tommi): Could we have a static map of value name -> expected type
-// and use this to DCHECK on correct usage (somewhat strongly typed values)?
+// and use this to RTC_DCHECK on correct usage (somewhat strongly typed values)?
 // Alternatively, we could define the names+type in a separate document and
 // generate strongly typed inline C++ code that forces the correct type to be
 // used for a given name at compile time.
@@ -74,7 +74,7 @@
     case StatsReport::kStatsReportTypeDataChannel:
       return "datachannel";
   }
-  DCHECK(false);
+  RTC_DCHECK(false);
   return nullptr;
 }
 
@@ -231,7 +231,7 @@
 
 StatsReport::Value::Value(StatsValueName name, int64 value, Type int_type)
     : name(name), type_(int_type) {
-  DCHECK(type_ == kInt || type_ == kInt64);
+  RTC_DCHECK(type_ == kInt || type_ == kInt64);
   type_ == kInt ? value_.int_ = static_cast<int>(value) : value_.int64_ = value;
 }
 
@@ -283,7 +283,7 @@
 
   // There's a 1:1 relation between a name and a type, so we don't have to
   // check that.
-  DCHECK_EQ(type_, other.type_);
+  RTC_DCHECK_EQ(type_, other.type_);
 
   switch (type_) {
     case kInt:
@@ -295,7 +295,8 @@
     case kStaticString: {
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
       if (value_.static_string_ != other.value_.static_string_) {
-        DCHECK(strcmp(value_.static_string_, other.value_.static_string_) != 0)
+        RTC_DCHECK(strcmp(value_.static_string_, other.value_.static_string_) !=
+                   0)
             << "Duplicate global?";
       }
 #endif
@@ -324,7 +325,8 @@
     return false;
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
   if (value_.static_string_ != value)
-    DCHECK(strcmp(value_.static_string_, value) != 0) << "Duplicate global?";
+    RTC_DCHECK(strcmp(value_.static_string_, value) != 0)
+        << "Duplicate global?";
 #endif
   return value == value_.static_string_;
 }
@@ -347,32 +349,32 @@
 }
 
 int StatsReport::Value::int_val() const {
-  DCHECK(type_ == kInt);
+  RTC_DCHECK(type_ == kInt);
   return value_.int_;
 }
 
 int64 StatsReport::Value::int64_val() const {
-  DCHECK(type_ == kInt64);
+  RTC_DCHECK(type_ == kInt64);
   return value_.int64_;
 }
 
 float StatsReport::Value::float_val() const {
-  DCHECK(type_ == kFloat);
+  RTC_DCHECK(type_ == kFloat);
   return value_.float_;
 }
 
 const char* StatsReport::Value::static_string_val() const {
-  DCHECK(type_ == kStaticString);
+  RTC_DCHECK(type_ == kStaticString);
   return value_.static_string_;
 }
 
 const std::string& StatsReport::Value::string_val() const {
-  DCHECK(type_ == kString);
+  RTC_DCHECK(type_ == kString);
   return *value_.string_;
 }
 
 bool StatsReport::Value::bool_val() const {
-  DCHECK(type_ == kBool);
+  RTC_DCHECK(type_ == kBool);
   return value_.bool_;
 }
 
@@ -591,7 +593,7 @@
     case kStatsValueNameWritable:
       return "googWritable";
     default:
-      DCHECK(false);
+      RTC_DCHECK(false);
       break;
   }
 
@@ -620,7 +622,7 @@
 }
 
 StatsReport::StatsReport(const Id& id) : id_(id), timestamp_(0.0) {
-  DCHECK(id_.get());
+  RTC_DCHECK(id_.get());
 }
 
 // static
@@ -720,43 +722,43 @@
 }
 
 StatsCollection::~StatsCollection() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   for (auto* r : list_)
     delete r;
 }
 
 StatsCollection::const_iterator StatsCollection::begin() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return list_.begin();
 }
 
 StatsCollection::const_iterator StatsCollection::end() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return list_.end();
 }
 
 size_t StatsCollection::size() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return list_.size();
 }
 
 StatsReport* StatsCollection::InsertNew(const StatsReport::Id& id) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(Find(id) == nullptr);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(Find(id) == nullptr);
   StatsReport* report = new StatsReport(id);
   list_.push_back(report);
   return report;
 }
 
 StatsReport* StatsCollection::FindOrAddNew(const StatsReport::Id& id) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   StatsReport* ret = Find(id);
   return ret ? ret : InsertNew(id);
 }
 
 StatsReport* StatsCollection::ReplaceOrAddNew(const StatsReport::Id& id) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(id.get());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(id.get());
   Container::iterator it = std::find_if(list_.begin(), list_.end(),
       [&id](const StatsReport* r)->bool { return r->id()->Equals(id); });
   if (it != end()) {
@@ -771,7 +773,7 @@
 // Looks for a report with the given |id|.  If one is not found, NULL
 // will be returned.
 StatsReport* StatsCollection::Find(const StatsReport::Id& id) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Container::iterator it = std::find_if(list_.begin(), list_.end(),
       [&id](const StatsReport* r)->bool { return r->id()->Equals(id); });
   return it == list_.end() ? nullptr : *it;
diff --git a/talk/app/webrtc/test/fakedtlsidentitystore.h b/talk/app/webrtc/test/fakedtlsidentitystore.h
index 5d7743d..0f9bdb9 100644
--- a/talk/app/webrtc/test/fakedtlsidentitystore.h
+++ b/talk/app/webrtc/test/fakedtlsidentitystore.h
@@ -82,7 +82,7 @@
       const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>&
           observer) override {
     // TODO(hbos): Should be able to generate KT_ECDSA too.
-    DCHECK(key_type == rtc::KT_RSA || should_fail_);
+    RTC_DCHECK(key_type == rtc::KT_RSA || should_fail_);
     MessageData* msg = new MessageData(
         rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>(observer));
     rtc::Thread::Current()->Post(
diff --git a/talk/app/webrtc/webrtcsession.cc b/talk/app/webrtc/webrtcsession.cc
index 26a9505..0c0e44d 100644
--- a/talk/app/webrtc/webrtcsession.cc
+++ b/talk/app/webrtc/webrtcsession.cc
@@ -746,7 +746,7 @@
     // Construct with DTLS enabled.
     if (!certificate) {
       // Use the |dtls_identity_store| to generate a certificate.
-      DCHECK(dtls_identity_store);
+      RTC_DCHECK(dtls_identity_store);
       webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
           signaling_thread(),
           channel_manager_,
@@ -2006,7 +2006,7 @@
 // for IPv4 and IPv6.
 void WebRtcSession::ReportBestConnectionState(
     const cricket::TransportStats& stats) {
-  DCHECK(metrics_observer_ != NULL);
+  RTC_DCHECK(metrics_observer_ != NULL);
   for (cricket::TransportChannelStatsList::const_iterator it =
          stats.channel_stats.begin();
        it != stats.channel_stats.end(); ++it) {
@@ -2029,7 +2029,7 @@
       } else if (local.protocol() == cricket::UDP_PROTOCOL_NAME) {
         type = kEnumCounterIceCandidatePairTypeUdp;
       } else {
-        CHECK(0);
+        RTC_CHECK(0);
       }
       metrics_observer_->IncrementEnumCounter(
           type, GetIceCandidatePairCounter(local, remote),
@@ -2046,7 +2046,7 @@
             kEnumCounterAddressFamily, kBestConnections_IPv6,
             kPeerConnectionAddressFamilyCounter_Max);
       } else {
-        CHECK(0);
+        RTC_CHECK(0);
       }
 
       return;
@@ -2056,7 +2056,7 @@
 
 void WebRtcSession::ReportNegotiatedCiphers(
     const cricket::TransportStats& stats) {
-  DCHECK(metrics_observer_ != NULL);
+  RTC_DCHECK(metrics_observer_ != NULL);
   if (!dtls_enabled_ || stats.channel_stats.empty()) {
     return;
   }
diff --git a/talk/app/webrtc/webrtcsession_unittest.cc b/talk/app/webrtc/webrtcsession_unittest.cc
index ef4d33f..b84e6fb 100644
--- a/talk/app/webrtc/webrtcsession_unittest.cc
+++ b/talk/app/webrtc/webrtcsession_unittest.cc
@@ -424,7 +424,7 @@
       dtls_identity_store.reset(new FakeDtlsIdentityStore());
       dtls_identity_store->set_should_fail(false);
     } else {
-      CHECK(false);
+      RTC_CHECK(false);
     }
     Init(dtls_identity_store.Pass(), configuration);
   }
@@ -1237,7 +1237,7 @@
 
   void VerifyMultipleAsyncCreateDescriptionAfterInit(
       bool success, CreateSessionDescriptionRequest::Type type) {
-    CHECK(session_);
+    RTC_CHECK(session_);
     SetFactoryDtlsSrtp();
     if (type == CreateSessionDescriptionRequest::kAnswer) {
       cricket::MediaSessionOptions options;
diff --git a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
index aad5185..a0ec679 100644
--- a/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
+++ b/talk/app/webrtc/webrtcsessiondescriptionfactory.cc
@@ -190,7 +190,7 @@
         session_id,
         dct,
         true) {
-  DCHECK(dtls_identity_store_);
+  RTC_DCHECK(dtls_identity_store_);
 
   certificate_request_state_ = CERTIFICATE_WAITING;
 
@@ -219,7 +219,7 @@
     : WebRtcSessionDescriptionFactory(
         signaling_thread, channel_manager, mediastream_signaling, nullptr,
         nullptr, session, session_id, dct, true) {
-  DCHECK(certificate);
+  RTC_DCHECK(certificate);
 
   certificate_request_state_ = CERTIFICATE_WAITING;
 
@@ -517,7 +517,7 @@
 
 void WebRtcSessionDescriptionFactory::SetCertificate(
     const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) {
-  DCHECK(certificate);
+  RTC_DCHECK(certificate);
   LOG(LS_VERBOSE) << "Setting new certificate";
 
   certificate_request_state_ = CERTIFICATE_SUCCEEDED;
diff --git a/talk/media/base/capturemanager.cc b/talk/media/base/capturemanager.cc
index 0e67692..b7cbbf2 100644
--- a/talk/media/base/capturemanager.cc
+++ b/talk/media/base/capturemanager.cc
@@ -51,16 +51,16 @@
   int IncCaptureStartRef();
   int DecCaptureStartRef();
   CaptureRenderAdapter* adapter() {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return adapter_.get();
   }
   VideoCapturer* GetVideoCapturer() {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return adapter()->video_capturer();
   }
 
   int start_count() const {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return start_count_;
   }
 
@@ -98,7 +98,7 @@
 
 void VideoCapturerState::AddCaptureResolution(
     const VideoFormat& desired_format) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   for (CaptureFormats::iterator iter = capture_formats_.begin();
        iter != capture_formats_.end(); ++iter) {
     if (desired_format == iter->video_format) {
@@ -111,7 +111,7 @@
 }
 
 bool VideoCapturerState::RemoveCaptureResolution(const VideoFormat& format) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   for (CaptureFormats::iterator iter = capture_formats_.begin();
        iter != capture_formats_.end(); ++iter) {
     if (format == iter->video_format) {
@@ -127,7 +127,7 @@
 
 VideoFormat VideoCapturerState::GetHighestFormat(
     VideoCapturer* video_capturer) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoFormat highest_format(0, 0, VideoFormat::FpsToInterval(1), FOURCC_ANY);
   if (capture_formats_.empty()) {
     VideoFormat default_format(kDefaultCaptureFormat);
@@ -149,12 +149,12 @@
 }
 
 int VideoCapturerState::IncCaptureStartRef() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return ++start_count_;
 }
 
 int VideoCapturerState::DecCaptureStartRef() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (start_count_ > 0) {
     // Start count may be 0 if a capturer was added but never started.
     --start_count_;
@@ -169,20 +169,20 @@
 }
 
 CaptureManager::~CaptureManager() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
   // Since we don't own any of the capturers, all capturers should have been
   // cleaned up before we get here. In fact, in the normal shutdown sequence,
   // all capturers *will* be shut down by now, so trying to stop them here
   // will crash. If we're still tracking any, it's a dangling pointer.
-  // TODO(hbos): DCHECK instead of CHECK until we figure out why capture_states_
-  // is not always empty here.
-  DCHECK(capture_states_.empty());
+  // TODO(hbos): RTC_DCHECK instead of RTC_CHECK until we figure out why
+  // capture_states_ is not always empty here.
+  RTC_DCHECK(capture_states_.empty());
 }
 
 bool CaptureManager::StartVideoCapture(VideoCapturer* video_capturer,
                                        const VideoFormat& desired_format) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (desired_format.width == 0 || desired_format.height == 0) {
     return false;
   }
@@ -215,7 +215,7 @@
 
 bool CaptureManager::StopVideoCapture(VideoCapturer* video_capturer,
                                       const VideoFormat& format) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoCapturerState* capture_state = GetCaptureState(video_capturer);
   if (!capture_state) {
     return false;
@@ -236,7 +236,7 @@
     const VideoFormat& previous_format,
     const VideoFormat& desired_format,
     CaptureManager::RestartOptions options) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!IsCapturerRegistered(video_capturer)) {
     LOG(LS_ERROR) << "RestartVideoCapture: video_capturer is not registered.";
     return false;
@@ -289,7 +289,7 @@
 
 bool CaptureManager::AddVideoRenderer(VideoCapturer* video_capturer,
                                       VideoRenderer* video_renderer) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!video_capturer || !video_renderer) {
     return false;
   }
@@ -302,7 +302,7 @@
 
 bool CaptureManager::RemoveVideoRenderer(VideoCapturer* video_capturer,
                                          VideoRenderer* video_renderer) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!video_capturer || !video_renderer) {
     return false;
   }
@@ -314,12 +314,12 @@
 }
 
 bool CaptureManager::IsCapturerRegistered(VideoCapturer* video_capturer) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return GetCaptureState(video_capturer) != NULL;
 }
 
 bool CaptureManager::RegisterVideoCapturer(VideoCapturer* video_capturer) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoCapturerState* capture_state =
       VideoCapturerState::Create(video_capturer);
   if (!capture_state) {
@@ -332,7 +332,7 @@
 
 void CaptureManager::UnregisterVideoCapturer(
     VideoCapturerState* capture_state) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoCapturer* video_capturer = capture_state->GetVideoCapturer();
   capture_states_.erase(video_capturer);
   delete capture_state;
@@ -357,7 +357,7 @@
 
 bool CaptureManager::StartWithBestCaptureFormat(
     VideoCapturerState* capture_state, VideoCapturer* video_capturer) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoFormat highest_asked_format =
       capture_state->GetHighestFormat(video_capturer);
   VideoFormat capture_format;
@@ -384,7 +384,7 @@
 
 VideoCapturerState* CaptureManager::GetCaptureState(
     VideoCapturer* video_capturer) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   CaptureStates::const_iterator iter = capture_states_.find(video_capturer);
   if (iter == capture_states_.end()) {
     return NULL;
@@ -394,7 +394,7 @@
 
 CaptureRenderAdapter* CaptureManager::GetAdapter(
     VideoCapturer* video_capturer) const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   VideoCapturerState* capture_state = GetCaptureState(video_capturer);
   if (!capture_state) {
     return NULL;
diff --git a/talk/media/sctp/sctpdataengine.cc b/talk/media/sctp/sctpdataengine.cc
index 4fc3d43..693fbec 100644
--- a/talk/media/sctp/sctpdataengine.cc
+++ b/talk/media/sctp/sctpdataengine.cc
@@ -377,7 +377,7 @@
 }
 
 void SctpDataMediaChannel::OnSendThresholdCallback() {
-  DCHECK(rtc::Thread::Current() == worker_thread_);
+  RTC_DCHECK(rtc::Thread::Current() == worker_thread_);
   SignalReadyToSend(true);
 }
 
@@ -658,7 +658,7 @@
 // Called by network interface when a packet has been received.
 void SctpDataMediaChannel::OnPacketReceived(
     rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
-  DCHECK(rtc::Thread::Current() == worker_thread_);
+  RTC_DCHECK(rtc::Thread::Current() == worker_thread_);
   LOG(LS_VERBOSE) << debug_name_ << "->OnPacketReceived(...): "
                   << " length=" << packet->size() << ", sending: " << sending_;
   // Only give receiving packets to usrsctp after if connected. This enables two
diff --git a/talk/media/webrtc/fakewebrtccall.cc b/talk/media/webrtc/fakewebrtccall.cc
index a85bdb1..9f2c8e5 100644
--- a/talk/media/webrtc/fakewebrtccall.cc
+++ b/talk/media/webrtc/fakewebrtccall.cc
@@ -37,7 +37,7 @@
 FakeAudioReceiveStream::FakeAudioReceiveStream(
     const webrtc::AudioReceiveStream::Config& config)
     : config_(config), received_packets_(0) {
-  DCHECK(config.voe_channel_id != -1);
+  RTC_DCHECK(config.voe_channel_id != -1);
 }
 
 webrtc::AudioReceiveStream::Stats FakeAudioReceiveStream::GetStats() const {
@@ -60,7 +60,7 @@
       config_(config),
       codec_settings_set_(false),
       num_swapped_frames_(0) {
-  DCHECK(config.encoder_settings.encoder != NULL);
+  RTC_DCHECK(config.encoder_settings.encoder != NULL);
   ReconfigureVideoEncoder(encoder_config);
 }
 
@@ -113,7 +113,7 @@
 }
 
 int64_t FakeVideoSendStream::GetLastTimestamp() const {
-  DCHECK(last_frame_.ntp_time_ms() == 0);
+  RTC_DCHECK(last_frame_.ntp_time_ms() == 0);
   return last_frame_.render_time_ms();
 }
 
diff --git a/talk/media/webrtc/fakewebrtcvoiceengine.h b/talk/media/webrtc/fakewebrtcvoiceengine.h
index d0cff57..4ce5a38 100644
--- a/talk/media/webrtc/fakewebrtcvoiceengine.h
+++ b/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -89,7 +89,7 @@
   if (channels_.find(channel) == channels_.end()) return -1;
 
 #define WEBRTC_ASSERT_CHANNEL(channel) \
-  DCHECK(channels_.find(channel) != channels_.end());
+  RTC_DCHECK(channels_.find(channel) != channels_.end());
 
 // Verify the header extension ID, if enabled, is within the bounds specified in
 // [RFC5285]: 1-14 inclusive.
@@ -383,7 +383,7 @@
     return channels_[channel]->packets.empty();
   }
   void TriggerCallbackOnError(int channel_num, int err_code) {
-    DCHECK(observer_ != NULL);
+    RTC_DCHECK(observer_ != NULL);
     observer_->CallbackOnError(channel_num, err_code);
   }
   void set_playout_fail_channel(int channel) {
diff --git a/talk/media/webrtc/webrtcvideocapturer.cc b/talk/media/webrtc/webrtcvideocapturer.cc
index f8c373d..60b8422 100644
--- a/talk/media/webrtc/webrtcvideocapturer.cc
+++ b/talk/media/webrtc/webrtcvideocapturer.cc
@@ -152,7 +152,7 @@
 }
 
 bool WebRtcVideoCapturer::Init(const Device& device) {
-  DCHECK(!start_thread_);
+  RTC_DCHECK(!start_thread_);
   if (module_) {
     LOG(LS_ERROR) << "The capturer is already initialized";
     return false;
@@ -226,7 +226,7 @@
 }
 
 bool WebRtcVideoCapturer::Init(webrtc::VideoCaptureModule* module) {
-  DCHECK(!start_thread_);
+  RTC_DCHECK(!start_thread_);
   if (module_) {
     LOG(LS_ERROR) << "The capturer is already initialized";
     return false;
@@ -263,7 +263,7 @@
   // Can't take lock here as this will cause deadlock with
   // OnIncomingCapturedFrame. In fact, the whole method, including methods it
   // calls, can't take lock.
-  DCHECK(module_);
+  RTC_DCHECK(module_);
 
   const std::string group_name =
       webrtc::field_trial::FindFullName("WebRTC-CVO");
@@ -285,13 +285,13 @@
   }
   if (start_thread_) {
     LOG(LS_ERROR) << "The capturer is already running";
-    DCHECK(start_thread_->IsCurrent())
+    RTC_DCHECK(start_thread_->IsCurrent())
         << "Trying to start capturer on different threads";
     return CS_FAILED;
   }
 
   start_thread_ = rtc::Thread::Current();
-  DCHECK(!async_invoker_);
+  RTC_DCHECK(!async_invoker_);
   async_invoker_.reset(new rtc::AsyncInvoker());
   captured_frames_ = 0;
 
@@ -327,9 +327,9 @@
     LOG(LS_ERROR) << "The capturer is already stopped";
     return;
   }
-  DCHECK(start_thread_);
-  DCHECK(start_thread_->IsCurrent());
-  DCHECK(async_invoker_);
+  RTC_DCHECK(start_thread_);
+  RTC_DCHECK(start_thread_->IsCurrent());
+  RTC_DCHECK(async_invoker_);
   if (IsRunning()) {
     // The module is responsible for OnIncomingCapturedFrame being called, if
     // we stop it we will get no further callbacks.
@@ -372,8 +372,8 @@
     const int32_t id,
     const webrtc::VideoFrame& sample) {
   // This can only happen between Start() and Stop().
-  DCHECK(start_thread_);
-  DCHECK(async_invoker_);
+  RTC_DCHECK(start_thread_);
+  RTC_DCHECK(async_invoker_);
   if (start_thread_->IsCurrent()) {
     SignalFrameCapturedOnStartThread(sample);
   } else {
@@ -398,9 +398,9 @@
 void WebRtcVideoCapturer::SignalFrameCapturedOnStartThread(
     const webrtc::VideoFrame frame) {
   // This can only happen between Start() and Stop().
-  DCHECK(start_thread_);
-  DCHECK(start_thread_->IsCurrent());
-  DCHECK(async_invoker_);
+  RTC_DCHECK(start_thread_);
+  RTC_DCHECK(start_thread_->IsCurrent());
+  RTC_DCHECK(async_invoker_);
 
   ++captured_frames_;
   // Log the size and pixel aspect ratio of the first captured frame.
diff --git a/talk/media/webrtc/webrtcvideoengine2.cc b/talk/media/webrtc/webrtcvideoengine2.cc
index cde449e..85e67c4 100644
--- a/talk/media/webrtc/webrtcvideoengine2.cc
+++ b/talk/media/webrtc/webrtcvideoengine2.cc
@@ -106,7 +106,7 @@
 
   webrtc::VideoEncoder* CreateVideoEncoder(
       webrtc::VideoCodecType type) override {
-    DCHECK(factory_ != NULL);
+    RTC_DCHECK(factory_ != NULL);
     // If it's a codec type we can simulcast, create a wrapped encoder.
     if (type == webrtc::kVideoCodecVP8) {
       return new webrtc::SimulcastEncoderAdapter(
@@ -600,7 +600,7 @@
 WebRtcVideoChannel2* WebRtcVideoEngine2::CreateChannel(
     webrtc::Call* call,
     const VideoOptions& options) {
-  DCHECK(initialized_);
+  RTC_DCHECK(initialized_);
   LOG(LS_INFO) << "CreateChannel. Options: " << options.ToString();
   WebRtcVideoChannel2* channel = new WebRtcVideoChannel2(call, options,
       external_encoder_factory_, external_decoder_factory_);
@@ -622,20 +622,20 @@
   LOG(LS_VERBOSE) << "SetLogging: " << min_sev << '"' << filter << '"';
   // if min_sev == -1, we keep the current log level.
   if (min_sev < 0) {
-    DCHECK(min_sev == -1);
+    RTC_DCHECK(min_sev == -1);
     return;
   }
 }
 
 void WebRtcVideoEngine2::SetExternalDecoderFactory(
     WebRtcVideoDecoderFactory* decoder_factory) {
-  DCHECK(!initialized_);
+  RTC_DCHECK(!initialized_);
   external_decoder_factory_ = decoder_factory;
 }
 
 void WebRtcVideoEngine2::SetExternalEncoderFactory(
     WebRtcVideoEncoderFactory* encoder_factory) {
-  DCHECK(!initialized_);
+  RTC_DCHECK(!initialized_);
   if (external_encoder_factory_ == encoder_factory)
     return;
 
@@ -681,7 +681,7 @@
 bool WebRtcVideoEngine2::CanSendCodec(const VideoCodec& requested,
                                       const VideoCodec& current,
                                       VideoCodec* out) {
-  DCHECK(out != NULL);
+  RTC_DCHECK(out != NULL);
 
   if (requested.width != requested.height &&
       (requested.height == 0 || requested.width == 0)) {
@@ -747,7 +747,7 @@
     // we only support up to 8 external payload types.
     const int kExternalVideoPayloadTypeBase = 120;
     size_t payload_type = kExternalVideoPayloadTypeBase + i;
-    DCHECK(payload_type < 128);
+    RTC_DCHECK(payload_type < 128);
     VideoCodec codec(static_cast<int>(payload_type),
                      codecs[i].name,
                      codecs[i].max_width,
@@ -770,7 +770,7 @@
       unsignalled_ssrc_handler_(&default_unsignalled_ssrc_handler_),
       external_encoder_factory_(external_encoder_factory),
       external_decoder_factory_(external_decoder_factory) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   SetDefaultOptions();
   options_.SetAll(options);
   options_.cpu_overuse_detection.Get(&signal_cpu_adaptation_);
@@ -963,13 +963,13 @@
   LOG(LS_INFO) << "Change the send codec because SetSendCodecs has a different "
                   "first supported codec.";
   for (auto& kv : send_streams_) {
-    DCHECK(kv.second != nullptr);
+    RTC_DCHECK(kv.second != nullptr);
     kv.second->SetCodec(supported_codecs.front());
   }
   LOG(LS_INFO) << "SetNackAndRemb on all the receive streams because the send "
                   "codec has changed.";
   for (auto& kv : receive_streams_) {
-    DCHECK(kv.second != nullptr);
+    RTC_DCHECK(kv.second != nullptr);
     kv.second->SetNackAndRemb(HasNack(supported_codecs.front().codec),
                               HasRemb(supported_codecs.front().codec));
   }
@@ -1108,7 +1108,7 @@
                                 send_rtp_extensions_);
 
   uint32 ssrc = sp.first_ssrc();
-  DCHECK(ssrc != 0);
+  RTC_DCHECK(ssrc != 0);
   send_streams_[ssrc] = stream;
 
   if (rtcp_receiver_report_ssrc_ == kDefaultRtcpReceiverReportSsrc) {
@@ -1179,7 +1179,7 @@
 
 bool WebRtcVideoChannel2::AddRecvStream(const StreamParams& sp,
                                         bool default_stream) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
   LOG(LS_INFO) << "AddRecvStream" << (default_stream ? " (default stream)" : "")
                << ": " << sp.ToString();
@@ -1187,7 +1187,7 @@
     return false;
 
   uint32 ssrc = sp.first_ssrc();
-  DCHECK(ssrc != 0);  // TODO(pbos): Is this ever valid?
+  RTC_DCHECK(ssrc != 0);  // TODO(pbos): Is this ever valid?
 
   rtc::CritScope stream_lock(&stream_crit_);
   // Remove running stream if this was a default stream.
@@ -1376,7 +1376,7 @@
 bool WebRtcVideoChannel2::SetCapturer(uint32 ssrc, VideoCapturer* capturer) {
   LOG(LS_INFO) << "SetCapturer: " << ssrc << " -> "
                << (capturer != NULL ? "(capturer)" : "NULL");
-  DCHECK(ssrc != 0);
+  RTC_DCHECK(ssrc != 0);
   {
     rtc::CritScope stream_lock(&stream_crit_);
     if (send_streams_.find(ssrc) == send_streams_.end()) {
@@ -1491,7 +1491,7 @@
 bool WebRtcVideoChannel2::MuteStream(uint32 ssrc, bool mute) {
   LOG(LS_VERBOSE) << "MuteStream: " << ssrc << " -> "
                   << (mute ? "mute" : "unmute");
-  DCHECK(ssrc != 0);
+  RTC_DCHECK(ssrc != 0);
   rtc::CritScope stream_lock(&stream_crit_);
   if (send_streams_.find(ssrc) == send_streams_.end()) {
     LOG(LS_ERROR) << "No sending stream on ssrc " << ssrc;
@@ -1794,7 +1794,7 @@
     return;
 
   if (format_.width == 0) {  // Dropping frames.
-    DCHECK(format_.height == 0);
+    RTC_DCHECK(format_.height == 0);
     LOG(LS_VERBOSE) << "VideoFormat 0x0 set, Dropping frame.";
     return;
   }
@@ -1988,7 +1988,7 @@
 
   // This shouldn't happen, we should not be trying to create something we don't
   // support.
-  DCHECK(false);
+  RTC_DCHECK(false);
   return AllocatedEncoder(NULL, webrtc::kVideoCodecUnknown, false);
 }
 
@@ -2143,7 +2143,7 @@
   last_dimensions_.height = height;
   last_dimensions_.is_screencast = is_screencast;
 
-  DCHECK(!parameters_.encoder_config.streams.empty());
+  RTC_DCHECK(!parameters_.encoder_config.streams.empty());
 
   VideoCodecSettings codec_settings;
   parameters_.codec_settings.Get(&codec_settings);
@@ -2169,7 +2169,7 @@
 
 void WebRtcVideoChannel2::WebRtcVideoSendStream::Start() {
   rtc::CritScope cs(&lock_);
-  DCHECK(stream_ != NULL);
+  RTC_DCHECK(stream_ != NULL);
   stream_->Start();
   sending_ = true;
 }
@@ -2420,7 +2420,7 @@
 
   // This shouldn't happen, we should not be trying to create something we don't
   // support.
-  DCHECK(false);
+  RTC_DCHECK(false);
   return AllocatedDecoder(NULL, webrtc::kVideoCodecUnknown, false);
 }
 
@@ -2454,10 +2454,10 @@
 
 void WebRtcVideoChannel2::WebRtcVideoReceiveStream::SetLocalSsrc(
     uint32_t local_ssrc) {
-  // TODO(pbos): Consider turning this sanity check into a DCHECK. You should
-  // not be able to create a sender with the same SSRC as a receiver, but right
-  // now this can't be done due to unittests depending on receiving what they
-  // are sending from the same MediaChannel.
+  // TODO(pbos): Consider turning this sanity check into a RTC_DCHECK. You
+  // should not be able to create a sender with the same SSRC as a receiver, but
+  // right now this can't be done due to unittests depending on receiving what
+  // they are sending from the same MediaChannel.
   if (local_ssrc == config_.rtp.remote_ssrc) {
     LOG(LS_INFO) << "Ignoring call to SetLocalSsrc because parameters are "
                     "unchanged; local_ssrc=" << local_ssrc;
@@ -2652,7 +2652,7 @@
 
 std::vector<WebRtcVideoChannel2::VideoCodecSettings>
 WebRtcVideoChannel2::MapCodecs(const std::vector<VideoCodec>& codecs) {
-  DCHECK(!codecs.empty());
+  RTC_DCHECK(!codecs.empty());
 
   std::vector<VideoCodecSettings> video_codecs;
   std::map<int, bool> payload_used;
@@ -2677,14 +2677,14 @@
     switch (in_codec.GetCodecType()) {
       case VideoCodec::CODEC_RED: {
         // RED payload type, should not have duplicates.
-        DCHECK(fec_settings.red_payload_type == -1);
+        RTC_DCHECK(fec_settings.red_payload_type == -1);
         fec_settings.red_payload_type = in_codec.id;
         continue;
       }
 
       case VideoCodec::CODEC_ULPFEC: {
         // ULPFEC payload type, should not have duplicates.
-        DCHECK(fec_settings.ulpfec_payload_type == -1);
+        RTC_DCHECK(fec_settings.ulpfec_payload_type == -1);
         fec_settings.ulpfec_payload_type = in_codec.id;
         continue;
       }
@@ -2713,7 +2713,7 @@
 
   // One of these codecs should have been a video codec. Only having FEC
   // parameters into this code is a logic error.
-  DCHECK(!video_codecs.empty());
+  RTC_DCHECK(!video_codecs.empty());
 
   for (std::map<int, int>::const_iterator it = rtx_mapping.begin();
        it != rtx_mapping.end();
diff --git a/talk/media/webrtc/webrtcvideoengine2_unittest.cc b/talk/media/webrtc/webrtcvideoengine2_unittest.cc
index 5a7a0d1..da16d2b 100644
--- a/talk/media/webrtc/webrtcvideoengine2_unittest.cc
+++ b/talk/media/webrtc/webrtcvideoengine2_unittest.cc
@@ -113,7 +113,7 @@
       : call_(webrtc::Call::Create(webrtc::Call::Config())),
         engine_() {
     std::vector<VideoCodec> engine_codecs = engine_.codecs();
-    DCHECK(!engine_codecs.empty());
+    RTC_DCHECK(!engine_codecs.empty());
     bool codec_set = false;
     for (size_t i = 0; i < engine_codecs.size(); ++i) {
       if (engine_codecs[i].name == "red") {
@@ -132,7 +132,7 @@
       }
     }
 
-    DCHECK(codec_set);
+    RTC_DCHECK(codec_set);
   }
 
  protected:
@@ -2982,7 +2982,7 @@
     ASSERT_TRUE(channel_->SetSendCodecs(codecs));
 
     std::vector<uint32> ssrcs = MAKE_VECTOR(kSsrcs3);
-    DCHECK(num_configured_streams <= ssrcs.size());
+    RTC_DCHECK(num_configured_streams <= ssrcs.size());
     ssrcs.resize(num_configured_streams);
 
     FakeVideoSendStream* stream =
diff --git a/talk/media/webrtc/webrtcvideoframe.cc b/talk/media/webrtc/webrtcvideoframe.cc
index e72ab14..932bf3c 100644
--- a/talk/media/webrtc/webrtcvideoframe.cc
+++ b/talk/media/webrtc/webrtcvideoframe.cc
@@ -177,7 +177,7 @@
 }
 
 bool WebRtcVideoFrame::MakeExclusive() {
-  DCHECK(video_frame_buffer_->native_handle() == nullptr);
+  RTC_DCHECK(video_frame_buffer_->native_handle() == nullptr);
   if (IsExclusive())
     return true;
 
@@ -202,8 +202,8 @@
 
 size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
                                             size_t size, int stride_rgb) const {
-  CHECK(video_frame_buffer_);
-  CHECK(video_frame_buffer_->native_handle() == nullptr);
+  RTC_CHECK(video_frame_buffer_);
+  RTC_CHECK(video_frame_buffer_->native_handle() == nullptr);
   return VideoFrame::ConvertToRgbBuffer(to_fourcc, buffer, size, stride_rgb);
 }
 
@@ -296,7 +296,7 @@
   // If the video frame is backed up by a native handle, it resides in the GPU
   // memory which we can't rotate here. The assumption is that the renderers
   // which uses GPU to render should be able to rotate themselves.
-  DCHECK(!GetNativeHandle());
+  RTC_DCHECK(!GetNativeHandle());
 
   if (rotated_frame_) {
     return rotated_frame_.get();
diff --git a/talk/media/webrtc/webrtcvoiceengine.cc b/talk/media/webrtc/webrtcvoiceengine.cc
index b01bfab..add831d 100644
--- a/talk/media/webrtc/webrtcvoiceengine.cc
+++ b/talk/media/webrtc/webrtcvoiceengine.cc
@@ -331,7 +331,7 @@
   if (IsCodec(*voe_codec, kG722CodecName)) {
     // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine
     // has changed, and this special case is no longer needed.
-    DCHECK(voe_codec->plfreq != new_plfreq);
+    RTC_DCHECK(voe_codec->plfreq != new_plfreq);
     voe_codec->plfreq = new_plfreq;
   }
 }
@@ -493,14 +493,14 @@
   }
 
   // Test to see if the media processor was deregistered properly
-  DCHECK(SignalRxMediaFrame.is_empty());
-  DCHECK(SignalTxMediaFrame.is_empty());
+  RTC_DCHECK(SignalRxMediaFrame.is_empty());
+  RTC_DCHECK(SignalTxMediaFrame.is_empty());
 
   tracing_->SetTraceCallback(NULL);
 }
 
 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
-  DCHECK(worker_thread == rtc::Thread::Current());
+  RTC_DCHECK(worker_thread == rtc::Thread::Current());
   LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
   bool res = InitInternal();
   if (res) {
@@ -1071,7 +1071,7 @@
 }
 
 bool WebRtcVoiceEngine::SetOutputVolume(int level) {
-  DCHECK(level >= 0 && level <= 255);
+  RTC_DCHECK(level >= 0 && level <= 255);
   if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
     LOG_RTCERR1(SetSpeakerVolume, level);
     return false;
@@ -1304,7 +1304,7 @@
   LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
                   << channel_num << ".";
   if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
-    DCHECK(channel != NULL);
+    RTC_DCHECK(channel != NULL);
     channel->OnError(ssrc, err_code);
   } else {
     LOG(LS_ERROR) << "VoiceEngine channel " << channel_num
@@ -1314,13 +1314,13 @@
 
 bool WebRtcVoiceEngine::FindChannelAndSsrc(
     int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
-  DCHECK(channel != NULL && ssrc != NULL);
+  RTC_DCHECK(channel != NULL && ssrc != NULL);
 
   *channel = NULL;
   *ssrc = 0;
   // Find corresponding channel and ssrc
   for (WebRtcVoiceMediaChannel* ch : channels_) {
-    DCHECK(ch != NULL);
+    RTC_DCHECK(ch != NULL);
     if (ch->FindSsrc(channel_num, ssrc)) {
       *channel = ch;
       return true;
@@ -1334,13 +1334,13 @@
 // obtain the voice engine's channel number.
 bool WebRtcVoiceEngine::FindChannelNumFromSsrc(
     uint32 ssrc, MediaProcessorDirection direction, int* channel_num) {
-  DCHECK(channel_num != NULL);
-  DCHECK(direction == MPD_RX || direction == MPD_TX);
+  RTC_DCHECK(channel_num != NULL);
+  RTC_DCHECK(direction == MPD_RX || direction == MPD_TX);
 
   *channel_num = -1;
   // Find corresponding channel for ssrc.
   for (const WebRtcVoiceMediaChannel* ch : channels_) {
-    DCHECK(ch != NULL);
+    RTC_DCHECK(ch != NULL);
     if (direction & MPD_RX) {
       *channel_num = ch->GetReceiveChannelNum(ssrc);
     }
@@ -1622,9 +1622,9 @@
   // TODO(xians): Make sure Start() is called only once.
   void Start(AudioRenderer* renderer) {
     rtc::CritScope lock(&lock_);
-    DCHECK(renderer != NULL);
+    RTC_DCHECK(renderer != NULL);
     if (renderer_ != NULL) {
-      DCHECK(renderer_ == renderer);
+      RTC_DCHECK(renderer_ == renderer);
       return;
     }
 
@@ -1708,7 +1708,7 @@
   engine->RegisterChannel(this);
   LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
                   << voe_channel();
-  DCHECK(nullptr != call);
+  RTC_DCHECK(nullptr != call);
   ConfigureSendChannel(voe_channel());
 }
 
@@ -1727,7 +1727,7 @@
   while (!receive_channels_.empty()) {
     RemoveRecvStream(receive_channels_.begin()->first);
   }
-  DCHECK(receive_streams_.empty());
+  RTC_DCHECK(receive_streams_.empty());
 
   // Delete the default channel.
   DeleteChannel(voe_channel());
@@ -2365,7 +2365,7 @@
       return false;
     }
   } else {  // SEND_NOTHING
-    DCHECK(send == SEND_NOTHING);
+    RTC_DCHECK(send == SEND_NOTHING);
     if (engine()->voe()->base()->StopSend(channel) == -1) {
       LOG_RTCERR1(StopSend, channel);
       return false;
@@ -2532,7 +2532,7 @@
 }
 
 bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&receive_channels_cs_);
 
   if (!VERIFY(sp.ssrcs.size() == 1))
@@ -2549,7 +2549,7 @@
     return false;
   }
 
-  DCHECK(receive_stream_params_.find(ssrc) == receive_stream_params_.end());
+  RTC_DCHECK(receive_stream_params_.find(ssrc) == receive_stream_params_.end());
 
   // Reuse default channel for recv stream in non-conference mode call
   // when the default channel is not being used.
@@ -2662,7 +2662,7 @@
 }
 
 bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&receive_channels_cs_);
   ChannelMap::iterator it = receive_channels_.find(ssrc);
   if (it == receive_channels_.end()) {
@@ -2682,7 +2682,7 @@
   receive_channels_.erase(it);
 
   if (ssrc == default_receive_ssrc_) {
-    DCHECK(IsDefaultChannel(channel));
+    RTC_DCHECK(IsDefaultChannel(channel));
     // Recycle the default channel is for recv stream.
     if (playout_)
       SetPlayout(voe_channel(), false);
@@ -2963,7 +2963,7 @@
 
 void WebRtcVoiceMediaChannel::OnPacketReceived(
     rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
   // Forward packet to Call as well.
   const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
@@ -3005,7 +3005,7 @@
 
 void WebRtcVoiceMediaChannel::OnRtcpReceived(
     rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
   // Forward packet to Call as well.
   const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
@@ -3325,15 +3325,15 @@
 
 void WebRtcVoiceMediaChannel::GetLastMediaError(
     uint32* ssrc, VoiceMediaChannel::Error* error) {
-  DCHECK(ssrc != NULL);
-  DCHECK(error != NULL);
+  RTC_DCHECK(ssrc != NULL);
+  RTC_DCHECK(error != NULL);
   FindSsrc(voe_channel(), ssrc);
   *error = WebRtcErrorToChannelError(GetLastEngineError());
 }
 
 bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
   rtc::CritScope lock(&receive_channels_cs_);
-  DCHECK(ssrc != NULL);
+  RTC_DCHECK(ssrc != NULL);
   if (channel_num == -1 && send_ != SEND_NOTHING) {
     // Sometimes the VoiceEngine core will throw error with channel_num = -1.
     // This means the error is not limited to a specific channel.  Signal the
@@ -3544,7 +3544,7 @@
 }
 
 void WebRtcVoiceMediaChannel::RecreateAudioReceiveStreams() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   for (const auto& it : receive_channels_) {
     RemoveAudioReceiveStream(it.first);
   }
@@ -3554,10 +3554,10 @@
 }
 
 void WebRtcVoiceMediaChannel::AddAudioReceiveStream(uint32 ssrc) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   WebRtcVoiceChannelRenderer* channel = receive_channels_[ssrc];
-  DCHECK(channel != nullptr);
-  DCHECK(receive_streams_.find(ssrc) == receive_streams_.end());
+  RTC_DCHECK(channel != nullptr);
+  RTC_DCHECK(receive_streams_.find(ssrc) == receive_streams_.end());
   webrtc::AudioReceiveStream::Config config;
   config.rtp.remote_ssrc = ssrc;
   // Only add RTP extensions if we support combined A/V BWE.
@@ -3571,7 +3571,7 @@
 }
 
 void WebRtcVoiceMediaChannel::RemoveAudioReceiveStream(uint32 ssrc) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   auto stream_it = receive_streams_.find(ssrc);
   if (stream_it != receive_streams_.end()) {
     call_->DestroyAudioReceiveStream(stream_it->second);
diff --git a/talk/media/webrtc/webrtcvoiceengine_unittest.cc b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
index 27a9c02..5fcdf5b 100644
--- a/talk/media/webrtc/webrtcvoiceengine_unittest.cc
+++ b/talk/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -97,7 +97,7 @@
    public:
     explicit ChannelErrorListener(cricket::VoiceMediaChannel* channel)
         : ssrc_(0), error_(cricket::VoiceMediaChannel::ERROR_NONE) {
-      DCHECK(channel != NULL);
+      RTC_DCHECK(channel != NULL);
       channel->SignalMediaError.connect(
           this, &ChannelErrorListener::OnVoiceChannelError);
     }
diff --git a/talk/session/media/channelmanager_unittest.cc b/talk/session/media/channelmanager_unittest.cc
index e48fd74..71493c8 100644
--- a/talk/session/media/channelmanager_unittest.cc
+++ b/talk/session/media/channelmanager_unittest.cc
@@ -54,7 +54,7 @@
 class FakeMediaController : public webrtc::MediaControllerInterface {
  public:
   explicit FakeMediaController(webrtc::Call* call) : call_(call) {
-    DCHECK(nullptr != call);
+    RTC_DCHECK(nullptr != call);
   }
   ~FakeMediaController() override {}
   webrtc::Call* call_w() override { return call_; }
diff --git a/webrtc/base/asyncinvoker.cc b/webrtc/base/asyncinvoker.cc
index ee53e04..563ccb7 100644
--- a/webrtc/base/asyncinvoker.cc
+++ b/webrtc/base/asyncinvoker.cc
@@ -96,7 +96,7 @@
 void GuardedAsyncInvoker::ThreadDestroyed() {
   rtc::CritScope cs(&crit_);
   // We should never get more than one notification about the thread dying.
-  DCHECK(thread_ != nullptr);
+  RTC_DCHECK(thread_ != nullptr);
   thread_ = nullptr;
 }
 
diff --git a/webrtc/base/bitbuffer.cc b/webrtc/base/bitbuffer.cc
index cd36613..e8f69cb 100644
--- a/webrtc/base/bitbuffer.cc
+++ b/webrtc/base/bitbuffer.cc
@@ -19,14 +19,14 @@
 
 // Returns the lowest (right-most) |bit_count| bits in |byte|.
 uint8_t LowestBits(uint8_t byte, size_t bit_count) {
-  DCHECK_LE(bit_count, 8u);
+  RTC_DCHECK_LE(bit_count, 8u);
   return byte & ((1 << bit_count) - 1);
 }
 
 // Returns the highest (left-most) |bit_count| bits in |byte|, shifted to the
 // lowest bits (to the right).
 uint8_t HighestBits(uint8_t byte, size_t bit_count) {
-  DCHECK_LE(bit_count, 8u);
+  RTC_DCHECK_LE(bit_count, 8u);
   uint8_t shift = 8 - static_cast<uint8_t>(bit_count);
   uint8_t mask = 0xFF << shift;
   return (byte & mask) >> shift;
@@ -44,9 +44,9 @@
                          size_t source_bit_count,
                          uint8_t target,
                          size_t target_bit_offset) {
-  DCHECK(target_bit_offset < 8);
-  DCHECK(source_bit_count < 9);
-  DCHECK(source_bit_count <= (8 - target_bit_offset));
+  RTC_DCHECK(target_bit_offset < 8);
+  RTC_DCHECK(source_bit_count < 9);
+  RTC_DCHECK(source_bit_count <= (8 - target_bit_offset));
   // Generate a mask for just the bits we're going to overwrite, so:
   uint8_t mask =
       // The number of bits we want, in the most significant bits...
@@ -75,8 +75,8 @@
 
 BitBuffer::BitBuffer(const uint8_t* bytes, size_t byte_count)
     : bytes_(bytes), byte_count_(byte_count), byte_offset_(), bit_offset_() {
-  DCHECK(static_cast<uint64_t>(byte_count_) <=
-         std::numeric_limits<uint32_t>::max());
+  RTC_DCHECK(static_cast<uint64_t>(byte_count_) <=
+             std::numeric_limits<uint32_t>::max());
 }
 
 uint64_t BitBuffer::RemainingBitCount() const {
@@ -88,7 +88,7 @@
   if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) {
     return false;
   }
-  DCHECK(bit_val <= std::numeric_limits<uint8_t>::max());
+  RTC_DCHECK(bit_val <= std::numeric_limits<uint8_t>::max());
   *val = static_cast<uint8_t>(bit_val);
   return true;
 }
@@ -98,7 +98,7 @@
   if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) {
     return false;
   }
-  DCHECK(bit_val <= std::numeric_limits<uint16_t>::max());
+  RTC_DCHECK(bit_val <= std::numeric_limits<uint16_t>::max());
   *val = static_cast<uint16_t>(bit_val);
   return true;
 }
@@ -173,14 +173,14 @@
   }
 
   // We should either be at the end of the stream, or the next bit should be 1.
-  DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1);
+  RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1);
 
   // The bit count of the value is the number of zeros + 1. Make sure that many
   // bits fits in a uint32_t and that we have enough bits left for it, and then
   // read the value.
   size_t value_bit_count = zero_bit_count + 1;
   if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) {
-    CHECK(Seek(original_byte_offset, original_bit_offset));
+    RTC_CHECK(Seek(original_byte_offset, original_bit_offset));
     return false;
   }
   *val -= 1;
@@ -189,8 +189,8 @@
 
 void BitBuffer::GetCurrentOffset(
     size_t* out_byte_offset, size_t* out_bit_offset) {
-  CHECK(out_byte_offset != NULL);
-  CHECK(out_bit_offset != NULL);
+  RTC_CHECK(out_byte_offset != NULL);
+  RTC_CHECK(out_bit_offset != NULL);
   *out_byte_offset = byte_offset_;
   *out_bit_offset = bit_offset_;
 }
diff --git a/webrtc/base/checks.cc b/webrtc/base/checks.cc
index b85af1e..49a31f2 100644
--- a/webrtc/base/checks.cc
+++ b/webrtc/base/checks.cc
@@ -109,9 +109,6 @@
           << file << ", line " << line << std::endl << "# ";
 }
 
-// Refer to comments in checks.h.
-#ifndef WEBRTC_CHROMIUM_BUILD
-
 // MSVC doesn't like complex extern templates and DLLs.
 #if !defined(COMPILER_MSVC)
 // Explicit instantiations for commonly used comparisons.
@@ -127,6 +124,4 @@
     const std::string&, const std::string&, const char* name);
 #endif
 
-#endif  // WEBRTC_CHROMIUM_BUILD
-
 }  // namespace rtc
diff --git a/webrtc/base/checks.h b/webrtc/base/checks.h
index 5215868..ad0954d 100644
--- a/webrtc/base/checks.h
+++ b/webrtc/base/checks.h
@@ -25,50 +25,46 @@
 
 // The macros here print a message to stderr and abort under various
 // conditions. All will accept additional stream messages. For example:
-// DCHECK_EQ(foo, bar) << "I'm printed when foo != bar.";
+// RTC_DCHECK_EQ(foo, bar) << "I'm printed when foo != bar.";
 //
-// - CHECK(x) is an assertion that x is always true, and that if it isn't, it's
-//   better to terminate the process than to continue. During development, the
-//   reason that it's better to terminate might simply be that the error
+// - RTC_CHECK(x) is an assertion that x is always true, and that if it isn't,
+//   it's better to terminate the process than to continue. During development,
+//   the reason that it's better to terminate might simply be that the error
 //   handling code isn't in place yet; in production, the reason might be that
 //   the author of the code truly believes that x will always be true, but that
 //   she recognizes that if she is wrong, abrupt and unpleasant process
 //   termination is still better than carrying on with the assumption violated.
 //
-//   CHECK always evaluates its argument, so it's OK for x to have side
+//   RTC_CHECK always evaluates its argument, so it's OK for x to have side
 //   effects.
 //
-// - DCHECK(x) is the same as CHECK(x)---an assertion that x is always
+// - RTC_DCHECK(x) is the same as RTC_CHECK(x)---an assertion that x is always
 //   true---except that x will only be evaluated in debug builds; in production
 //   builds, x is simply assumed to be true. This is useful if evaluating x is
 //   expensive and the expected cost of failing to detect the violated
 //   assumption is acceptable. You should not handle cases where a production
 //   build fails to spot a violated condition, even those that would result in
 //   crashes. If the code needs to cope with the error, make it cope, but don't
-//   call DCHECK; if the condition really can't occur, but you'd sleep better
-//   at night knowing that the process will suicide instead of carrying on in
-//   case you were wrong, use CHECK instead of DCHECK.
+//   call RTC_DCHECK; if the condition really can't occur, but you'd sleep
+//   better at night knowing that the process will suicide instead of carrying
+//   on in case you were wrong, use RTC_CHECK instead of RTC_DCHECK.
 //
-//   DCHECK only evaluates its argument in debug builds, so if x has visible
+//   RTC_DCHECK only evaluates its argument in debug builds, so if x has visible
 //   side effects, you need to write e.g.
-//     bool w = x; DCHECK(w);
+//     bool w = x; RTC_DCHECK(w);
 //
-// - CHECK_EQ, _NE, _GT, ..., and DCHECK_EQ, _NE, _GT, ... are specialized
-//   variants of CHECK and DCHECK that print prettier messages if the condition
-//   doesn't hold. Prefer them to raw CHECK and DCHECK.
+// - RTC_CHECK_EQ, _NE, _GT, ..., and RTC_DCHECK_EQ, _NE, _GT, ... are
+//   specialized variants of RTC_CHECK and RTC_DCHECK that print prettier
+//   messages if the condition doesn't hold. Prefer them to raw RTC_CHECK and
+//   RTC_DCHECK.
 //
 // - FATAL() aborts unconditionally.
 
 namespace rtc {
 
-// The use of overrides/webrtc/base/logging.h in a Chromium build results in
-// redefined macro errors. Fortunately, Chromium's macros can be used as drop-in
-// replacements for the standalone versions.
-#ifndef WEBRTC_CHROMIUM_BUILD
-
 // Helper macro which avoids evaluating the arguments to a stream if
 // the condition doesn't hold.
-#define LAZY_STREAM(stream, condition)                                        \
+#define RTC_LAZY_STREAM(stream, condition)                                    \
   !(condition) ? static_cast<void>(0) : rtc::FatalMessageVoidify() & (stream)
 
 // The actual stream used isn't important. We reference condition in the code
@@ -76,30 +72,30 @@
 // in a particularly convoluted way with an extra ?: because that appears to be
 // the simplest construct that keeps Visual Studio from complaining about
 // condition being unused).
-#define EAT_STREAM_PARAMETERS(condition) \
-  (true ? true : !(condition))           \
-      ? static_cast<void>(0)             \
+#define RTC_EAT_STREAM_PARAMETERS(condition) \
+  (true ? true : !(condition))               \
+      ? static_cast<void>(0)                 \
       : rtc::FatalMessageVoidify() & rtc::FatalMessage("", 0).stream()
 
-// CHECK dies with a fatal error if condition is not true.  It is *not*
+// RTC_CHECK dies with a fatal error if condition is not true.  It is *not*
 // controlled by NDEBUG, so the check will be executed regardless of
 // compilation mode.
 //
-// We make sure CHECK et al. always evaluates their arguments, as
-// doing CHECK(FunctionWithSideEffect()) is a common idiom.
-#define CHECK(condition)                                                    \
-  LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), !(condition)) \
-  << "Check failed: " #condition << std::endl << "# "
+// We make sure RTC_CHECK et al. always evaluates their arguments, as
+// doing RTC_CHECK(FunctionWithSideEffect()) is a common idiom.
+#define RTC_CHECK(condition)                                      \
+  RTC_LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), \
+                  !(condition))                                   \
+      << "Check failed: " #condition << std::endl << "# "
 
 // Helper macro for binary operators.
-// Don't use this macro directly in your code, use CHECK_EQ et al below.
+// Don't use this macro directly in your code, use RTC_CHECK_EQ et al below.
 //
 // TODO(akalin): Rewrite this so that constructs like if (...)
-// CHECK_EQ(...) else { ... } work properly.
-#define CHECK_OP(name, op, val1, val2)                      \
-  if (std::string* _result =                                \
-      rtc::Check##name##Impl((val1), (val2),                \
-                             #val1 " " #op " " #val2))      \
+// RTC_CHECK_EQ(...) else { ... } work properly.
+#define RTC_CHECK_OP(name, op, val1, val2)                                 \
+  if (std::string* _result =                                               \
+          rtc::Check##name##Impl((val1), (val2), #val1 " " #op " " #val2)) \
     rtc::FatalMessage(__FILE__, __LINE__, _result).stream()
 
 // Build the error message string.  This is separate from the "Impl"
@@ -134,55 +130,59 @@
     const std::string&, const std::string&, const char* name);
 #endif
 
-// Helper functions for CHECK_OP macro.
+// Helper functions for RTC_CHECK_OP macro.
 // The (int, int) specialization works around the issue that the compiler
 // will not instantiate the template version of the function on values of
 // unnamed enum type - see comment below.
-#define DEFINE_CHECK_OP_IMPL(name, op) \
-  template <class t1, class t2> \
-  inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
-                                        const char* names) { \
-    if (v1 op v2) return NULL; \
-    else return rtc::MakeCheckOpString(v1, v2, names); \
-  } \
+#define DEFINE_RTC_CHECK_OP_IMPL(name, op)                                   \
+  template <class t1, class t2>                                              \
+  inline std::string* Check##name##Impl(const t1& v1, const t2& v2,          \
+                                        const char* names) {                 \
+    if (v1 op v2)                                                            \
+      return NULL;                                                           \
+    else                                                                     \
+      return rtc::MakeCheckOpString(v1, v2, names);                          \
+  }                                                                          \
   inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
-    if (v1 op v2) return NULL; \
-    else return rtc::MakeCheckOpString(v1, v2, names); \
+    if (v1 op v2)                                                            \
+      return NULL;                                                           \
+    else                                                                     \
+      return rtc::MakeCheckOpString(v1, v2, names);                          \
   }
-DEFINE_CHECK_OP_IMPL(EQ, ==)
-DEFINE_CHECK_OP_IMPL(NE, !=)
-DEFINE_CHECK_OP_IMPL(LE, <=)
-DEFINE_CHECK_OP_IMPL(LT, < )
-DEFINE_CHECK_OP_IMPL(GE, >=)
-DEFINE_CHECK_OP_IMPL(GT, > )
-#undef DEFINE_CHECK_OP_IMPL
+DEFINE_RTC_CHECK_OP_IMPL(EQ, ==)
+DEFINE_RTC_CHECK_OP_IMPL(NE, !=)
+DEFINE_RTC_CHECK_OP_IMPL(LE, <=)
+DEFINE_RTC_CHECK_OP_IMPL(LT, < )
+DEFINE_RTC_CHECK_OP_IMPL(GE, >=)
+DEFINE_RTC_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_RTC_CHECK_OP_IMPL
 
-#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
-#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
-#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
-#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
-#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
-#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
+#define RTC_CHECK_EQ(val1, val2) RTC_CHECK_OP(EQ, ==, val1, val2)
+#define RTC_CHECK_NE(val1, val2) RTC_CHECK_OP(NE, !=, val1, val2)
+#define RTC_CHECK_LE(val1, val2) RTC_CHECK_OP(LE, <=, val1, val2)
+#define RTC_CHECK_LT(val1, val2) RTC_CHECK_OP(LT, < , val1, val2)
+#define RTC_CHECK_GE(val1, val2) RTC_CHECK_OP(GE, >=, val1, val2)
+#define RTC_CHECK_GT(val1, val2) RTC_CHECK_OP(GT, > , val1, val2)
 
-// The DCHECK macro is equivalent to CHECK except that it only generates code
-// in debug builds. It does reference the condition parameter in all cases,
+// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates
+// code in debug builds. It does reference the condition parameter in all cases,
 // though, so callers won't risk getting warnings about unused variables.
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
-#define DCHECK(condition) CHECK(condition)
-#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
-#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
-#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
-#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
-#define DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
+#define RTC_DCHECK(condition) RTC_CHECK(condition)
+#define RTC_DCHECK_EQ(v1, v2) RTC_CHECK_EQ(v1, v2)
+#define RTC_DCHECK_NE(v1, v2) RTC_CHECK_NE(v1, v2)
+#define RTC_DCHECK_LE(v1, v2) RTC_CHECK_LE(v1, v2)
+#define RTC_DCHECK_LT(v1, v2) RTC_CHECK_LT(v1, v2)
+#define RTC_DCHECK_GE(v1, v2) RTC_CHECK_GE(v1, v2)
+#define RTC_DCHECK_GT(v1, v2) RTC_CHECK_GT(v1, v2)
 #else
-#define DCHECK(condition) EAT_STREAM_PARAMETERS(condition)
-#define DCHECK_EQ(v1, v2) EAT_STREAM_PARAMETERS((v1) == (v2))
-#define DCHECK_NE(v1, v2) EAT_STREAM_PARAMETERS((v1) != (v2))
-#define DCHECK_LE(v1, v2) EAT_STREAM_PARAMETERS((v1) <= (v2))
-#define DCHECK_LT(v1, v2) EAT_STREAM_PARAMETERS((v1) < (v2))
-#define DCHECK_GE(v1, v2) EAT_STREAM_PARAMETERS((v1) >= (v2))
-#define DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2))
+#define RTC_DCHECK(condition) RTC_EAT_STREAM_PARAMETERS(condition)
+#define RTC_DCHECK_EQ(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) == (v2))
+#define RTC_DCHECK_NE(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) != (v2))
+#define RTC_DCHECK_LE(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) <= (v2))
+#define RTC_DCHECK_LT(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) < (v2))
+#define RTC_DCHECK_GE(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) >= (v2))
+#define RTC_DCHECK_GT(v1, v2) RTC_EAT_STREAM_PARAMETERS((v1) > (v2))
 #endif
 
 // This is identical to LogMessageVoidify but in name.
@@ -194,13 +194,11 @@
   void operator&(std::ostream&) { }
 };
 
-#endif  // WEBRTC_CHROMIUM_BUILD
-
 #define RTC_UNREACHABLE_CODE_HIT false
-#define RTC_NOTREACHED() DCHECK(RTC_UNREACHABLE_CODE_HIT)
+#define RTC_NOTREACHED() RTC_DCHECK(RTC_UNREACHABLE_CODE_HIT)
 
 #define FATAL() rtc::FatalMessage(__FILE__, __LINE__).stream()
-// TODO(ajm): Consider adding NOTIMPLEMENTED and NOTREACHED macros when
+// TODO(ajm): Consider adding an RTC_NOTIMPLEMENTED macro when
 // base/logging.h and system_wrappers/logging.h are consolidated such that we
 // can match the Chromium behavior.
 
@@ -208,7 +206,7 @@
 class FatalMessage {
  public:
   FatalMessage(const char* file, int line);
-  // Used for CHECK_EQ(), etc. Takes ownership of the given string.
+  // Used for RTC_CHECK_EQ(), etc. Takes ownership of the given string.
   FatalMessage(const char* file, int line, std::string* result);
   NO_RETURN ~FatalMessage();
 
@@ -224,7 +222,7 @@
 // remainder is zero.
 template <typename T>
 inline T CheckedDivExact(T a, T b) {
-  CHECK_EQ(a % b, static_cast<T>(0));
+  RTC_CHECK_EQ(a % b, static_cast<T>(0));
   return a / b;
 }
 
diff --git a/webrtc/base/criticalsection.cc b/webrtc/base/criticalsection.cc
index 4f3a28f..851635d 100644
--- a/webrtc/base/criticalsection.cc
+++ b/webrtc/base/criticalsection.cc
@@ -43,10 +43,10 @@
   pthread_mutex_lock(&mutex_);
 #if CS_DEBUG_CHECKS
   if (!recursion_count_) {
-    DCHECK(!thread_);
+    RTC_DCHECK(!thread_);
     thread_ = pthread_self();
   } else {
-    DCHECK(CurrentThreadIsOwner());
+    RTC_DCHECK(CurrentThreadIsOwner());
   }
   ++recursion_count_;
 #endif
@@ -61,10 +61,10 @@
     return false;
 #if CS_DEBUG_CHECKS
   if (!recursion_count_) {
-    DCHECK(!thread_);
+    RTC_DCHECK(!thread_);
     thread_ = pthread_self();
   } else {
-    DCHECK(CurrentThreadIsOwner());
+    RTC_DCHECK(CurrentThreadIsOwner());
   }
   ++recursion_count_;
 #endif
@@ -72,13 +72,13 @@
 #endif
 }
 void CriticalSection::Leave() UNLOCK_FUNCTION() {
-  DCHECK(CurrentThreadIsOwner());
+  RTC_DCHECK(CurrentThreadIsOwner());
 #if defined(WEBRTC_WIN)
   LeaveCriticalSection(&crit_);
 #else
 #if CS_DEBUG_CHECKS
   --recursion_count_;
-  DCHECK(recursion_count_ >= 0);
+  RTC_DCHECK(recursion_count_ >= 0);
   if (!recursion_count_)
     thread_ = 0;
 #endif
@@ -119,7 +119,7 @@
 }
 
 TryCritScope::~TryCritScope() {
-  CS_DEBUG_CODE(DCHECK(lock_was_called_));
+  CS_DEBUG_CODE(RTC_DCHECK(lock_was_called_));
   if (locked_)
     cs_->Leave();
 }
@@ -145,7 +145,7 @@
 
 void GlobalLockPod::Unlock() {
   int old_value = AtomicOps::CompareAndSwap(&lock_acquired, 1, 0);
-  DCHECK_EQ(1, old_value) << "Unlock called without calling Lock first";
+  RTC_DCHECK_EQ(1, old_value) << "Unlock called without calling Lock first";
 }
 
 GlobalLock::GlobalLock() {
diff --git a/webrtc/base/criticalsection.h b/webrtc/base/criticalsection.h
index 241d611..ddbf857 100644
--- a/webrtc/base/criticalsection.h
+++ b/webrtc/base/criticalsection.h
@@ -50,9 +50,9 @@
   bool TryEnter() EXCLUSIVE_TRYLOCK_FUNCTION(true);
   void Leave() UNLOCK_FUNCTION();
 
-  // Use only for DCHECKing.
+  // Use only for RTC_DCHECKing.
   bool CurrentThreadIsOwner() const;
-  // Use only for DCHECKing.
+  // Use only for RTC_DCHECKing.
   bool IsLocked() const;
 
  private:
diff --git a/webrtc/base/event.cc b/webrtc/base/event.cc
index 999db38..a9af208 100644
--- a/webrtc/base/event.cc
+++ b/webrtc/base/event.cc
@@ -31,7 +31,7 @@
                                 manual_reset,
                                 initially_signaled,
                                 NULL);                // Name.
-  CHECK(event_handle_);
+  RTC_CHECK(event_handle_);
 }
 
 Event::~Event() {
@@ -56,8 +56,8 @@
 Event::Event(bool manual_reset, bool initially_signaled)
     : is_manual_reset_(manual_reset),
       event_status_(initially_signaled) {
-  CHECK(pthread_mutex_init(&event_mutex_, NULL) == 0);
-  CHECK(pthread_cond_init(&event_cond_, NULL) == 0);
+  RTC_CHECK(pthread_mutex_init(&event_mutex_, NULL) == 0);
+  RTC_CHECK(pthread_cond_init(&event_cond_, NULL) == 0);
 }
 
 Event::~Event() {
diff --git a/webrtc/base/filerotatingstream.cc b/webrtc/base/filerotatingstream.cc
index f2a6def..65dfd63 100644
--- a/webrtc/base/filerotatingstream.cc
+++ b/webrtc/base/filerotatingstream.cc
@@ -37,8 +37,8 @@
                          max_file_size,
                          num_files,
                          kWrite) {
-  DCHECK_GT(max_file_size, 0u);
-  DCHECK_GT(num_files, 1u);
+  RTC_DCHECK_GT(max_file_size, 0u);
+  RTC_DCHECK_GT(num_files, 1u);
 }
 
 FileRotatingStream::FileRotatingStream(const std::string& dir_path,
@@ -55,7 +55,7 @@
       rotation_index_(0),
       current_bytes_written_(0),
       disable_buffering_(false) {
-  DCHECK(Filesystem::IsFolder(dir_path));
+  RTC_DCHECK(Filesystem::IsFolder(dir_path));
   switch (mode) {
     case kWrite: {
       file_names_.clear();
@@ -94,7 +94,7 @@
                                       size_t buffer_len,
                                       size_t* read,
                                       int* error) {
-  DCHECK(buffer);
+  RTC_DCHECK(buffer);
   if (mode_ != kRead) {
     return SR_EOS;
   }
@@ -152,7 +152,7 @@
     return SR_ERROR;
   }
   // Write as much as will fit in to the current file.
-  DCHECK_LT(current_bytes_written_, max_file_size_);
+  RTC_DCHECK_LT(current_bytes_written_, max_file_size_);
   size_t remaining_bytes = max_file_size_ - current_bytes_written_;
   size_t write_length = std::min(data_len, remaining_bytes);
   size_t local_written = 0;
@@ -164,7 +164,7 @@
 
   // If we're done with this file, rotate it out.
   if (current_bytes_written_ >= max_file_size_) {
-    DCHECK_EQ(current_bytes_written_, max_file_size_);
+    RTC_DCHECK_EQ(current_bytes_written_, max_file_size_);
     RotateFiles();
   }
   return result;
@@ -183,7 +183,7 @@
     // potential buffering.
     return false;
   }
-  DCHECK(size);
+  RTC_DCHECK(size);
   *size = 0;
   size_t total_size = 0;
   for (auto file_name : file_names_) {
@@ -232,7 +232,7 @@
 }
 
 std::string FileRotatingStream::GetFilePath(size_t index) const {
-  DCHECK_LT(index, file_names_.size());
+  RTC_DCHECK_LT(index, file_names_.size());
   return file_names_[index];
 }
 
@@ -240,7 +240,7 @@
   CloseCurrentFile();
 
   // Opens the appropriate file in the appropriate mode.
-  DCHECK_LT(current_file_index_, file_names_.size());
+  RTC_DCHECK_LT(current_file_index_, file_names_.size());
   std::string file_path = file_names_[current_file_index_];
   file_stream_.reset(new FileStream());
   const char* mode = nullptr;
@@ -248,7 +248,7 @@
     case kWrite:
       mode = "w+";
       // We should always we writing to the zero-th file.
-      DCHECK_EQ(current_file_index_, 0u);
+      RTC_DCHECK_EQ(current_file_index_, 0u);
       break;
     case kRead:
       mode = "r";
@@ -276,12 +276,12 @@
 }
 
 void FileRotatingStream::RotateFiles() {
-  DCHECK_EQ(mode_, kWrite);
+  RTC_DCHECK_EQ(mode_, kWrite);
   CloseCurrentFile();
   // Rotates the files by deleting the file at |rotation_index_|, which is the
   // oldest file and then renaming the newer files to have an incremented index.
   // See header file comments for example.
-  DCHECK_LE(rotation_index_, file_names_.size());
+  RTC_DCHECK_LE(rotation_index_, file_names_.size());
   std::string file_to_delete = file_names_[rotation_index_];
   if (Filesystem::IsFile(file_to_delete)) {
     if (!Filesystem::DeleteFile(file_to_delete)) {
@@ -325,13 +325,13 @@
 
 std::string FileRotatingStream::GetFilePath(size_t index,
                                             size_t num_files) const {
-  DCHECK_LT(index, num_files);
+  RTC_DCHECK_LT(index, num_files);
   std::ostringstream file_name;
   // The format will be "_%<num_digits>zu". We want to zero pad the index so
   // that it will sort nicely.
   size_t max_digits = ((num_files - 1) / 10) + 1;
   size_t num_digits = (index / 10) + 1;
-  DCHECK_LE(num_digits, max_digits);
+  RTC_DCHECK_LE(num_digits, max_digits);
   size_t padding = max_digits - num_digits;
 
   file_name << file_prefix_ << "_";
@@ -360,7 +360,7 @@
                          GetNumRotatingLogFiles(max_total_log_size) + 1),
       max_total_log_size_(max_total_log_size),
       num_rotations_(0) {
-  DCHECK_GE(max_total_log_size, 4u);
+  RTC_DCHECK_GE(max_total_log_size, 4u);
 }
 
 const char* CallSessionFileRotatingStream::kLogPrefix = "webrtc_log";
diff --git a/webrtc/base/flags.cc b/webrtc/base/flags.cc
index a5e1c45..0c0f449 100644
--- a/webrtc/base/flags.cc
+++ b/webrtc/base/flags.cc
@@ -163,7 +163,7 @@
     if (*arg == '=') {
       // make a copy so we can NUL-terminate flag name
       int n = static_cast<int>(arg - *name);
-      CHECK_LT(n, buffer_size);
+      RTC_CHECK_LT(n, buffer_size);
       memcpy(buffer, *name, n * sizeof(char));
       buffer[n] = '\0';
       *name = buffer;
@@ -257,7 +257,8 @@
 
 void FlagList::Register(Flag* flag) {
   assert(flag != NULL && strlen(flag->name()) > 0);
-  CHECK(!Lookup(flag->name())) << "flag " << flag->name() << " declared twice";
+  RTC_CHECK(!Lookup(flag->name())) << "flag " << flag->name()
+                                   << " declared twice";
   flag->next_ = list_;
   list_ = flag;
 }
diff --git a/webrtc/base/logsinks.cc b/webrtc/base/logsinks.cc
index 4968339..5a6db45 100644
--- a/webrtc/base/logsinks.cc
+++ b/webrtc/base/logsinks.cc
@@ -29,7 +29,7 @@
 
 FileRotatingLogSink::FileRotatingLogSink(FileRotatingStream* stream)
     : stream_(stream) {
-  DCHECK(stream);
+  RTC_DCHECK(stream);
 }
 
 FileRotatingLogSink::~FileRotatingLogSink() {
diff --git a/webrtc/base/network.cc b/webrtc/base/network.cc
index c011c1f..bc7d505 100644
--- a/webrtc/base/network.cc
+++ b/webrtc/base/network.cc
@@ -123,7 +123,7 @@
     case ADAPTER_TYPE_LOOPBACK:
       return "Loopback";
     default:
-      DCHECK(false) << "Invalid type " << type;
+      RTC_DCHECK(false) << "Invalid type " << type;
       return std::string();
   }
 }
diff --git a/webrtc/base/platform_thread.cc b/webrtc/base/platform_thread.cc
index 973f7f7..4167392 100644
--- a/webrtc/base/platform_thread.cc
+++ b/webrtc/base/platform_thread.cc
@@ -37,7 +37,7 @@
   ret = reinterpret_cast<pid_t>(pthread_self());
 #endif
 #endif  // defined(WEBRTC_POSIX)
-  DCHECK(ret);
+  RTC_DCHECK(ret);
   return ret;
 }
 
@@ -58,7 +58,7 @@
 }
 
 void SetCurrentThreadName(const char* name) {
-  DCHECK(strlen(name) < 64);
+  RTC_DCHECK(strlen(name) < 64);
 #if defined(WEBRTC_WIN)
   struct {
     DWORD dwType;
diff --git a/webrtc/base/ratetracker.cc b/webrtc/base/ratetracker.cc
index 7dcdb91..57906f7 100644
--- a/webrtc/base/ratetracker.cc
+++ b/webrtc/base/ratetracker.cc
@@ -26,8 +26,8 @@
     sample_buckets_(new size_t[bucket_count + 1]),
     total_sample_count_(0u),
     bucket_start_time_milliseconds_(~0u) {
-  CHECK(bucket_milliseconds > 0u);
-  CHECK(bucket_count > 0u);
+  RTC_CHECK(bucket_milliseconds > 0u);
+  RTC_CHECK(bucket_count > 0u);
 }
 
 RateTracker::~RateTracker() {
diff --git a/webrtc/base/rtccertificate.cc b/webrtc/base/rtccertificate.cc
index 5279fd4..d912eb4 100644
--- a/webrtc/base/rtccertificate.cc
+++ b/webrtc/base/rtccertificate.cc
@@ -22,7 +22,7 @@
 
 RTCCertificate::RTCCertificate(SSLIdentity* identity)
     : identity_(identity) {
-  DCHECK(identity_);
+  RTC_DCHECK(identity_);
 }
 
 RTCCertificate::~RTCCertificate() {
diff --git a/webrtc/base/safe_conversions.h b/webrtc/base/safe_conversions.h
index 7fc67cb..51239bc 100644
--- a/webrtc/base/safe_conversions.h
+++ b/webrtc/base/safe_conversions.h
@@ -32,13 +32,13 @@
 // overflow or underflow. NaN source will always trigger a CHECK.
 template <typename Dst, typename Src>
 inline Dst checked_cast(Src value) {
-  CHECK(IsValueInRangeForNumericType<Dst>(value));
+  RTC_CHECK(IsValueInRangeForNumericType<Dst>(value));
   return static_cast<Dst>(value);
 }
 
 // saturated_cast<> is analogous to static_cast<> for numeric types, except
 // that the specified numeric conversion will saturate rather than overflow or
-// underflow. NaN assignment to an integral will trigger a CHECK condition.
+// underflow. NaN assignment to an integral will trigger an RTC_CHECK condition.
 template <typename Dst, typename Src>
 inline Dst saturated_cast(Src value) {
   // Optimization for floating point values, which already saturate.
diff --git a/webrtc/base/stringencode.cc b/webrtc/base/stringencode.cc
index c48c526..2930e57 100644
--- a/webrtc/base/stringencode.cc
+++ b/webrtc/base/stringencode.cc
@@ -26,7 +26,7 @@
 size_t escape(char * buffer, size_t buflen,
               const char * source, size_t srclen,
               const char * illegal, char escape) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -48,7 +48,7 @@
 size_t unescape(char * buffer, size_t buflen,
                 const char * source, size_t srclen,
                 char escape) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -67,7 +67,7 @@
 size_t encode(char * buffer, size_t buflen,
               const char * source, size_t srclen,
               const char * illegal, char escape) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -119,8 +119,8 @@
 #if defined(WEBRTC_WIN)
   return "\\/:*?\"<>|";
 #else  // !WEBRTC_WIN
-  // TODO
-  DCHECK(false);
+  // TODO(grunell): Should this never be reached?
+  RTC_DCHECK(false);
   return "";
 #endif  // !WEBRTC_WIN
 }
@@ -257,7 +257,7 @@
 
 size_t html_encode(char * buffer, size_t buflen,
                    const char * source, size_t srclen) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -275,7 +275,7 @@
           case '\'': escseq = "&#39;";  esclen = 5; break;
           case '\"': escseq = "&quot;"; esclen = 6; break;
           case '&':  escseq = "&amp;";  esclen = 5; break;
-          default: DCHECK(false);
+          default: RTC_DCHECK(false);
         }
         if (bufpos + esclen >= buflen) {
           break;
@@ -310,13 +310,13 @@
 
 size_t html_decode(char * buffer, size_t buflen,
                    const char * source, size_t srclen) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   return xml_decode(buffer, buflen, source, srclen);
 }
 
 size_t xml_encode(char * buffer, size_t buflen,
                   const char * source, size_t srclen) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -332,7 +332,7 @@
         case '\'': escseq = "&apos;"; esclen = 6; break;
         case '\"': escseq = "&quot;"; esclen = 6; break;
         case '&':  escseq = "&amp;";  esclen = 5; break;
-        default: DCHECK(false);
+        default: RTC_DCHECK(false);
       }
       if (bufpos + esclen >= buflen) {
         break;
@@ -349,7 +349,7 @@
 
 size_t xml_decode(char * buffer, size_t buflen,
                   const char * source, size_t srclen) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen <= 0)
     return 0;
 
@@ -385,7 +385,7 @@
         srcpos += 1;
       }
       char * ptr;
-      // TODO: Fix hack (ptr may go past end of data)
+      // TODO(grunell): Fix hack (ptr may go past end of data)
       unsigned long val = strtoul(source + srcpos + 1, &ptr, int_base);
       if ((static_cast<size_t>(ptr - source) < srclen) && (*ptr == ';')) {
         srcpos = ptr - source + 1;
@@ -411,7 +411,7 @@
 static const char HEX[] = "0123456789abcdef";
 
 char hex_encode(unsigned char val) {
-  DCHECK_LT(val, 16);
+  RTC_DCHECK_LT(val, 16);
   return (val < 16) ? HEX[val] : '!';
 }
 
@@ -436,7 +436,7 @@
 size_t hex_encode_with_delimiter(char* buffer, size_t buflen,
                                  const char* csource, size_t srclen,
                                  char delimiter) {
-  DCHECK(buffer);  // TODO: estimate output size
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
   if (buflen == 0)
     return 0;
 
@@ -480,7 +480,7 @@
   char* buffer = STACK_ARRAY(char, kBufferSize);
   size_t length = hex_encode_with_delimiter(buffer, kBufferSize,
                                             source, srclen, delimiter);
-  DCHECK(srclen == 0 || length > 0);
+  RTC_DCHECK(srclen == 0 || length > 0);
   return std::string(buffer, length);
 }
 
@@ -492,7 +492,7 @@
 size_t hex_decode_with_delimiter(char* cbuffer, size_t buflen,
                                  const char* source, size_t srclen,
                                  char delimiter) {
-  DCHECK(cbuffer);  // TODO: estimate output size
+  RTC_DCHECK(cbuffer);  // TODO(grunell): estimate output size
   if (buflen == 0)
     return 0;
 
@@ -556,7 +556,7 @@
 
 size_t tokenize(const std::string& source, char delimiter,
                 std::vector<std::string>* fields) {
-  DCHECK(fields);
+  RTC_DCHECK(fields);
   fields->clear();
   size_t last = 0;
   for (size_t i = 0; i < source.length(); ++i) {
@@ -634,7 +634,7 @@
 
 size_t split(const std::string& source, char delimiter,
              std::vector<std::string>* fields) {
-  DCHECK(fields);
+  RTC_DCHECK(fields);
   fields->clear();
   size_t last = 0;
   for (size_t i = 0; i < source.length(); ++i) {
diff --git a/webrtc/base/stringencode.h b/webrtc/base/stringencode.h
index 356844c..0b9ed0e 100644
--- a/webrtc/base/stringencode.h
+++ b/webrtc/base/stringencode.h
@@ -176,7 +176,7 @@
 
 template <class T>
 static bool ToString(const T &t, std::string* s) {
-  DCHECK(s);
+  RTC_DCHECK(s);
   std::ostringstream oss;
   oss << std::boolalpha << t;
   *s = oss.str();
@@ -185,7 +185,7 @@
 
 template <class T>
 static bool FromString(const std::string& s, T* t) {
-  DCHECK(t);
+  RTC_DCHECK(t);
   std::istringstream iss(s);
   iss >> std::boolalpha >> *t;
   return !iss.fail();
diff --git a/webrtc/base/stringutils.cc b/webrtc/base/stringutils.cc
index cb99c25..868e475 100644
--- a/webrtc/base/stringutils.cc
+++ b/webrtc/base/stringutils.cc
@@ -57,7 +57,7 @@
     if (n-- == 0) return 0;
     c1 = transformation(*s1);
     // Double check that characters are not UTF-8
-    DCHECK_LT(static_cast<unsigned char>(*s2), 128);
+    RTC_DCHECK_LT(static_cast<unsigned char>(*s2), 128);
     // Note: *s2 gets implicitly promoted to wchar_t
     c2 = transformation(*s2);
     if (c1 != c2) return (c1 < c2) ? -1 : 1;
@@ -80,7 +80,7 @@
 #if _DEBUG
   // Double check that characters are not UTF-8
   for (size_t pos = 0; pos < srclen; ++pos)
-    DCHECK_LT(static_cast<unsigned char>(source[pos]), 128);
+    RTC_DCHECK_LT(static_cast<unsigned char>(source[pos]), 128);
 #endif  // _DEBUG
   std::copy(source, source + srclen, buffer);
   buffer[srclen] = 0;
diff --git a/webrtc/base/thread_checker.h b/webrtc/base/thread_checker.h
index eee9315..6cd7d7b 100644
--- a/webrtc/base/thread_checker.h
+++ b/webrtc/base/thread_checker.h
@@ -18,10 +18,10 @@
 // with this define will get the same level of thread checking as
 // debug bots.
 //
-// Note that this does not perfectly match situations where DCHECK is
+// Note that this does not perfectly match situations where RTC_DCHECK is
 // enabled.  For example a non-official release build may have
 // DCHECK_ALWAYS_ON undefined (and therefore ThreadChecker would be
-// disabled) but have DCHECKs enabled at runtime.
+// disabled) but have RTC_DCHECKs enabled at runtime.
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
 #define ENABLE_THREAD_CHECKER 1
 #else
@@ -67,7 +67,7 @@
 // class MyClass {
 //  public:
 //   void Foo() {
-//     DCHECK(thread_checker_.CalledOnValidThread());
+//     RTC_DCHECK(thread_checker_.CalledOnValidThread());
 //     ... (do stuff) ...
 //   }
 //
diff --git a/webrtc/base/thread_checker_impl.h b/webrtc/base/thread_checker_impl.h
index 835c53e..7b39ada 100644
--- a/webrtc/base/thread_checker_impl.h
+++ b/webrtc/base/thread_checker_impl.h
@@ -19,7 +19,7 @@
 namespace rtc {
 
 // Real implementation of ThreadChecker, for use in debug mode, or
-// for temporary use in release mode (e.g. to CHECK on a threading issue
+// for temporary use in release mode (e.g. to RTC_CHECK on a threading issue
 // seen only in the wild).
 //
 // Note: You should almost always use the ThreadChecker class to get the
diff --git a/webrtc/base/thread_checker_unittest.cc b/webrtc/base/thread_checker_unittest.cc
index a193248..bcffb52 100644
--- a/webrtc/base/thread_checker_unittest.cc
+++ b/webrtc/base/thread_checker_unittest.cc
@@ -37,9 +37,7 @@
   ThreadCheckerClass() {}
 
   // Verifies that it was called on the same thread as the constructor.
-  void DoStuff() {
-    DCHECK(CalledOnValidThread());
-  }
+  void DoStuff() { RTC_DCHECK(CalledOnValidThread()); }
 
   void DetachFromThread() {
     ThreadChecker::DetachFromThread();
diff --git a/webrtc/base/timeutils.cc b/webrtc/base/timeutils.cc
index 64dae2f..ffaf326 100644
--- a/webrtc/base/timeutils.cc
+++ b/webrtc/base/timeutils.cc
@@ -42,7 +42,7 @@
     // Get the timebase if this is the first time we run.
     // Recommended by Apple's QA1398.
     if (mach_timebase_info(&timebase) != KERN_SUCCESS) {
-      DCHECK(false);
+      RTC_DCHECK(false);
     }
   }
   // Use timebase to convert absolute time tick units into nanoseconds.
@@ -136,8 +136,8 @@
 }
 
 uint32 TimeAfter(int32 elapsed) {
-  DCHECK_GE(elapsed, 0);
-  DCHECK_LT(static_cast<uint32>(elapsed), HALF);
+  RTC_DCHECK_GE(elapsed, 0);
+  RTC_DCHECK_LT(static_cast<uint32>(elapsed), HALF);
   return Time() + elapsed;
 }
 
diff --git a/webrtc/base/virtualsocketserver.cc b/webrtc/base/virtualsocketserver.cc
index a9ca98b..4568bf2 100644
--- a/webrtc/base/virtualsocketserver.cc
+++ b/webrtc/base/virtualsocketserver.cc
@@ -1115,7 +1115,7 @@
   return IPAddress();
 }
 void VirtualSocketServer::SetDefaultRoute(const IPAddress& from_addr) {
-  DCHECK(!IPIsAny(from_addr));
+  RTC_DCHECK(!IPIsAny(from_addr));
   if (from_addr.family() == AF_INET) {
     default_route_v4_ = from_addr;
   } else if (from_addr.family() == AF_INET6) {
diff --git a/webrtc/common_audio/audio_converter.cc b/webrtc/common_audio/audio_converter.cc
index 624c38d..07e5c6b 100644
--- a/webrtc/common_audio/audio_converter.cc
+++ b/webrtc/common_audio/audio_converter.cc
@@ -106,7 +106,7 @@
  public:
   CompositionConverter(ScopedVector<AudioConverter> converters)
       : converters_(converters.Pass()) {
-    CHECK_GE(converters_.size(), 2u);
+    RTC_CHECK_GE(converters_.size(), 2u);
     // We need an intermediate buffer after every converter.
     for (auto it = converters_.begin(); it != converters_.end() - 1; ++it)
       buffers_.push_back(new ChannelBuffer<float>((*it)->dst_frames(),
@@ -188,12 +188,13 @@
       src_frames_(src_frames),
       dst_channels_(dst_channels),
       dst_frames_(dst_frames) {
-  CHECK(dst_channels == src_channels || dst_channels == 1 || src_channels == 1);
+  RTC_CHECK(dst_channels == src_channels || dst_channels == 1 ||
+            src_channels == 1);
 }
 
 void AudioConverter::CheckSizes(size_t src_size, size_t dst_capacity) const {
-  CHECK_EQ(src_size, src_channels() * src_frames());
-  CHECK_GE(dst_capacity, dst_channels() * dst_frames());
+  RTC_CHECK_EQ(src_size, src_channels() * src_frames());
+  RTC_CHECK_GE(dst_capacity, dst_channels() * dst_frames());
 }
 
 }  // namespace webrtc
diff --git a/webrtc/common_audio/audio_converter.h b/webrtc/common_audio/audio_converter.h
index c6fe08e..7d1513b 100644
--- a/webrtc/common_audio/audio_converter.h
+++ b/webrtc/common_audio/audio_converter.h
@@ -49,7 +49,7 @@
   AudioConverter(int src_channels, size_t src_frames, int dst_channels,
                  size_t dst_frames);
 
-  // Helper to CHECK that inputs are correctly sized.
+  // Helper to RTC_CHECK that inputs are correctly sized.
   void CheckSizes(size_t src_size, size_t dst_capacity) const;
 
  private:
diff --git a/webrtc/common_audio/audio_ring_buffer.cc b/webrtc/common_audio/audio_ring_buffer.cc
index 13cf36b..a29e53a 100644
--- a/webrtc/common_audio/audio_ring_buffer.cc
+++ b/webrtc/common_audio/audio_ring_buffer.cc
@@ -30,19 +30,19 @@
 
 void AudioRingBuffer::Write(const float* const* data, size_t channels,
                             size_t frames) {
-  DCHECK_EQ(buffers_.size(), channels);
+  RTC_DCHECK_EQ(buffers_.size(), channels);
   for (size_t i = 0; i < channels; ++i) {
     const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
-    CHECK_EQ(written, frames);
+    RTC_CHECK_EQ(written, frames);
   }
 }
 
 void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
-  DCHECK_EQ(buffers_.size(), channels);
+  RTC_DCHECK_EQ(buffers_.size(), channels);
   for (size_t i = 0; i < channels; ++i) {
     const size_t read =
         WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
-    CHECK_EQ(read, frames);
+    RTC_CHECK_EQ(read, frames);
   }
 }
 
@@ -60,7 +60,7 @@
   for (auto buf : buffers_) {
     const size_t moved =
         static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
-    CHECK_EQ(moved, frames);
+    RTC_CHECK_EQ(moved, frames);
   }
 }
 
@@ -68,7 +68,7 @@
   for (auto buf : buffers_) {
     const size_t moved = static_cast<size_t>(
         -WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
-    CHECK_EQ(moved, frames);
+    RTC_CHECK_EQ(moved, frames);
   }
 }
 
diff --git a/webrtc/common_audio/blocker.cc b/webrtc/common_audio/blocker.cc
index 359e881..0133550 100644
--- a/webrtc/common_audio/blocker.cc
+++ b/webrtc/common_audio/blocker.cc
@@ -118,8 +118,8 @@
       window_(new float[block_size_]),
       shift_amount_(shift_amount),
       callback_(callback) {
-  CHECK_LE(num_output_channels_, num_input_channels_);
-  CHECK_LE(shift_amount_, block_size_);
+  RTC_CHECK_LE(num_output_channels_, num_input_channels_);
+  RTC_CHECK_LE(shift_amount_, block_size_);
 
   memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
   input_buffer_.MoveReadPositionBackward(initial_delay_);
@@ -169,9 +169,9 @@
                            int num_input_channels,
                            int num_output_channels,
                            float* const* output) {
-  CHECK_EQ(chunk_size, chunk_size_);
-  CHECK_EQ(num_input_channels, num_input_channels_);
-  CHECK_EQ(num_output_channels, num_output_channels_);
+  RTC_CHECK_EQ(chunk_size, chunk_size_);
+  RTC_CHECK_EQ(num_input_channels, num_input_channels_);
+  RTC_CHECK_EQ(num_output_channels, num_output_channels_);
 
   input_buffer_.Write(input, num_input_channels, chunk_size_);
   size_t first_frame_in_block = frame_offset_;
diff --git a/webrtc/common_audio/channel_buffer.h b/webrtc/common_audio/channel_buffer.h
index 00ea733..6050090 100644
--- a/webrtc/common_audio/channel_buffer.h
+++ b/webrtc/common_audio/channel_buffer.h
@@ -75,7 +75,7 @@
   // 0 <= channel < |num_channels_|
   // 0 <= sample < |num_frames_per_band_|
   const T* const* channels(size_t band) const {
-    DCHECK_LT(band, num_bands_);
+    RTC_DCHECK_LT(band, num_bands_);
     return &channels_[band * num_channels_];
   }
   T* const* channels(size_t band) {
@@ -91,8 +91,8 @@
   // 0 <= band < |num_bands_|
   // 0 <= sample < |num_frames_per_band_|
   const T* const* bands(int channel) const {
-    DCHECK_LT(channel, num_channels_);
-    DCHECK_GE(channel, 0);
+    RTC_DCHECK_LT(channel, num_channels_);
+    RTC_DCHECK_GE(channel, 0);
     return &bands_[channel * num_bands_];
   }
   T* const* bands(int channel) {
@@ -103,7 +103,7 @@
   // Sets the |slice| pointers to the |start_frame| position for each channel.
   // Returns |slice| for convenience.
   const T* const* Slice(T** slice, size_t start_frame) const {
-    DCHECK_LT(start_frame, num_frames_);
+    RTC_DCHECK_LT(start_frame, num_frames_);
     for (int i = 0; i < num_channels_; ++i)
       slice[i] = &channels_[i][start_frame];
     return slice;
@@ -120,7 +120,7 @@
   size_t size() const {return num_frames_ * num_channels_; }
 
   void SetDataForTesting(const T* data, size_t size) {
-    CHECK_EQ(size, this->size());
+    RTC_CHECK_EQ(size, this->size());
     memcpy(data_.get(), data, size * sizeof(*data));
   }
 
diff --git a/webrtc/common_audio/include/audio_util.h b/webrtc/common_audio/include/audio_util.h
index d8e1ce3..2c0028c 100644
--- a/webrtc/common_audio/include/audio_util.h
+++ b/webrtc/common_audio/include/audio_util.h
@@ -154,8 +154,8 @@
                                   size_t num_frames,
                                   int num_channels,
                                   T* deinterleaved) {
-  DCHECK_GT(num_channels, 0);
-  DCHECK_GT(num_frames, 0u);
+  RTC_DCHECK_GT(num_channels, 0);
+  RTC_DCHECK_GT(num_frames, 0u);
 
   const T* const end = interleaved + num_frames * num_channels;
 
diff --git a/webrtc/common_audio/lapped_transform.cc b/webrtc/common_audio/lapped_transform.cc
index 525450d..c01f1d9 100644
--- a/webrtc/common_audio/lapped_transform.cc
+++ b/webrtc/common_audio/lapped_transform.cc
@@ -24,9 +24,9 @@
                                                int num_input_channels,
                                                int num_output_channels,
                                                float* const* output) {
-  CHECK_EQ(num_input_channels, parent_->num_in_channels_);
-  CHECK_EQ(num_output_channels, parent_->num_out_channels_);
-  CHECK_EQ(parent_->block_length_, num_frames);
+  RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
+  RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
+  RTC_CHECK_EQ(parent_->block_length_, num_frames);
 
   for (int i = 0; i < num_input_channels; ++i) {
     memcpy(parent_->real_buf_.Row(i), input[i],
@@ -37,7 +37,7 @@
 
   size_t block_length = RealFourier::ComplexLength(
       RealFourier::FftOrder(num_frames));
-  CHECK_EQ(parent_->cplx_length_, block_length);
+  RTC_CHECK_EQ(parent_->cplx_length_, block_length);
   parent_->block_processor_->ProcessAudioBlock(parent_->cplx_pre_.Array(),
                                                num_input_channels,
                                                parent_->cplx_length_,
@@ -83,13 +83,13 @@
       cplx_post_(num_out_channels,
                  cplx_length_,
                  RealFourier::kFftBufferAlignment) {
-  CHECK(num_in_channels_ > 0 && num_out_channels_ > 0);
-  CHECK_GT(block_length_, 0u);
-  CHECK_GT(chunk_length_, 0u);
-  CHECK(block_processor_);
+  RTC_CHECK(num_in_channels_ > 0 && num_out_channels_ > 0);
+  RTC_CHECK_GT(block_length_, 0u);
+  RTC_CHECK_GT(chunk_length_, 0u);
+  RTC_CHECK(block_processor_);
 
   // block_length_ power of 2?
-  CHECK_EQ(0u, block_length_ & (block_length_ - 1));
+  RTC_CHECK_EQ(0u, block_length_ & (block_length_ - 1));
 }
 
 void LappedTransform::ProcessChunk(const float* const* in_chunk,
diff --git a/webrtc/common_audio/lapped_transform_unittest.cc b/webrtc/common_audio/lapped_transform_unittest.cc
index 49751c0..f688cc2 100644
--- a/webrtc/common_audio/lapped_transform_unittest.cc
+++ b/webrtc/common_audio/lapped_transform_unittest.cc
@@ -29,7 +29,7 @@
                                  size_t frames,
                                  int out_channels,
                                  complex<float>* const* out_block) {
-    CHECK_EQ(in_channels, out_channels);
+    RTC_CHECK_EQ(in_channels, out_channels);
     for (int i = 0; i < out_channels; ++i) {
       memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
     }
@@ -53,7 +53,7 @@
                                  size_t frames,
                                  int out_channels,
                                  complex<float>* const* out_block) {
-    CHECK_EQ(in_channels, out_channels);
+    RTC_CHECK_EQ(in_channels, out_channels);
 
     size_t full_length = (frames - 1) * 2;
     ++block_num_;
diff --git a/webrtc/common_audio/real_fourier.cc b/webrtc/common_audio/real_fourier.cc
index 29b704b..fef3c60 100644
--- a/webrtc/common_audio/real_fourier.cc
+++ b/webrtc/common_audio/real_fourier.cc
@@ -30,12 +30,12 @@
 }
 
 int RealFourier::FftOrder(size_t length) {
-  CHECK_GT(length, 0U);
+  RTC_CHECK_GT(length, 0U);
   return WebRtcSpl_GetSizeInBits(static_cast<uint32_t>(length - 1));
 }
 
 size_t RealFourier::FftLength(int order) {
-  CHECK_GE(order, 0);
+  RTC_CHECK_GE(order, 0);
   return static_cast<size_t>(1 << order);
 }
 
diff --git a/webrtc/common_audio/real_fourier_ooura.cc b/webrtc/common_audio/real_fourier_ooura.cc
index 1c4004d..8cd4c86 100644
--- a/webrtc/common_audio/real_fourier_ooura.cc
+++ b/webrtc/common_audio/real_fourier_ooura.cc
@@ -42,7 +42,7 @@
       // arrays on the first call.
       work_ip_(new size_t[ComputeWorkIpSize(length_)]()),
       work_w_(new float[complex_length_]()) {
-  CHECK_GE(fft_order, 1);
+  RTC_CHECK_GE(fft_order, 1);
 }
 
 void RealFourierOoura::Forward(const float* src, complex<float>* dest) const {
diff --git a/webrtc/common_audio/real_fourier_openmax.cc b/webrtc/common_audio/real_fourier_openmax.cc
index f7a0f64..bc3e734 100644
--- a/webrtc/common_audio/real_fourier_openmax.cc
+++ b/webrtc/common_audio/real_fourier_openmax.cc
@@ -23,19 +23,19 @@
 
 // Creates and initializes the Openmax state. Transfers ownership to caller.
 OMXFFTSpec_R_F32* CreateOpenmaxState(int order) {
-  CHECK_GE(order, 1);
+  RTC_CHECK_GE(order, 1);
   // The omx implementation uses this macro to check order validity.
-  CHECK_LE(order, TWIDDLE_TABLE_ORDER);
+  RTC_CHECK_LE(order, TWIDDLE_TABLE_ORDER);
 
   OMX_INT buffer_size;
   OMXResult r = omxSP_FFTGetBufSize_R_F32(order, &buffer_size);
-  CHECK_EQ(r, OMX_Sts_NoErr);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
 
   OMXFFTSpec_R_F32* omx_spec = malloc(buffer_size);
-  DCHECK(omx_spec);
+  RTC_DCHECK(omx_spec);
 
   r = omxSP_FFTInit_R_F32(omx_spec, order);
-  CHECK_EQ(r, OMX_Sts_NoErr);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
   return omx_spec;
 }
 
@@ -55,14 +55,14 @@
   // http://en.cppreference.com/w/cpp/numeric/complex
   OMXResult r =
       omxSP_FFTFwd_RToCCS_F32(src, reinterpret_cast<OMX_F32*>(dest), omx_spec_);
-  CHECK_EQ(r, OMX_Sts_NoErr);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
 }
 
 void RealFourierOpenmax::Inverse(const complex<float>* src, float* dest) const {
   OMXResult r =
       omxSP_FFTInv_CCSToR_F32(reinterpret_cast<const OMX_F32*>(src), dest,
                               omx_spec_);
-  CHECK_EQ(r, OMX_Sts_NoErr);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/common_audio/resampler/push_sinc_resampler.cc b/webrtc/common_audio/resampler/push_sinc_resampler.cc
index 72ed56b..a740423 100644
--- a/webrtc/common_audio/resampler/push_sinc_resampler.cc
+++ b/webrtc/common_audio/resampler/push_sinc_resampler.cc
@@ -50,8 +50,8 @@
                                    size_t source_length,
                                    float* destination,
                                    size_t destination_capacity) {
-  CHECK_EQ(source_length, resampler_->request_frames());
-  CHECK_GE(destination_capacity, destination_frames_);
+  RTC_CHECK_EQ(source_length, resampler_->request_frames());
+  RTC_CHECK_GE(destination_capacity, destination_frames_);
   // Cache the source pointer. Calling Resample() will immediately trigger
   // the Run() callback whereupon we provide the cached value.
   source_ptr_ = source;
@@ -81,7 +81,7 @@
 void PushSincResampler::Run(size_t frames, float* destination) {
   // Ensure we are only asked for the available samples. This would fail if
   // Run() was triggered more than once per Resample() call.
-  CHECK_EQ(source_available_, frames);
+  RTC_CHECK_EQ(source_available_, frames);
 
   if (first_pass_) {
     // Provide dummy input on the first pass, the output of which will be
diff --git a/webrtc/common_audio/resampler/sinc_resampler_unittest.cc b/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
index 8bdcb25..206a174 100644
--- a/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
+++ b/webrtc/common_audio/resampler/sinc_resampler_unittest.cc
@@ -163,8 +163,8 @@
 #endif
 
 // Benchmark for the various Convolve() methods.  Make sure to build with
-// branding=Chrome so that DCHECKs are compiled out when benchmarking.  Original
-// benchmarks were run with --convolve-iterations=50000000.
+// branding=Chrome so that RTC_DCHECKs are compiled out when benchmarking.
+// Original benchmarks were run with --convolve-iterations=50000000.
 TEST(SincResamplerTest, ConvolveBenchmark) {
   // Initialize a dummy resampler.
   MockSource mock_source;
diff --git a/webrtc/common_audio/sparse_fir_filter.cc b/webrtc/common_audio/sparse_fir_filter.cc
index 28bc013..5862b7c 100644
--- a/webrtc/common_audio/sparse_fir_filter.cc
+++ b/webrtc/common_audio/sparse_fir_filter.cc
@@ -22,8 +22,8 @@
       offset_(offset),
       nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs),
       state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) {
-  CHECK_GE(num_nonzero_coeffs, 1u);
-  CHECK_GE(sparsity, 1u);
+  RTC_CHECK_GE(num_nonzero_coeffs, 1u);
+  RTC_CHECK_GE(sparsity, 1u);
 }
 
 void SparseFIRFilter::Filter(const float* in, size_t length, float* out) {
diff --git a/webrtc/common_audio/vad/vad.cc b/webrtc/common_audio/vad/vad.cc
index 8973a68..95a162f 100644
--- a/webrtc/common_audio/vad/vad.cc
+++ b/webrtc/common_audio/vad/vad.cc
@@ -35,7 +35,7 @@
       case 1:
         return kActive;
       default:
-        DCHECK(false) << "WebRtcVad_Process returned an error.";
+        RTC_DCHECK(false) << "WebRtcVad_Process returned an error.";
         return kError;
     }
   }
@@ -44,9 +44,9 @@
     if (handle_)
       WebRtcVad_Free(handle_);
     handle_ = WebRtcVad_Create();
-    CHECK(handle_);
-    CHECK_EQ(WebRtcVad_Init(handle_), 0);
-    CHECK_EQ(WebRtcVad_set_mode(handle_, aggressiveness_), 0);
+    RTC_CHECK(handle_);
+    RTC_CHECK_EQ(WebRtcVad_Init(handle_), 0);
+    RTC_CHECK_EQ(WebRtcVad_set_mode(handle_, aggressiveness_), 0);
   }
 
  private:
diff --git a/webrtc/common_audio/vad/vad_unittest.cc b/webrtc/common_audio/vad/vad_unittest.cc
index ecc4734..a0e16b1 100644
--- a/webrtc/common_audio/vad/vad_unittest.cc
+++ b/webrtc/common_audio/vad/vad_unittest.cc
@@ -76,7 +76,7 @@
             WebRtcVad_Process(nullptr, kRates[0], speech, kFrameLengths[0]));
 
   // WebRtcVad_Create()
-  CHECK(handle);
+  RTC_CHECK(handle);
 
   // Not initialized tests
   EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[0], speech, kFrameLengths[0]));
diff --git a/webrtc/common_audio/wav_file.cc b/webrtc/common_audio/wav_file.cc
index a0c792c..8dae7d6 100644
--- a/webrtc/common_audio/wav_file.cc
+++ b/webrtc/common_audio/wav_file.cc
@@ -39,16 +39,16 @@
 
 WavReader::WavReader(const std::string& filename)
     : file_handle_(fopen(filename.c_str(), "rb")) {
-  CHECK(file_handle_ && "Could not open wav file for reading.");
+  RTC_CHECK(file_handle_ && "Could not open wav file for reading.");
 
   ReadableWavFile readable(file_handle_);
   WavFormat format;
   int bytes_per_sample;
-  CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format,
-                      &bytes_per_sample, &num_samples_));
+  RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format,
+                          &bytes_per_sample, &num_samples_));
   num_samples_remaining_ = num_samples_;
-  CHECK_EQ(kWavFormat, format);
-  CHECK_EQ(kBytesPerSample, bytes_per_sample);
+  RTC_CHECK_EQ(kWavFormat, format);
+  RTC_CHECK_EQ(kBytesPerSample, bytes_per_sample);
 }
 
 WavReader::~WavReader() {
@@ -65,8 +65,8 @@
   const size_t read =
       fread(samples, sizeof(*samples), num_samples, file_handle_);
   // If we didn't read what was requested, ensure we've reached the EOF.
-  CHECK(read == num_samples || feof(file_handle_));
-  CHECK_LE(read, num_samples_remaining_);
+  RTC_CHECK(read == num_samples || feof(file_handle_));
+  RTC_CHECK_LE(read, num_samples_remaining_);
   num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
   return read;
 }
@@ -86,7 +86,7 @@
 }
 
 void WavReader::Close() {
-  CHECK_EQ(0, fclose(file_handle_));
+  RTC_CHECK_EQ(0, fclose(file_handle_));
   file_handle_ = NULL;
 }
 
@@ -96,17 +96,14 @@
       num_channels_(num_channels),
       num_samples_(0),
       file_handle_(fopen(filename.c_str(), "wb")) {
-  CHECK(file_handle_ && "Could not open wav file for writing.");
-  CHECK(CheckWavParameters(num_channels_,
-                           sample_rate_,
-                           kWavFormat,
-                           kBytesPerSample,
-                           num_samples_));
+  RTC_CHECK(file_handle_ && "Could not open wav file for writing.");
+  RTC_CHECK(CheckWavParameters(num_channels_, sample_rate_, kWavFormat,
+                               kBytesPerSample, num_samples_));
 
   // Write a blank placeholder header, since we need to know the total number
   // of samples before we can fill in the real data.
   static const uint8_t blank_header[kWavHeaderSize] = {0};
-  CHECK_EQ(1u, fwrite(blank_header, kWavHeaderSize, 1, file_handle_));
+  RTC_CHECK_EQ(1u, fwrite(blank_header, kWavHeaderSize, 1, file_handle_));
 }
 
 WavWriter::~WavWriter() {
@@ -119,10 +116,10 @@
 #endif
   const size_t written =
       fwrite(samples, sizeof(*samples), num_samples, file_handle_);
-  CHECK_EQ(num_samples, written);
+  RTC_CHECK_EQ(num_samples, written);
   num_samples_ += static_cast<uint32_t>(written);
-  CHECK(written <= std::numeric_limits<uint32_t>::max() ||
-        num_samples_ >= written);  // detect uint32_t overflow
+  RTC_CHECK(written <= std::numeric_limits<uint32_t>::max() ||
+            num_samples_ >= written);  // detect uint32_t overflow
 }
 
 void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
@@ -136,12 +133,12 @@
 }
 
 void WavWriter::Close() {
-  CHECK_EQ(0, fseek(file_handle_, 0, SEEK_SET));
+  RTC_CHECK_EQ(0, fseek(file_handle_, 0, SEEK_SET));
   uint8_t header[kWavHeaderSize];
   WriteWavHeader(header, num_channels_, sample_rate_, kWavFormat,
                  kBytesPerSample, num_samples_);
-  CHECK_EQ(1u, fwrite(header, kWavHeaderSize, 1, file_handle_));
-  CHECK_EQ(0, fclose(file_handle_));
+  RTC_CHECK_EQ(1u, fwrite(header, kWavHeaderSize, 1, file_handle_));
+  RTC_CHECK_EQ(0, fclose(file_handle_));
   file_handle_ = NULL;
 }
 
diff --git a/webrtc/common_audio/wav_file.h b/webrtc/common_audio/wav_file.h
index 14a8a0e..2eadd3f 100644
--- a/webrtc/common_audio/wav_file.h
+++ b/webrtc/common_audio/wav_file.h
@@ -32,7 +32,7 @@
 };
 
 // Simple C++ class for writing 16-bit PCM WAV files. All error handling is
-// by calls to CHECK(), making it unsuitable for anything but debug code.
+// by calls to RTC_CHECK(), making it unsuitable for anything but debug code.
 class WavWriter final : public WavFile {
  public:
   // Open a new WAV file for writing.
diff --git a/webrtc/common_audio/wav_header.cc b/webrtc/common_audio/wav_header.cc
index fefbee0..61cfffe 100644
--- a/webrtc/common_audio/wav_header.cc
+++ b/webrtc/common_audio/wav_header.cc
@@ -151,8 +151,8 @@
                     WavFormat format,
                     int bytes_per_sample,
                     uint32_t num_samples) {
-  CHECK(CheckWavParameters(num_channels, sample_rate, format,
-                           bytes_per_sample, num_samples));
+  RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format,
+                               bytes_per_sample, num_samples));
 
   WavHeader header;
   const uint32_t bytes_in_payload = bytes_per_sample * num_samples;
diff --git a/webrtc/common_audio/window_generator.cc b/webrtc/common_audio/window_generator.cc
index ae6cbc9..ab983b7 100644
--- a/webrtc/common_audio/window_generator.cc
+++ b/webrtc/common_audio/window_generator.cc
@@ -38,8 +38,8 @@
 namespace webrtc {
 
 void WindowGenerator::Hanning(int length, float* window) {
-  CHECK_GT(length, 1);
-  CHECK(window != nullptr);
+  RTC_CHECK_GT(length, 1);
+  RTC_CHECK(window != nullptr);
   for (int i = 0; i < length; ++i) {
     window[i] = 0.5f * (1 - cosf(2 * static_cast<float>(M_PI) * i /
                                  (length - 1)));
@@ -48,8 +48,8 @@
 
 void WindowGenerator::KaiserBesselDerived(float alpha, size_t length,
                                           float* window) {
-  CHECK_GT(length, 1U);
-  CHECK(window != nullptr);
+  RTC_CHECK_GT(length, 1U);
+  RTC_CHECK(window != nullptr);
 
   const size_t half = (length + 1) / 2;
   float sum = 0.0f;
diff --git a/webrtc/common_video/i420_buffer_pool.cc b/webrtc/common_video/i420_buffer_pool.cc
index cb1f4d4..c746666 100644
--- a/webrtc/common_video/i420_buffer_pool.cc
+++ b/webrtc/common_video/i420_buffer_pool.cc
@@ -32,7 +32,7 @@
   uint8_t* MutableData(webrtc::PlaneType type) override {
     // Make the HasOneRef() check here instead of in |buffer_|, because the pool
     // also has a reference to |buffer_|.
-    DCHECK(HasOneRef());
+    RTC_DCHECK(HasOneRef());
     return const_cast<uint8_t*>(buffer_->data(type));
   }
   int stride(webrtc::PlaneType type) const override {
@@ -64,7 +64,7 @@
 
 rtc::scoped_refptr<VideoFrameBuffer> I420BufferPool::CreateBuffer(int width,
                                                                   int height) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   // Release buffers with wrong resolution.
   for (auto it = buffers_.begin(); it != buffers_.end();) {
     if ((*it)->width() != width || (*it)->height() != height)
diff --git a/webrtc/common_video/video_frame.cc b/webrtc/common_video/video_frame.cc
index 0ebb983..7cdbd53 100644
--- a/webrtc/common_video/video_frame.cc
+++ b/webrtc/common_video/video_frame.cc
@@ -42,11 +42,11 @@
                                  int stride_u,
                                  int stride_v) {
   const int half_width = (width + 1) / 2;
-  DCHECK_GT(width, 0);
-  DCHECK_GT(height, 0);
-  DCHECK_GE(stride_y, width);
-  DCHECK_GE(stride_u, half_width);
-  DCHECK_GE(stride_v, half_width);
+  RTC_DCHECK_GT(width, 0);
+  RTC_DCHECK_GT(height, 0);
+  RTC_DCHECK_GE(stride_y, width);
+  RTC_DCHECK_GE(stride_u, half_width);
+  RTC_DCHECK_GE(stride_v, half_width);
 
   // Creating empty frame - reset all values.
   timestamp_ = 0;
@@ -195,7 +195,7 @@
 }
 
 VideoFrame VideoFrame::ConvertNativeToI420Frame() const {
-  DCHECK(native_handle());
+  RTC_DCHECK(native_handle());
   VideoFrame frame;
   frame.ShallowCopy(*this);
   frame.set_video_frame_buffer(video_frame_buffer_->NativeToI420Buffer());
diff --git a/webrtc/common_video/video_frame_buffer.cc b/webrtc/common_video/video_frame_buffer.cc
index 4c15958..36ee14a 100644
--- a/webrtc/common_video/video_frame_buffer.cc
+++ b/webrtc/common_video/video_frame_buffer.cc
@@ -48,11 +48,11 @@
       data_(static_cast<uint8_t*>(AlignedMalloc(
           stride_y * height + (stride_u + stride_v) * ((height + 1) / 2),
           kBufferAlignment))) {
-  DCHECK_GT(width, 0);
-  DCHECK_GT(height, 0);
-  DCHECK_GE(stride_y, width);
-  DCHECK_GE(stride_u, (width + 1) / 2);
-  DCHECK_GE(stride_v, (width + 1) / 2);
+  RTC_DCHECK_GT(width, 0);
+  RTC_DCHECK_GT(height, 0);
+  RTC_DCHECK_GE(stride_y, width);
+  RTC_DCHECK_GE(stride_u, (width + 1) / 2);
+  RTC_DCHECK_GE(stride_v, (width + 1) / 2);
 }
 
 I420Buffer::~I420Buffer() {
@@ -82,7 +82,7 @@
 }
 
 uint8_t* I420Buffer::MutableData(PlaneType type) {
-  DCHECK(HasOneRef());
+  RTC_DCHECK(HasOneRef());
   return const_cast<uint8_t*>(
       static_cast<const VideoFrameBuffer*>(this)->data(type));
 }
@@ -114,9 +114,9 @@
                                        int width,
                                        int height)
     : native_handle_(native_handle), width_(width), height_(height) {
-  DCHECK(native_handle != nullptr);
-  DCHECK_GT(width, 0);
-  DCHECK_GT(height, 0);
+  RTC_DCHECK(native_handle != nullptr);
+  RTC_DCHECK_GT(width, 0);
+  RTC_DCHECK_GT(height, 0);
 }
 
 int NativeHandleBuffer::width() const {
@@ -214,9 +214,9 @@
     const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
     int cropped_width,
     int cropped_height) {
-  CHECK(buffer->native_handle() == nullptr);
-  CHECK_LE(cropped_width, buffer->width());
-  CHECK_LE(cropped_height, buffer->height());
+  RTC_CHECK(buffer->native_handle() == nullptr);
+  RTC_CHECK_LE(cropped_width, buffer->width());
+  RTC_CHECK_LE(cropped_height, buffer->height());
   if (buffer->width() == cropped_width && buffer->height() == cropped_height)
     return buffer;
 
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.cc b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
index c0c20be..6d76300 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
@@ -26,11 +26,11 @@
                                                size_t num_samples_per_channel,
                                                size_t max_encoded_bytes,
                                                uint8_t* encoded) {
-  CHECK_EQ(num_samples_per_channel,
-           static_cast<size_t>(SampleRateHz() / 100));
+  RTC_CHECK_EQ(num_samples_per_channel,
+               static_cast<size_t>(SampleRateHz() / 100));
   EncodedInfo info =
       EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
-  CHECK_LE(info.encoded_bytes, max_encoded_bytes);
+  RTC_CHECK_LE(info.encoded_bytes, max_encoded_bytes);
   return info;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index 2fe58c9..1215246 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -24,9 +24,10 @@
     int sid_frame_interval_ms,
     int num_cng_coefficients) {
   rtc::scoped_ptr<CNG_enc_inst, CngInstDeleter> cng_inst;
-  CHECK_EQ(0, WebRtcCng_CreateEnc(cng_inst.accept()));
-  CHECK_EQ(0, WebRtcCng_InitEnc(cng_inst.get(), sample_rate_hz,
-                                sid_frame_interval_ms, num_cng_coefficients));
+  RTC_CHECK_EQ(0, WebRtcCng_CreateEnc(cng_inst.accept()));
+  RTC_CHECK_EQ(0,
+               WebRtcCng_InitEnc(cng_inst.get(), sample_rate_hz,
+                                 sid_frame_interval_ms, num_cng_coefficients));
   return cng_inst;
 }
 
@@ -56,7 +57,7 @@
       last_frame_active_(true),
       vad_(config.vad ? rtc_make_scoped_ptr(config.vad)
                       : CreateVad(config.vad_mode)) {
-  CHECK(config.IsOk()) << "Invalid configuration.";
+  RTC_CHECK(config.IsOk()) << "Invalid configuration.";
   cng_inst_ = CreateCngInst(SampleRateHz(), sid_frame_interval_ms_,
                             num_cng_coefficients_);
 }
@@ -99,10 +100,11 @@
     const int16_t* audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
-  CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
+  RTC_CHECK_GE(max_encoded_bytes,
+               static_cast<size_t>(num_cng_coefficients_ + 1));
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
-  CHECK_EQ(speech_buffer_.size(),
-           rtp_timestamps_.size() * samples_per_10ms_frame);
+  RTC_CHECK_EQ(speech_buffer_.size(),
+               rtp_timestamps_.size() * samples_per_10ms_frame);
   rtp_timestamps_.push_back(rtp_timestamp);
   for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
     speech_buffer_.push_back(audio[i]);
@@ -111,7 +113,7 @@
   if (rtp_timestamps_.size() < frames_to_encode) {
     return EncodedInfo();
   }
-  CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
+  RTC_CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
       << "Frame size cannot be larger than " << kMaxFrameSizeMs
       << " ms when using VAD/CNG.";
 
@@ -123,7 +125,7 @@
       (frames_to_encode > 3 ? 3 : frames_to_encode);
   if (frames_to_encode == 4)
     blocks_in_first_vad_call = 2;
-  CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
+  RTC_CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
   const size_t blocks_in_second_vad_call =
       frames_to_encode - blocks_in_first_vad_call;
 
@@ -206,7 +208,7 @@
   bool force_sid = last_frame_active_;
   bool output_produced = false;
   const size_t samples_per_10ms_frame = SamplesPer10msFrame();
-  CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
+  RTC_CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
   AudioEncoder::EncodedInfo info;
   for (size_t i = 0; i < frames_to_encode; ++i) {
     // It's important not to pass &info.encoded_bytes directly to
@@ -214,12 +216,13 @@
     // value, in which case we don't want to overwrite any value from an earlier
     // iteration.
     size_t encoded_bytes_tmp = 0;
-    CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
-                              &speech_buffer_[i * samples_per_10ms_frame],
-                              samples_per_10ms_frame,
-                              encoded, &encoded_bytes_tmp, force_sid), 0);
+    RTC_CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
+                                  &speech_buffer_[i * samples_per_10ms_frame],
+                                  samples_per_10ms_frame, encoded,
+                                  &encoded_bytes_tmp, force_sid),
+                 0);
     if (encoded_bytes_tmp > 0) {
-      CHECK(!output_produced);
+      RTC_CHECK(!output_produced);
       info.encoded_bytes = encoded_bytes_tmp;
       output_produced = true;
       force_sid = false;
@@ -243,9 +246,10 @@
         rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
         samples_per_10ms_frame, max_encoded_bytes, encoded);
     if (i + 1 == frames_to_encode) {
-      CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
+      RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
     } else {
-      CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
+      RTC_CHECK_EQ(info.encoded_bytes, 0u)
+          << "Encoder delivered data too early.";
     }
   }
   return info;
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index f7812b3..dde3cc6 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -24,7 +24,7 @@
                            int frame_size_ms,
                            int sample_rate_hz) {
   int samples_per_frame = num_channels * frame_size_ms * sample_rate_hz / 1000;
-  CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
+  RTC_CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
       << "Frame size too large.";
   return static_cast<int16_t>(samples_per_frame);
 }
@@ -54,8 +54,8 @@
                                              config.frame_size_ms,
                                              sample_rate_hz_)),
       first_timestamp_in_buffer_(0) {
-  CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
-  CHECK_EQ(config.frame_size_ms % 10, 0)
+  RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
+  RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
       << "Frame size must be an integer multiple of 10 ms.";
   speech_buffer_.reserve(full_frame_samples_);
 }
@@ -101,8 +101,8 @@
   if (speech_buffer_.size() < full_frame_samples_) {
     return EncodedInfo();
   }
-  CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
-  CHECK_GE(max_encoded_bytes, full_frame_samples_);
+  RTC_CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
+  RTC_CHECK_GE(max_encoded_bytes, full_frame_samples_);
   EncodedInfo info;
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index 6df5430..43b097f 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -45,7 +45,7 @@
       first_timestamp_in_buffer_(0),
       encoders_(new EncoderState[num_channels_]),
       interleave_buffer_(2 * num_channels_) {
-  CHECK(config.IsOk());
+  RTC_CHECK(config.IsOk());
   const size_t samples_per_channel =
       kSampleRateHz / 100 * num_10ms_frames_per_packet_;
   for (int i = 0; i < num_channels_; ++i) {
@@ -96,7 +96,7 @@
     const int16_t* audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
-  CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
+  RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
 
   if (num_10ms_frames_buffered_ == 0)
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -113,14 +113,14 @@
   }
 
   // Encode each channel separately.
-  CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+  RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
   num_10ms_frames_buffered_ = 0;
   const size_t samples_per_channel = SamplesPerChannel();
   for (int i = 0; i < num_channels_; ++i) {
     const size_t encoded = WebRtcG722_Encode(
         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data());
-    CHECK_EQ(encoded, samples_per_channel / 2);
+    RTC_CHECK_EQ(encoded, samples_per_channel / 2);
   }
 
   // Interleave the encoded bytes of the different channels. Each separate
@@ -146,15 +146,15 @@
 void AudioEncoderG722::Reset() {
   num_10ms_frames_buffered_ = 0;
   for (int i = 0; i < num_channels_; ++i)
-    CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
+    RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
 }
 
 AudioEncoderG722::EncoderState::EncoderState() {
-  CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
+  RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
 }
 
 AudioEncoderG722::EncoderState::~EncoderState() {
-  CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
+  RTC_CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
 }
 
 size_t AudioEncoderG722::SamplesPerChannel() const {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
index 619d686..998e10d 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
@@ -33,7 +33,7 @@
                                      int sample_rate_hz,
                                      int16_t* decoded,
                                      SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 8000);
+  RTC_DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
   int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
                                  &temp_type);
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 8f16d66..e3d729f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -53,7 +53,7 @@
     : AudioEncoderIlbc(CreateConfig(codec_inst)) {}
 
 AudioEncoderIlbc::~AudioEncoderIlbc() {
-  CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
 }
 
 size_t AudioEncoderIlbc::MaxEncodedBytes() const {
@@ -94,7 +94,7 @@
     const int16_t* audio,
     size_t max_encoded_bytes,
     uint8_t* encoded) {
-  DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
+  RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
 
   // Save timestamp if starting a new packet.
   if (num_10ms_frames_buffered_ == 0)
@@ -112,17 +112,17 @@
   }
 
   // Encode buffered input.
-  DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+  RTC_DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
   num_10ms_frames_buffered_ = 0;
   const int output_len = WebRtcIlbcfix_Encode(
       encoder_,
       input_buffer_,
       kSampleRateHz / 100 * num_10ms_frames_per_packet_,
       encoded);
-  CHECK_GE(output_len, 0);
+  RTC_CHECK_GE(output_len, 0);
   EncodedInfo info;
   info.encoded_bytes = static_cast<size_t>(output_len);
-  DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
+  RTC_DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = config_.payload_type;
   return info;
@@ -130,13 +130,13 @@
 
 void AudioEncoderIlbc::Reset() {
   if (encoder_)
-    CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
-  CHECK(config_.IsOk());
-  CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
+    RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+  RTC_CHECK(config_.IsOk());
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
   const int encoder_frame_size_ms = config_.frame_size_ms > 30
                                         ? config_.frame_size_ms / 2
                                         : config_.frame_size_ms;
-  CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
   num_10ms_frames_buffered_ = 0;
 }
 
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index 3cc635c..4122ee0 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -78,7 +78,7 @@
 
 template <typename T>
 AudioEncoderIsacT<T>::~AudioEncoderIsacT() {
-  CHECK_EQ(0, T::Free(isac_state_));
+  RTC_CHECK_EQ(0, T::Free(isac_state_));
 }
 
 template <typename T>
@@ -132,12 +132,12 @@
     T::SetBandwidthInfo(isac_state_, &bwinfo);
   }
   int r = T::Encode(isac_state_, audio, encoded);
-  CHECK_GE(r, 0) << "Encode failed (error code " << T::GetErrorCode(isac_state_)
-                 << ")";
+  RTC_CHECK_GE(r, 0) << "Encode failed (error code "
+                     << T::GetErrorCode(isac_state_) << ")";
 
   // T::Encode doesn't allow us to tell it the size of the output
   // buffer. All we can do is check for an overrun after the fact.
-  CHECK_LE(static_cast<size_t>(r), max_encoded_bytes);
+  RTC_CHECK_LE(static_cast<size_t>(r), max_encoded_bytes);
 
   if (r == 0)
     return EncodedInfo();
@@ -159,26 +159,26 @@
 
 template <typename T>
 void AudioEncoderIsacT<T>::RecreateEncoderInstance(const Config& config) {
-  CHECK(config.IsOk());
+  RTC_CHECK(config.IsOk());
   packet_in_progress_ = false;
   bwinfo_ = config.bwinfo;
   if (isac_state_)
-    CHECK_EQ(0, T::Free(isac_state_));
-  CHECK_EQ(0, T::Create(&isac_state_));
-  CHECK_EQ(0, T::EncoderInit(isac_state_, config.adaptive_mode ? 0 : 1));
-  CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
+    RTC_CHECK_EQ(0, T::Free(isac_state_));
+  RTC_CHECK_EQ(0, T::Create(&isac_state_));
+  RTC_CHECK_EQ(0, T::EncoderInit(isac_state_, config.adaptive_mode ? 0 : 1));
+  RTC_CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
   const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
   if (config.adaptive_mode) {
-    CHECK_EQ(0, T::ControlBwe(isac_state_, bit_rate, config.frame_size_ms,
-                              config.enforce_frame_size));
+    RTC_CHECK_EQ(0, T::ControlBwe(isac_state_, bit_rate, config.frame_size_ms,
+                                  config.enforce_frame_size));
   } else {
-    CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
+    RTC_CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
   }
   if (config.max_payload_size_bytes != -1)
-    CHECK_EQ(0,
-             T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
+    RTC_CHECK_EQ(
+        0, T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
   if (config.max_bit_rate != -1)
-    CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
+    RTC_CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
 
   // When config.sample_rate_hz is set to 48000 Hz (iSAC-fb), the decoder is
   // still set to 32000 Hz, since there is no full-band mode in the decoder.
@@ -188,7 +188,7 @@
   // doesn't appear to be necessary to produce a valid encoding, but without it
   // we get an encoding that isn't bit-for-bit identical with what a combined
   // encoder+decoder object produces.
-  CHECK_EQ(0, T::SetDecSampRate(isac_state_, decoder_sample_rate_hz));
+  RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, decoder_sample_rate_hz));
 
   config_ = config;
 }
@@ -200,7 +200,7 @@
 template <typename T>
 AudioDecoderIsacT<T>::AudioDecoderIsacT(LockedIsacBandwidthInfo* bwinfo)
     : bwinfo_(bwinfo), decoder_sample_rate_hz_(-1) {
-  CHECK_EQ(0, T::Create(&isac_state_));
+  RTC_CHECK_EQ(0, T::Create(&isac_state_));
   T::DecoderInit(isac_state_);
   if (bwinfo_) {
     IsacBandwidthInfo bi;
@@ -211,7 +211,7 @@
 
 template <typename T>
 AudioDecoderIsacT<T>::~AudioDecoderIsacT() {
-  CHECK_EQ(0, T::Free(isac_state_));
+  RTC_CHECK_EQ(0, T::Free(isac_state_));
 }
 
 template <typename T>
@@ -224,10 +224,10 @@
   // in fact it outputs 32000 Hz. This is the iSAC fullband mode.
   if (sample_rate_hz == 48000)
     sample_rate_hz = 32000;
-  CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000)
+  RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000)
       << "Unsupported sample rate " << sample_rate_hz;
   if (sample_rate_hz != decoder_sample_rate_hz_) {
-    CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz));
+    RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz));
     decoder_sample_rate_hz_ = sample_rate_hz;
   }
   int16_t temp_type = 1;  // Default is speech.
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
index e710f24..5bca23e 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
@@ -84,17 +84,17 @@
   }
   static inline int16_t SetDecSampRate(instance_type* inst,
                                        uint16_t sample_rate_hz) {
-    DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
     return 0;
   }
   static inline int16_t SetEncSampRate(instance_type* inst,
                                        uint16_t sample_rate_hz) {
-    DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
     return 0;
   }
   static inline void SetEncSampRateInDecoder(instance_type* inst,
                                              uint16_t sample_rate_hz) {
-    DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
   }
   static inline void SetInitialBweBottleneck(
       instance_type* inst,
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
index e78fc04..7151ab0 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -16,7 +16,7 @@
 
 AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
     : channels_(num_channels) {
-  DCHECK(num_channels == 1 || num_channels == 2);
+  RTC_DCHECK(num_channels == 1 || num_channels == 2);
   WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
   WebRtcOpus_DecoderInit(dec_state_);
 }
@@ -30,7 +30,7 @@
                                      int sample_rate_hz,
                                      int16_t* decoded,
                                      SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 48000);
+  RTC_DCHECK_EQ(sample_rate_hz, 48000);
   int16_t temp_type = 1;  // Default is speech.
   int ret =
       WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
@@ -51,7 +51,7 @@
                           speech_type);
   }
 
-  DCHECK_EQ(sample_rate_hz, 48000);
+  RTC_DCHECK_EQ(sample_rate_hz, 48000);
   int16_t temp_type = 1;  // Default is speech.
   int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
                                  &temp_type);
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index a68530e..d47236c 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -41,10 +41,10 @@
 // a loss rate from below, a higher threshold is used than jumping to the same
 // level from above.
 double OptimizePacketLossRate(double new_loss_rate, double old_loss_rate) {
-  DCHECK_GE(new_loss_rate, 0.0);
-  DCHECK_LE(new_loss_rate, 1.0);
-  DCHECK_GE(old_loss_rate, 0.0);
-  DCHECK_LE(old_loss_rate, 1.0);
+  RTC_DCHECK_GE(new_loss_rate, 0.0);
+  RTC_DCHECK_LE(new_loss_rate, 1.0);
+  RTC_DCHECK_GE(old_loss_rate, 0.0);
+  RTC_DCHECK_LE(old_loss_rate, 1.0);
   const double kPacketLossRate20 = 0.20;
   const double kPacketLossRate10 = 0.10;
   const double kPacketLossRate5 = 0.05;
@@ -90,14 +90,14 @@
 
 AudioEncoderOpus::AudioEncoderOpus(const Config& config)
     : packet_loss_rate_(0.0), inst_(nullptr) {
-  CHECK(RecreateEncoderInstance(config));
+  RTC_CHECK(RecreateEncoderInstance(config));
 }
 
 AudioEncoderOpus::AudioEncoderOpus(const CodecInst& codec_inst)
     : AudioEncoderOpus(CreateConfig(codec_inst)) {}
 
 AudioEncoderOpus::~AudioEncoderOpus() {
-  CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+  RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
 }
 
 size_t AudioEncoderOpus::MaxEncodedBytes() const {
@@ -143,14 +143,15 @@
       (static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
     return EncodedInfo();
   }
-  CHECK_EQ(input_buffer_.size(), static_cast<size_t>(Num10msFramesPerPacket()) *
-                                     SamplesPer10msFrame());
+  RTC_CHECK_EQ(
+      input_buffer_.size(),
+      static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame());
   int status = WebRtcOpus_Encode(
       inst_, &input_buffer_[0],
       rtc::CheckedDivExact(input_buffer_.size(),
                            static_cast<size_t>(config_.num_channels)),
       rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded);
-  CHECK_GE(status, 0);  // Fails only if fed invalid data.
+  RTC_CHECK_GE(status, 0);  // Fails only if fed invalid data.
   input_buffer_.clear();
   EncodedInfo info;
   info.encoded_bytes = static_cast<size_t>(status);
@@ -162,7 +163,7 @@
 }
 
 void AudioEncoderOpus::Reset() {
-  CHECK(RecreateEncoderInstance(config_));
+  RTC_CHECK(RecreateEncoderInstance(config_));
 }
 
 bool AudioEncoderOpus::SetFec(bool enable) {
@@ -193,23 +194,24 @@
 void AudioEncoderOpus::SetMaxPlaybackRate(int frequency_hz) {
   auto conf = config_;
   conf.max_playback_rate_hz = frequency_hz;
-  CHECK(RecreateEncoderInstance(conf));
+  RTC_CHECK(RecreateEncoderInstance(conf));
 }
 
 void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
   double opt_loss_rate = OptimizePacketLossRate(fraction, packet_loss_rate_);
   if (packet_loss_rate_ != opt_loss_rate) {
     packet_loss_rate_ = opt_loss_rate;
-    CHECK_EQ(0, WebRtcOpus_SetPacketLossRate(
-                    inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+    RTC_CHECK_EQ(
+        0, WebRtcOpus_SetPacketLossRate(
+               inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
   }
 }
 
 void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
   config_.bitrate_bps =
       std::max(std::min(bits_per_second, kMaxBitrateBps), kMinBitrateBps);
-  DCHECK(config_.IsOk());
-  CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
+  RTC_DCHECK(config_.IsOk());
+  RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
 }
 
 int AudioEncoderOpus::Num10msFramesPerPacket() const {
@@ -227,27 +229,28 @@
   if (!config.IsOk())
     return false;
   if (inst_)
-    CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+    RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
   input_buffer_.clear();
   input_buffer_.reserve(Num10msFramesPerPacket() * SamplesPer10msFrame());
-  CHECK_EQ(0, WebRtcOpus_EncoderCreate(&inst_, config.num_channels,
-                                       config.application));
-  CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config.bitrate_bps));
+  RTC_CHECK_EQ(0, WebRtcOpus_EncoderCreate(&inst_, config.num_channels,
+                                           config.application));
+  RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config.bitrate_bps));
   if (config.fec_enabled) {
-    CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
   } else {
-    CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
   }
-  CHECK_EQ(0,
-           WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
-  CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, config.complexity));
+  RTC_CHECK_EQ(
+      0, WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
+  RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, config.complexity));
   if (config.dtx_enabled) {
-    CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
   } else {
-    CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
   }
-  CHECK_EQ(0, WebRtcOpus_SetPacketLossRate(
-                  inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+  RTC_CHECK_EQ(0,
+               WebRtcOpus_SetPacketLossRate(
+                   inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
   config_ = config;
   return true;
 }
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
index 5648c18..4e44b9a 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -104,7 +104,7 @@
 // Returns a vector with the n evenly-spaced numbers a, a + (b - a)/(n - 1),
 // ..., b.
 std::vector<double> IntervalSteps(double a, double b, size_t n) {
-  DCHECK_GT(n, 1u);
+  RTC_DCHECK_GT(n, 1u);
   const double step = (b - a) / (n - 1);
   std::vector<double> points;
   for (size_t i = 0; i < n; ++i)
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
index e3074df..90359a8 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -28,8 +28,8 @@
                                        int sample_rate_hz,
                                        int16_t* decoded,
                                        SpeechType* speech_type) {
-  DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
-         sample_rate_hz == 32000 || sample_rate_hz == 48000)
+  RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+             sample_rate_hz == 32000 || sample_rate_hz == 48000)
       << "Unsupported sample rate " << sample_rate_hz;
   size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
   *speech_type = ConvertSpeechType(1);
@@ -44,7 +44,7 @@
 
 AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(size_t num_channels)
     : channels_(num_channels) {
-  DCHECK(num_channels > 0);
+  RTC_DCHECK(num_channels > 0);
 }
 
 size_t AudioDecoderPcm16BMultiCh::Channels() const {
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index c8ae53f..a19d194 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -19,7 +19,7 @@
 AudioEncoderCopyRed::AudioEncoderCopyRed(const Config& config)
     : speech_encoder_(config.speech_encoder),
       red_payload_type_(config.payload_type) {
-  CHECK(speech_encoder_) << "Speech encoder not provided.";
+  RTC_CHECK(speech_encoder_) << "Speech encoder not provided.";
 }
 
 AudioEncoderCopyRed::~AudioEncoderCopyRed() = default;
@@ -60,26 +60,26 @@
   EncodedInfo info = speech_encoder_->Encode(
       rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
       max_encoded_bytes, encoded);
-  CHECK_GE(max_encoded_bytes,
-           info.encoded_bytes + secondary_info_.encoded_bytes);
-  CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
+  RTC_CHECK_GE(max_encoded_bytes,
+               info.encoded_bytes + secondary_info_.encoded_bytes);
+  RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
 
   if (info.encoded_bytes > 0) {
     // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
     // discarding the (empty) vector of redundant information. This is
     // intentional.
     info.redundant.push_back(info);
-    DCHECK_EQ(info.redundant.size(), 1u);
+    RTC_DCHECK_EQ(info.redundant.size(), 1u);
     if (secondary_info_.encoded_bytes > 0) {
       memcpy(&encoded[info.encoded_bytes], secondary_encoded_.data(),
              secondary_info_.encoded_bytes);
       info.redundant.push_back(secondary_info_);
-      DCHECK_EQ(info.redundant.size(), 2u);
+      RTC_DCHECK_EQ(info.redundant.size(), 2u);
     }
     // Save primary to secondary.
     secondary_encoded_.SetData(encoded, info.encoded_bytes);
     secondary_info_ = info;
-    DCHECK_EQ(info.speech, info.redundant[0].speech);
+    RTC_DCHECK_EQ(info.speech, info.redundant[0].speech);
   }
   // Update main EncodedInfo.
   info.payload_type = red_payload_type_;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index a1ddf4b..cb50652 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -87,8 +87,8 @@
                                    size_t max_encoded_bytes,
                                    uint8_t* encoded) {
     if (write_payload_) {
-      CHECK(encoded);
-      CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
+      RTC_CHECK(encoded);
+      RTC_CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
       memcpy(encoded, payload_, info_.encoded_bytes);
     }
     return info_;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index 91df16f..b059686 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -71,7 +71,8 @@
   // Insert audio and process until one packet is produced.
   while (clock_.TimeInMilliseconds() < test_duration_ms_) {
     clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
-    CHECK(audio_source_->Read(input_block_size_samples_, input_frame_.data_));
+    RTC_CHECK(
+        audio_source_->Read(input_block_size_samples_, input_frame_.data_));
     if (input_frame_.num_channels_ > 1) {
       InputAudioFile::DuplicateInterleaved(input_frame_.data_,
                                            input_block_size_samples_,
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index b84be29..7e2a3c6 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -53,8 +53,8 @@
                                       int payload_type,
                                       int frame_size_samples) {
   CodecInst codec;
-  CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec, sampling_freq_hz,
-                                       channels));
+  RTC_CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec,
+                                           sampling_freq_hz, channels));
   codec.pltype = payload_type;
   codec.pacsize = frame_size_samples;
   codec_registered_ = (acm_->RegisterSendCodec(codec) == 0);
@@ -84,7 +84,8 @@
   // Insert audio and process until one packet is produced.
   while (clock_.TimeInMilliseconds() < test_duration_ms_) {
     clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
-    CHECK(audio_source_->Read(input_block_size_samples_, input_frame_.data_));
+    RTC_CHECK(
+        audio_source_->Read(input_block_size_samples_, input_frame_.data_));
     if (input_frame_.num_channels_ > 1) {
       InputAudioFile::DuplicateInterleaved(input_frame_.data_,
                                            input_block_size_samples_,
@@ -92,7 +93,7 @@
                                            input_frame_.data_);
     }
     data_to_send_ = false;
-    CHECK_GE(acm_->Add10MsData(input_frame_), 0);
+    RTC_CHECK_GE(acm_->Add10MsData(input_frame_), 0);
     input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
     if (data_to_send_) {
       // Encoded packet received.
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 5aa320b..3013925 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -199,7 +199,7 @@
     frame_type = kFrameEmpty;
     encoded_info.payload_type = previous_pltype;
   } else {
-    DCHECK_GT(encode_buffer_.size(), 0u);
+    RTC_DCHECK_GT(encode_buffer_.size(), 0u);
     frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
   }
 
@@ -500,7 +500,7 @@
                                   bool enable_vad,
                                   ACMVADMode mode) {
   // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
-  DCHECK_EQ(enable_dtx, enable_vad);
+  RTC_DCHECK_EQ(enable_dtx, enable_vad);
   CriticalSectionScoped lock(acm_crit_sect_.get());
   return codec_manager_.SetVAD(enable_dtx, mode);
 }
@@ -580,7 +580,7 @@
 // for codecs, CNG (NB, WB and SWB), DTMF, RED.
 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  DCHECK(receiver_initialized_);
+  RTC_DCHECK(receiver_initialized_);
   if (codec.channels > 2 || codec.channels < 0) {
     LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
     return -1;
@@ -612,7 +612,7 @@
     int sample_rate_hz,
     int num_channels) {
   CriticalSectionScoped lock(acm_crit_sect_.get());
-  DCHECK(receiver_initialized_);
+  RTC_DCHECK(receiver_initialized_);
   if (num_channels > 2 || num_channels < 0) {
     LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels;
     return -1;
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
index c2e07eb..39905ad 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
@@ -185,7 +185,7 @@
 CodecManager::~CodecManager() = default;
 
 int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   int codec_id = IsValidSendCodec(send_codec, true);
 
   // Check for reported errors from function IsValidSendCodec().
@@ -264,7 +264,7 @@
   bool new_codec = true;
   if (codec_owner_.Encoder()) {
     int new_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_);
-    DCHECK_GE(new_codec_id, 0);
+    RTC_DCHECK_GE(new_codec_id, 0);
     new_codec = new_codec_id != codec_id;
   }
 
@@ -276,7 +276,7 @@
 
   if (new_codec) {
     // This is a new codec. Register it and return.
-    DCHECK(CodecSupported(send_codec));
+    RTC_DCHECK(CodecSupported(send_codec));
     if (IsOpus(send_codec)) {
       // VAD/DTX not supported.
       dtx_enabled_ = false;
@@ -284,7 +284,7 @@
     codec_owner_.SetEncoders(
         send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
         vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
-    DCHECK(codec_owner_.Encoder());
+    RTC_DCHECK(codec_owner_.Encoder());
 
     codec_fec_enabled_ = codec_fec_enabled_ &&
                          codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
@@ -300,7 +300,7 @@
     codec_owner_.SetEncoders(
         send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
         vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
-    DCHECK(codec_owner_.Encoder());
+    RTC_DCHECK(codec_owner_.Encoder());
   }
   send_codec_inst_.plfreq = send_codec.plfreq;
   send_codec_inst_.pacsize = send_codec.pacsize;
@@ -381,8 +381,8 @@
 
 int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
   // Sanity check of the mode.
-  DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
-         mode == VADVeryAggr);
+  RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
+             mode == VADVeryAggr);
 
   // Check that the send codec is mono. We don't support VAD/DTX for stereo
   // sending.
@@ -427,7 +427,7 @@
     return -1;
   }
 
-  CHECK(codec_owner_.Encoder());
+  RTC_CHECK(codec_owner_.Encoder());
   codec_fec_enabled_ =
       codec_owner_.Encoder()->SetFec(enable_codec_fec) && enable_codec_fec;
   return codec_fec_enabled_ == enable_codec_fec ? 0 : -1;
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_owner.cc b/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
index e2c4548..c07ecec 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
+++ b/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
@@ -202,7 +202,7 @@
   AudioEncoder* encoder =
       CreateRedEncoder(red_payload_type, speech_encoder, &red_encoder_);
   CreateCngEncoder(cng_payload_type, vad_mode, encoder, &cng_encoder_);
-  DCHECK_EQ(!!speech_encoder_ + !!external_speech_encoder_, 1);
+  RTC_DCHECK_EQ(!!speech_encoder_ + !!external_speech_encoder_, 1);
 }
 
 AudioDecoder* CodecOwner::GetIsacDecoder() {
@@ -230,7 +230,7 @@
 }
 
 const AudioEncoder* CodecOwner::SpeechEncoder() const {
-  DCHECK(!speech_encoder_ || !external_speech_encoder_);
+  RTC_DCHECK(!speech_encoder_ || !external_speech_encoder_);
   return external_speech_encoder_ ? external_speech_encoder_
                                   : speech_encoder_.get();
 }
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index 8925550..274eec0 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -48,7 +48,7 @@
                                      int sample_rate_hz,
                                      int16_t* decoded,
                                      SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 8000);
+  RTC_DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
   size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
@@ -78,7 +78,7 @@
                                      int sample_rate_hz,
                                      int16_t* decoded,
                                      SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 8000);
+  RTC_DCHECK_EQ(sample_rate_hz, 8000);
   int16_t temp_type = 1;  // Default is speech.
   size_t ret = WebRtcG711_DecodeA(encoded, encoded_len, decoded, &temp_type);
   *speech_type = ConvertSpeechType(temp_type);
@@ -115,7 +115,7 @@
                                      int sample_rate_hz,
                                      int16_t* decoded,
                                      SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 16000);
+  RTC_DCHECK_EQ(sample_rate_hz, 16000);
   int16_t temp_type = 1;  // Default is speech.
   size_t ret =
       WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
@@ -154,7 +154,7 @@
                                            int sample_rate_hz,
                                            int16_t* decoded,
                                            SpeechType* speech_type) {
-  DCHECK_EQ(sample_rate_hz, 16000);
+  RTC_DCHECK_EQ(sample_rate_hz, 16000);
   int16_t temp_type = 1;  // Default is speech.
   // De-interleave the bit-stream into two separate payloads.
   uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
@@ -218,7 +218,7 @@
 #endif
 
 AudioDecoderCng::AudioDecoderCng() {
-  CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
+  RTC_CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
   WebRtcCng_InitDec(dec_state_);
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index b476d7e..4b40dfd 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -140,8 +140,8 @@
                           uint8_t* output) {
     encoded_info_.encoded_bytes = 0;
     const size_t samples_per_10ms = audio_encoder_->SampleRateHz() / 100;
-    CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
-             input_len_samples);
+    RTC_CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
+                 input_len_samples);
     rtc::scoped_ptr<int16_t[]> interleaved_input(
         new int16_t[channels_ * samples_per_10ms]);
     for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
diff --git a/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc b/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
index b3c02e0..779d1d3 100644
--- a/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
+++ b/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -70,8 +70,8 @@
                            const uint8_t* payload,
                            size_t payload_length_bytes,
                            DtmfEvent* event) {
-  CHECK(payload);
-  CHECK(event);
+  RTC_CHECK(payload);
+  RTC_CHECK(event);
   if (payload_length_bytes < 4) {
     LOG(LS_WARNING) << "ParseEvent payload too short";
     return kPayloadTooShort;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index e6f7e60..02e9324 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -840,7 +840,7 @@
     // lookahead by moving the index.
     const size_t missing_lookahead_samples =
         expand_->overlap_length() - sync_buffer_->FutureLength();
-    DCHECK_GE(sync_buffer_->next_index(), missing_lookahead_samples);
+    RTC_DCHECK_GE(sync_buffer_->next_index(), missing_lookahead_samples);
     sync_buffer_->set_next_index(sync_buffer_->next_index() -
                                  missing_lookahead_samples);
   }
@@ -856,7 +856,7 @@
   *samples_per_channel = output_size_samples_;
 
   // Should always have overlap samples left in the |sync_buffer_|.
-  DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
+  RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
 
   if (play_dtmf) {
     return_value = DtmfOverdub(dtmf_event, sync_buffer_->Channels(), output);
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index 773d691..78c5e25 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -22,7 +22,8 @@
 
 namespace webrtc {
 
-// Allocating the static const so that it can be passed by reference to DCHECK.
+// Allocating the static const so that it can be passed by reference to
+// RTC_DCHECK.
 const size_t StatisticsCalculator::kLenWaitingTimes;
 
 StatisticsCalculator::PeriodicUmaLogger::PeriodicUmaLogger(
@@ -45,7 +46,7 @@
   LogToUma(Metric());
   Reset();
   timer_ -= report_interval_ms_;
-  DCHECK_GE(timer_, 0);
+  RTC_DCHECK_GE(timer_, 0);
 }
 
 void StatisticsCalculator::PeriodicUmaLogger::LogToUma(int value) const {
@@ -194,7 +195,7 @@
 
 void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
   excess_buffer_delay_.RegisterSample(waiting_time_ms);
-  DCHECK_LE(waiting_times_.size(), kLenWaitingTimes);
+  RTC_DCHECK_LE(waiting_times_.size(), kLenWaitingTimes);
   if (waiting_times_.size() == kLenWaitingTimes) {
     // Erase first value.
     waiting_times_.pop_front();
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
index cbe4b04..0769fd3 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -69,7 +69,7 @@
   }
 
   const int16_t* Next30Ms() {
-    CHECK(input_file_->Read(block_size_, audio_.get()));
+    RTC_CHECK(input_file_->Read(block_size_, audio_.get()));
     return audio_.get();
   }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index 016acde..dc07030 100644
--- a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -32,11 +32,11 @@
       timestamp_(0),
       payload_ssrc_(0xABCD1234) {
   size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
-  CHECK_EQ(2U, encoded_len);
+  RTC_CHECK_EQ(2U, encoded_len);
 }
 
 Packet* ConstantPcmPacketSource::NextPacket() {
-  CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
+  RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
   uint8_t* packet_memory = new uint8_t[packet_len_bytes_];
   // Fill the payload part of the packet memory with the pre-encoded value.
   for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
diff --git a/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc b/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
index e2ec419..76f3109 100644
--- a/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -45,16 +45,18 @@
   }
   // Find file boundaries.
   const long current_pos = ftell(fp_);
-  CHECK_NE(EOF, current_pos) << "Error returned when getting file position.";
-  CHECK_EQ(0, fseek(fp_, 0, SEEK_END));  // Move to end of file.
+  RTC_CHECK_NE(EOF, current_pos)
+      << "Error returned when getting file position.";
+  RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END));  // Move to end of file.
   const long file_size = ftell(fp_);
-  CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
+  RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
   // Find new position.
   long new_pos = current_pos + sizeof(int16_t) * samples;  // Samples to bytes.
-  CHECK_GE(new_pos, 0) << "Trying to move to before the beginning of the file";
+  RTC_CHECK_GE(new_pos, 0)
+      << "Trying to move to before the beginning of the file";
   new_pos = new_pos % file_size;  // Wrap around the end of the file.
   // Move to new position relative to the beginning of the file.
-  CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
+  RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
   return true;
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 1c028c9..0d3fb24 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -232,7 +232,7 @@
   const std::string out_filename = FLAGS_out_filename;
   const std::string log_filename = out_filename + ".log";
   log_file_.open(log_filename.c_str(), std::ofstream::out);
-  CHECK(log_file_.is_open());
+  RTC_CHECK(log_file_.is_open());
 
   if (out_filename.size() >= 4 &&
       out_filename.substr(out_filename.size() - 4) == ".wav") {
@@ -402,7 +402,7 @@
   } else {
     assert(channels == channels_);
     assert(samples == static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
-    CHECK(output_->WriteArray(out_data_.get(), samples * channels));
+    RTC_CHECK(output_->WriteArray(out_data_.get(), samples * channels));
     return static_cast<int>(samples);
   }
 }
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index d421976..300537b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -417,7 +417,7 @@
   // Check if an SSRC value was provided.
   if (!FLAGS_ssrc.empty()) {
     uint32_t ssrc;
-    CHECK(ParseSsrc(FLAGS_ssrc, &ssrc)) << "Flag verification has failed.";
+    RTC_CHECK(ParseSsrc(FLAGS_ssrc, &ssrc)) << "Flag verification has failed.";
     file_source->SelectSsrc(ssrc);
   }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
index d69918b..7a0bb1a 100644
--- a/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -20,22 +20,22 @@
                                   int output_rate_hz,
                                   int16_t* destination) {
   const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
-  CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
+  RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
       << "Frame size and sample rates don't add up to an integer.";
   rtc::scoped_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
   if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
     return false;
   resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
   size_t output_length = 0;
-  CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read, destination,
-                           samples, output_length),
-           0);
-  CHECK_EQ(samples, output_length);
+  RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
+                               destination, samples, output_length),
+               0);
+  RTC_CHECK_EQ(samples, output_length);
   return true;
 }
 
 bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
-  CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
+  RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
   return Read(samples, output_rate_hz_, destination);
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
index c2bccca..14e1051 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -63,7 +63,7 @@
 
 RtcEventLogSource* RtcEventLogSource::Create(const std::string& file_name) {
   RtcEventLogSource* source = new RtcEventLogSource();
-  CHECK(source->OpenFile(file_name));
+  RTC_CHECK(source->OpenFile(file_name));
   return source;
 }
 
@@ -71,7 +71,7 @@
 
 bool RtcEventLogSource::RegisterRtpHeaderExtension(RTPExtensionType type,
                                                    uint8_t id) {
-  CHECK(parser_.get());
+  RTC_CHECK(parser_.get());
   return parser_->RegisterRtpHeaderExtension(type, id);
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
index f5d323e..be3a62b 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -28,7 +28,7 @@
 
 RtpFileSource* RtpFileSource::Create(const std::string& file_name) {
   RtpFileSource* source = new RtpFileSource();
-  CHECK(source->OpenFile(file_name));
+  RTC_CHECK(source->OpenFile(file_name));
   return source;
 }
 
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc b/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
index f7490de..f2b87a5 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
@@ -28,18 +28,18 @@
 
   scoped_ptr<RtpFileWriter> output(
       RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
-  CHECK(output.get() != NULL) << "Cannot open output file.";
+  RTC_CHECK(output.get() != NULL) << "Cannot open output file.";
   printf("Output RTP file: %s\n", argv[argc - 1]);
 
   for (int i = 1; i < argc - 1; i++) {
     scoped_ptr<RtpFileReader> input(
         RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
-    CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
+    RTC_CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
     printf("Input RTP file: %s\n", argv[i]);
 
     webrtc::test::RtpPacket packet;
     while (input->NextPacket(&packet))
-      CHECK(output->WritePacket(&packet));
+      RTC_CHECK(output->WritePacket(&packet));
   }
   return 0;
 }
diff --git a/webrtc/modules/audio_device/android/audio_device_template.h b/webrtc/modules/audio_device/android/audio_device_template.h
index 653ff11..3935a63 100644
--- a/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/webrtc/modules/audio_device/android/audio_device_template.h
@@ -27,12 +27,12 @@
 // InputType/OutputType can be any class that implements the capturing/rendering
 // part of the AudioDeviceGeneric API.
 // Construction and destruction must be done on one and the same thread. Each
-// internal implementation of InputType and OutputType will DCHECK if that is
-// not the case. All implemented methods must also be called on the same thread.
-// See comments in each InputType/OutputType class for more
+// internal implementation of InputType and OutputType will RTC_DCHECK if that
+// is not the case. All implemented methods must also be called on the same
+// thread. See comments in each InputType/OutputType class for more info.
 // It is possible to call the two static methods (SetAndroidAudioDeviceObjects
 // and ClearAndroidAudioDeviceObjects) from a different thread but both will
-// CHECK that the calling thread is attached to a Java VM.
+// RTC_CHECK that the calling thread is attached to a Java VM.
 
 template <class InputType, class OutputType>
 class AudioDeviceTemplate : public AudioDeviceGeneric {
@@ -44,7 +44,7 @@
         output_(audio_manager_),
         input_(audio_manager_),
         initialized_(false) {
-    CHECK(audio_manager);
+    RTC_CHECK(audio_manager);
     audio_manager_->SetActiveAudioLayer(audio_layer);
   }
 
@@ -58,8 +58,8 @@
   }
 
   int32_t Init() override {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    DCHECK(!initialized_);
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(!initialized_);
     if (!audio_manager_->Init())
       return -1;
     if (output_.Init() != 0) {
@@ -76,17 +76,17 @@
   }
 
   int32_t Terminate() override {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     int32_t err = input_.Terminate();
     err |= output_.Terminate();
     err |= !audio_manager_->Close();
     initialized_ = false;
-    DCHECK_EQ(err, 0);
+    RTC_DCHECK_EQ(err, 0);
     return err;
   }
 
   bool Initialized() const override {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return initialized_;
   }
 
@@ -388,14 +388,14 @@
   int32_t PlayoutDelay(uint16_t& delay_ms) const override {
     // Best guess we can do is to use half of the estimated total delay.
     delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
-    DCHECK_GT(delay_ms, 0);
+    RTC_DCHECK_GT(delay_ms, 0);
     return 0;
   }
 
   int32_t RecordingDelay(uint16_t& delay_ms) const override {
     // Best guess we can do is to use half of the estimated total delay.
     delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
-    DCHECK_GT(delay_ms, 0);
+    RTC_DCHECK_GT(delay_ms, 0);
     return 0;
   }
 
@@ -456,7 +456,7 @@
   }
 
   int32_t EnableBuiltInAEC(bool enable) override {
-    CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
+    RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
     return input_.EnableBuiltInAEC(enable);
   }
 
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 9440d50..087bb2d 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -833,7 +833,8 @@
 
 // Verify that calling StopPlayout() will leave us in an uninitialized state
 // which will require a new call to InitPlayout(). This test does not call
-// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK.
 TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
   EXPECT_EQ(0, audio_device()->InitPlayout());
   EXPECT_EQ(0, audio_device()->StartPlayout());
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 77099ab..283b324 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -71,7 +71,7 @@
       low_latency_playout_(false),
       delay_estimate_in_milliseconds_(0) {
   ALOGD("ctor%s", GetThreadInfo().c_str());
-  CHECK(j_environment_);
+  RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheAudioParameters",
        "(IIZZIIJ)V",
@@ -88,15 +88,15 @@
 
 AudioManager::~AudioManager() {
   ALOGD("~dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Close();
 }
 
 void AudioManager::SetActiveAudioLayer(
     AudioDeviceModule::AudioLayer audio_layer) {
   ALOGD("SetActiveAudioLayer(%d)%s", audio_layer, GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!initialized_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
   // Store the currenttly utilized audio layer.
   audio_layer_ = audio_layer;
   // The delay estimate can take one of two fixed values depending on if the
@@ -112,9 +112,9 @@
 
 bool AudioManager::Init() {
   ALOGD("Init%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!initialized_);
-  DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
   if (!j_audio_manager_->Init()) {
     ALOGE("init failed!");
     return false;
@@ -125,7 +125,7 @@
 
 bool AudioManager::Close() {
   ALOGD("Close%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_)
     return true;
   j_audio_manager_->Close();
@@ -135,17 +135,17 @@
 
 bool AudioManager::IsCommunicationModeEnabled() const {
   ALOGD("IsCommunicationModeEnabled()");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return j_audio_manager_->IsCommunicationModeEnabled();
 }
 
 bool AudioManager::IsAcousticEchoCancelerSupported() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return hardware_aec_;
 }
 
 bool AudioManager::IsLowLatencyPlayoutSupported() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   ALOGD("IsLowLatencyPlayoutSupported()");
   // Some devices are blacklisted for usage of OpenSL ES even if they report
   // that low-latency playout is supported. See b/21485703 for details.
@@ -187,7 +187,7 @@
   ALOGD("channels: %d", channels);
   ALOGD("output_buffer_size: %d", output_buffer_size);
   ALOGD("input_buffer_size: %d", input_buffer_size);
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   hardware_aec_ = hardware_aec;
   low_latency_playout_ = low_latency_output;
   // TODO(henrika): add support for stereo output.
@@ -198,14 +198,14 @@
 }
 
 const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
-  CHECK(playout_parameters_.is_valid());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(playout_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return playout_parameters_;
 }
 
 const AudioParameters& AudioManager::GetRecordAudioParameters() {
-  CHECK(record_parameters_.is_valid());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_CHECK(record_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return record_parameters_;
 }
 
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index c9d0f99..dbebd3f 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -72,8 +72,8 @@
       recording_(false),
       audio_device_buffer_(nullptr) {
   ALOGD("ctor%s", GetThreadInfo().c_str());
-  DCHECK(audio_parameters_.is_valid());
-  CHECK(j_environment_);
+  RTC_DCHECK(audio_parameters_.is_valid());
+  RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
@@ -95,28 +95,28 @@
 
 AudioRecordJni::~AudioRecordJni() {
   ALOGD("~dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Terminate();
 }
 
 int32_t AudioRecordJni::Init() {
   ALOGD("Init%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }
 
 int32_t AudioRecordJni::Terminate() {
   ALOGD("Terminate%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   StopRecording();
   return 0;
 }
 
 int32_t AudioRecordJni::InitRecording() {
   ALOGD("InitRecording%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!initialized_);
-  DCHECK(!recording_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!recording_);
   int frames_per_buffer = j_audio_record_->InitRecording(
       audio_parameters_.sample_rate(), audio_parameters_.channels());
   if (frames_per_buffer < 0) {
@@ -125,18 +125,18 @@
   }
   frames_per_buffer_ = static_cast<size_t>(frames_per_buffer);
   ALOGD("frames_per_buffer: %" PRIuS, frames_per_buffer_);
-  CHECK_EQ(direct_buffer_capacity_in_bytes_,
-           frames_per_buffer_ * kBytesPerFrame);
-  CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
+  RTC_CHECK_EQ(direct_buffer_capacity_in_bytes_,
+               frames_per_buffer_ * kBytesPerFrame);
+  RTC_CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
   initialized_ = true;
   return 0;
 }
 
 int32_t AudioRecordJni::StartRecording() {
   ALOGD("StartRecording%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(initialized_);
-  DCHECK(!recording_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!recording_);
   if (!j_audio_record_->StartRecording()) {
     ALOGE("StartRecording failed!");
     return -1;
@@ -147,7 +147,7 @@
 
 int32_t AudioRecordJni::StopRecording() {
   ALOGD("StopRecording%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_ || !recording_) {
     return 0;
   }
@@ -155,8 +155,9 @@
     ALOGE("StopRecording failed!");
     return -1;
   }
-  // If we don't detach here, we will hit a DCHECK in OnDataIsRecorded() next
-  // time StartRecording() is called since it will create a new Java thread.
+  // If we don't detach here, we will hit a RTC_DCHECK in OnDataIsRecorded()
+  // next time StartRecording() is called since it will create a new Java
+  // thread.
   thread_checker_java_.DetachFromThread();
   initialized_ = false;
   recording_ = false;
@@ -165,7 +166,7 @@
 
 void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   ALOGD("AttachAudioBuffer");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   audio_device_buffer_ = audioBuffer;
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
@@ -175,13 +176,13 @@
   audio_device_buffer_->SetRecordingChannels(channels);
   total_delay_in_milliseconds_ =
       audio_manager_->GetDelayEstimateInMilliseconds();
-  DCHECK_GT(total_delay_in_milliseconds_, 0);
+  RTC_DCHECK_GT(total_delay_in_milliseconds_, 0);
   ALOGD("total_delay_in_milliseconds: %d", total_delay_in_milliseconds_);
 }
 
 int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
   ALOGD("EnableBuiltInAEC%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return j_audio_record_->EnableBuiltInAEC(enable) ? 0 : -1;
 }
 
@@ -195,8 +196,8 @@
 void AudioRecordJni::OnCacheDirectBufferAddress(
     JNIEnv* env, jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!direct_buffer_address_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!direct_buffer_address_);
   direct_buffer_address_ =
       env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
@@ -214,7 +215,7 @@
 // This method is called on a high-priority thread from Java. The name of
 // the thread is 'AudioRecordThread'.
 void AudioRecordJni::OnDataIsRecorded(int length) {
-  DCHECK(thread_checker_java_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_java_.CalledOnValidThread());
   if (!audio_device_buffer_) {
     ALOGE("AttachAudioBuffer has not been called!");
     return;
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index 6a17eb3..adf381e 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -35,7 +35,7 @@
 //
 // An instance must be created and destroyed on one and the same thread.
 // All public methods must also be called on the same thread. A thread checker
-// will DCHECK if any method is called on an invalid thread.
+// will RTC_DCHECK if any method is called on an invalid thread.
 //
 // This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
 // and detach when the object goes out of scope. Additional thread checking
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index f92f93e..36c2c14 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -76,8 +76,8 @@
       playing_(false),
       audio_device_buffer_(nullptr) {
   ALOGD("ctor%s", GetThreadInfo().c_str());
-  DCHECK(audio_parameters_.is_valid());
-  CHECK(j_environment_);
+  RTC_DCHECK(audio_parameters_.is_valid());
+  RTC_CHECK(j_environment_);
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
       reinterpret_cast<void*>(
@@ -99,28 +99,28 @@
 
 AudioTrackJni::~AudioTrackJni() {
   ALOGD("~dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Terminate();
 }
 
 int32_t AudioTrackJni::Init() {
   ALOGD("Init%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }
 
 int32_t AudioTrackJni::Terminate() {
   ALOGD("Terminate%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   StopPlayout();
   return 0;
 }
 
 int32_t AudioTrackJni::InitPlayout() {
   ALOGD("InitPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!initialized_);
-  DCHECK(!playing_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
   j_audio_track_->InitPlayout(
       audio_parameters_.sample_rate(), audio_parameters_.channels());
   initialized_ = true;
@@ -129,9 +129,9 @@
 
 int32_t AudioTrackJni::StartPlayout() {
   ALOGD("StartPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(initialized_);
-  DCHECK(!playing_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!playing_);
   if (!j_audio_track_->StartPlayout()) {
     ALOGE("StartPlayout failed!");
     return -1;
@@ -142,7 +142,7 @@
 
 int32_t AudioTrackJni::StopPlayout() {
   ALOGD("StopPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_ || !playing_) {
     return 0;
   }
@@ -150,8 +150,9 @@
     ALOGE("StopPlayout failed!");
     return -1;
   }
-  // If we don't detach here, we will hit a DCHECK in OnDataIsRecorded() next
-  // time StartRecording() is called since it will create a new Java thread.
+  // If we don't detach here, we will hit a RTC_DCHECK in OnGetPlayoutData()
+  // next time StartPlayout() is called since it will create a new Java
+  // thread.
   thread_checker_java_.DetachFromThread();
   initialized_ = false;
   playing_ = false;
@@ -165,27 +166,27 @@
 
 int AudioTrackJni::SetSpeakerVolume(uint32_t volume) {
   ALOGD("SetSpeakerVolume(%d)%s", volume, GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return j_audio_track_->SetStreamVolume(volume) ? 0 : -1;
 }
 
 int AudioTrackJni::MaxSpeakerVolume(uint32_t& max_volume) const {
   ALOGD("MaxSpeakerVolume%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   max_volume = j_audio_track_->GetStreamMaxVolume();
   return 0;
 }
 
 int AudioTrackJni::MinSpeakerVolume(uint32_t& min_volume) const {
   ALOGD("MaxSpeakerVolume%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   min_volume = 0;
   return 0;
 }
 
 int AudioTrackJni::SpeakerVolume(uint32_t& volume) const {
   ALOGD("SpeakerVolume%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   volume = j_audio_track_->GetStreamVolume();
   return 0;
 }
@@ -193,7 +194,7 @@
 // TODO(henrika): possibly add stereo support.
 void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   ALOGD("AttachAudioBuffer%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   audio_device_buffer_ = audioBuffer;
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
@@ -213,7 +214,7 @@
 void AudioTrackJni::OnCacheDirectBufferAddress(
     JNIEnv* env, jobject byte_buffer) {
   ALOGD("OnCacheDirectBufferAddress");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   direct_buffer_address_ =
       env->GetDirectBufferAddress(byte_buffer);
   jlong capacity = env->GetDirectBufferCapacity(byte_buffer);
@@ -233,8 +234,8 @@
 // This method is called on a high-priority thread from Java. The name of
 // the thread is 'AudioRecordTrack'.
 void AudioTrackJni::OnGetPlayoutData(size_t length) {
-  DCHECK(thread_checker_java_.CalledOnValidThread());
-  DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame);
+  RTC_DCHECK(thread_checker_java_.CalledOnValidThread());
+  RTC_DCHECK_EQ(frames_per_buffer_, length / kBytesPerFrame);
   if (!audio_device_buffer_) {
     ALOGE("AttachAudioBuffer has not been called!");
     return;
@@ -245,11 +246,11 @@
     ALOGE("AudioDeviceBuffer::RequestPlayoutData failed!");
     return;
   }
-  DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
+  RTC_DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
   // Copy decoded data into common byte buffer to ensure that it can be
   // written to the Java based audio track.
   samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
-  DCHECK_EQ(length, kBytesPerFrame * samples);
+  RTC_DCHECK_EQ(length, kBytesPerFrame * samples);
 }
 
 }  // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 058bd8d..43bfcad 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -31,7 +31,7 @@
 //
 // An instance must be created and destroyed on one and the same thread.
 // All public methods must also be called on the same thread. A thread checker
-// will DCHECK if any method is called on an invalid thread.
+// will RTC_DCHECK if any method is called on an invalid thread.
 //
 // This class uses AttachCurrentThreadIfNeeded to attach to a Java VM if needed
 // and detach when the object goes out of scope. Additional thread checking
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
index aea71f7..d9b2871 100644
--- a/webrtc/modules/audio_device/android/build_info.h
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -23,7 +23,7 @@
 // The calling thread is attached to the JVM at construction if needed and a
 // valid Java environment object is also created.
 // All Get methods must be called on the creating thread. If not, the code will
-// hit DCHECKs when calling JNIEnvironment::JavaToStdString().
+// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
 class BuildInfo {
  public:
   BuildInfo();
diff --git a/webrtc/modules/audio_device/android/ensure_initialized.cc b/webrtc/modules/audio_device/android/ensure_initialized.cc
index e870fae..e8197b7 100644
--- a/webrtc/modules/audio_device/android/ensure_initialized.cc
+++ b/webrtc/modules/audio_device/android/ensure_initialized.cc
@@ -12,12 +12,10 @@
 
 #include <pthread.h>
 
-// Note: this dependency is dangerous since it reaches into Chromium's
-// base. You can't include anything in this file that includes WebRTC's
-// base/checks.h, for instance, since it will clash with Chromium's
-// logging.h. Therefore, the CHECKs in this file will actually use
-// Chromium's checks rather than the WebRTC ones.
+// Note: this dependency is dangerous since it reaches into Chromium's base.
+// There's a risk of e.g. macro clashes. This file may only be used in tests.
 #include "base/android/jni_android.h"
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 #include "webrtc/modules/audio_device/android/audio_track_jni.h"
 #include "webrtc/modules/utility/interface/jvm_android.h"
@@ -28,10 +26,10 @@
 static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
 
 void EnsureInitializedOnce() {
-  CHECK(::base::android::IsVMInitialized());
+  RTC_CHECK(::base::android::IsVMInitialized());
   JNIEnv* jni = ::base::android::AttachCurrentThread();
   JavaVM* jvm = NULL;
-  CHECK_EQ(0, jni->GetJavaVM(&jvm));
+  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
   jobject context = ::base::android::GetApplicationContext();
 
   // Initialize the Java environment (currently only used by the audio manager).
@@ -39,7 +37,7 @@
 }
 
 void EnsureInitialized() {
-  CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
 }
 
 }  // namespace audiodevicemodule
diff --git a/webrtc/modules/audio_device/android/opensles_common.h b/webrtc/modules/audio_device/android/opensles_common.h
index 75e4ff4..a4487b0 100644
--- a/webrtc/modules/audio_device/android/opensles_common.h
+++ b/webrtc/modules/audio_device/android/opensles_common.h
@@ -28,7 +28,7 @@
   ~ScopedSLObject() { Reset(); }
 
   SLType* Receive() {
-    DCHECK(!obj_);
+    RTC_DCHECK(!obj_);
     return &obj_;
   }
 
diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc
index 5cf2191..b9ccfd5 100644
--- a/webrtc/modules/audio_device/android/opensles_player.cc
+++ b/webrtc/modules/audio_device/android/opensles_player.cc
@@ -60,37 +60,37 @@
 
 OpenSLESPlayer::~OpenSLESPlayer() {
   ALOGD("dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Terminate();
   DestroyAudioPlayer();
   DestroyMix();
   DestroyEngine();
-  DCHECK(!engine_object_.Get());
-  DCHECK(!engine_);
-  DCHECK(!output_mix_.Get());
-  DCHECK(!player_);
-  DCHECK(!simple_buffer_queue_);
-  DCHECK(!volume_);
+  RTC_DCHECK(!engine_object_.Get());
+  RTC_DCHECK(!engine_);
+  RTC_DCHECK(!output_mix_.Get());
+  RTC_DCHECK(!player_);
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_DCHECK(!volume_);
 }
 
 int OpenSLESPlayer::Init() {
   ALOGD("Init%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return 0;
 }
 
 int OpenSLESPlayer::Terminate() {
   ALOGD("Terminate%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   StopPlayout();
   return 0;
 }
 
 int OpenSLESPlayer::InitPlayout() {
   ALOGD("InitPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!initialized_);
-  DCHECK(!playing_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!initialized_);
+  RTC_DCHECK(!playing_);
   CreateEngine();
   CreateMix();
   initialized_ = true;
@@ -100,9 +100,9 @@
 
 int OpenSLESPlayer::StartPlayout() {
   ALOGD("StartPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(initialized_);
-  DCHECK(!playing_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!playing_);
   // The number of lower latency audio players is limited, hence we create the
   // audio player in Start() and destroy it in Stop().
   CreateAudioPlayer();
@@ -118,13 +118,13 @@
   // state, adding buffers will implicitly start playback.
   RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
   playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
-  DCHECK(playing_);
+  RTC_DCHECK(playing_);
   return 0;
 }
 
 int OpenSLESPlayer::StopPlayout() {
   ALOGD("StopPlayout%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_ || !playing_) {
     return 0;
   }
@@ -136,8 +136,8 @@
   // Verify that the buffer queue is in fact cleared as it should.
   SLAndroidSimpleBufferQueueState buffer_queue_state;
   (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
-  DCHECK_EQ(0u, buffer_queue_state.count);
-  DCHECK_EQ(0u, buffer_queue_state.index);
+  RTC_DCHECK_EQ(0u, buffer_queue_state.count);
+  RTC_DCHECK_EQ(0u, buffer_queue_state.index);
 #endif
   // The number of lower latency audio players is limited, hence we create the
   // audio player in Start() and destroy it in Stop().
@@ -171,7 +171,7 @@
 
 void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   ALOGD("AttachAudioBuffer");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   audio_device_buffer_ = audioBuffer;
   const int sample_rate_hz = audio_parameters_.sample_rate();
   ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
@@ -179,7 +179,7 @@
   const int channels = audio_parameters_.channels();
   ALOGD("SetPlayoutChannels(%d)", channels);
   audio_device_buffer_->SetPlayoutChannels(channels);
-  CHECK(audio_device_buffer_);
+  RTC_CHECK(audio_device_buffer_);
   AllocateDataBuffers();
 }
 
@@ -188,7 +188,7 @@
     int sample_rate,
     size_t bits_per_sample) {
   ALOGD("CreatePCMConfiguration");
-  CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+  RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
   SLDataFormat_PCM format;
   format.formatType = SL_DATAFORMAT_PCM;
   format.numChannels = static_cast<SLuint32>(channels);
@@ -213,7 +213,7 @@
       format.samplesPerSec = SL_SAMPLINGRATE_48;
       break;
     default:
-      CHECK(false) << "Unsupported sample rate: " << sample_rate;
+      RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
   }
   format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
   format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
@@ -223,15 +223,16 @@
   else if (format.numChannels == 2)
     format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
   else
-    CHECK(false) << "Unsupported number of channels: " << format.numChannels;
+    RTC_CHECK(false) << "Unsupported number of channels: "
+                     << format.numChannels;
   return format;
 }
 
 void OpenSLESPlayer::AllocateDataBuffers() {
   ALOGD("AllocateDataBuffers");
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!simple_buffer_queue_);
-  CHECK(audio_device_buffer_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_CHECK(audio_device_buffer_);
   bytes_per_buffer_ = audio_parameters_.GetBytesPerBuffer();
   ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_);
   // Create a modified audio buffer class which allows us to ask for any number
@@ -252,10 +253,10 @@
 
 bool OpenSLESPlayer::CreateEngine() {
   ALOGD("CreateEngine");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (engine_object_.Get())
     return true;
-  DCHECK(!engine_);
+  RTC_DCHECK(!engine_);
   const SLEngineOption option[] = {
     {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
   RETURN_ON_ERROR(
@@ -271,7 +272,7 @@
 
 void OpenSLESPlayer::DestroyEngine() {
   ALOGD("DestroyEngine");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!engine_object_.Get())
     return;
   engine_ = nullptr;
@@ -280,8 +281,8 @@
 
 bool OpenSLESPlayer::CreateMix() {
   ALOGD("CreateMix");
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(engine_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(engine_);
   if (output_mix_.Get())
     return true;
 
@@ -296,7 +297,7 @@
 
 void OpenSLESPlayer::DestroyMix() {
   ALOGD("DestroyMix");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!output_mix_.Get())
     return;
   output_mix_.Reset();
@@ -304,14 +305,14 @@
 
 bool OpenSLESPlayer::CreateAudioPlayer() {
   ALOGD("CreateAudioPlayer");
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(engine_object_.Get());
-  DCHECK(output_mix_.Get());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(engine_object_.Get());
+  RTC_DCHECK(output_mix_.Get());
   if (player_object_.Get())
     return true;
-  DCHECK(!player_);
-  DCHECK(!simple_buffer_queue_);
-  DCHECK(!volume_);
+  RTC_DCHECK(!player_);
+  RTC_DCHECK(!simple_buffer_queue_);
+  RTC_DCHECK(!volume_);
 
   // source: Android Simple Buffer Queue Data Locator is source.
   SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
@@ -389,7 +390,7 @@
 
 void OpenSLESPlayer::DestroyAudioPlayer() {
   ALOGD("DestroyAudioPlayer");
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!player_object_.Get())
     return;
   player_object_.Reset();
@@ -407,7 +408,7 @@
 }
 
 void OpenSLESPlayer::FillBufferQueue() {
-  DCHECK(thread_checker_opensles_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
   SLuint32 state = GetPlayState();
   if (state != SL_PLAYSTATE_PLAYING) {
     ALOGW("Buffer callback in non-playing state!");
@@ -433,7 +434,7 @@
 }
 
 SLuint32 OpenSLESPlayer::GetPlayState() const {
-  DCHECK(player_);
+  RTC_DCHECK(player_);
   SLuint32 state;
   SLresult err = (*player_)->GetPlayState(player_, &state);
   if (SL_RESULT_SUCCESS != err) {
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
index 79cc6f4..d96388b 100644
--- a/webrtc/modules/audio_device/android/opensles_player.h
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -33,7 +33,7 @@
 //
 // An instance must be created and destroyed on one and the same thread.
 // All public methods must also be called on the same thread. A thread checker
-// will DCHECK if any method is called on an invalid thread. Decoded audio
+// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
 // buffers are requested on a dedicated internal thread managed by the OpenSL
 // ES layer.
 //
diff --git a/webrtc/modules/audio_device/fine_audio_buffer.cc b/webrtc/modules/audio_device/fine_audio_buffer.cc
index 374d8ed..c3b07ee 100644
--- a/webrtc/modules/audio_device/fine_audio_buffer.cc
+++ b/webrtc/modules/audio_device/fine_audio_buffer.cc
@@ -70,8 +70,8 @@
            desired_frame_size_bytes_);
     playout_cached_buffer_start_ += desired_frame_size_bytes_;
     playout_cached_bytes_ -= desired_frame_size_bytes_;
-    CHECK_LT(playout_cached_buffer_start_ + playout_cached_bytes_,
-             bytes_per_10_ms_);
+    RTC_CHECK_LT(playout_cached_buffer_start_ + playout_cached_bytes_,
+                 bytes_per_10_ms_);
     return;
   }
   memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
@@ -88,15 +88,15 @@
     device_buffer_->RequestPlayoutData(samples_per_10_ms_);
     int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
     if (static_cast<size_t>(num_out) != samples_per_10_ms_) {
-      CHECK_EQ(num_out, 0);
+      RTC_CHECK_EQ(num_out, 0);
       playout_cached_bytes_ = 0;
       return;
     }
     unwritten_buffer += bytes_per_10_ms_;
-    CHECK_GE(bytes_left, 0);
+    RTC_CHECK_GE(bytes_left, 0);
     bytes_left -= static_cast<int>(bytes_per_10_ms_);
   }
-  CHECK_LE(bytes_left, 0);
+  RTC_CHECK_LE(bytes_left, 0);
   // Put the samples that were written to |buffer| but are not used in the
   // cache.
   size_t cache_location = desired_frame_size_bytes_;
@@ -105,8 +105,8 @@
                           (desired_frame_size_bytes_ - playout_cached_bytes_);
   // If playout_cached_bytes_ is larger than the cache buffer, uninitialized
   // memory will be read.
-  CHECK_LE(playout_cached_bytes_, bytes_per_10_ms_);
-  CHECK_EQ(static_cast<size_t>(-bytes_left), playout_cached_bytes_);
+  RTC_CHECK_LE(playout_cached_bytes_, bytes_per_10_ms_);
+  RTC_CHECK_EQ(static_cast<size_t>(-bytes_left), playout_cached_bytes_);
   playout_cached_buffer_start_ = 0;
   memcpy(playout_cache_buffer_.get(), cache_ptr, playout_cached_bytes_);
 }
@@ -115,7 +115,7 @@
                                           size_t size_in_bytes,
                                           int playout_delay_ms,
                                           int record_delay_ms) {
-  CHECK_EQ(size_in_bytes, desired_frame_size_bytes_);
+  RTC_CHECK_EQ(size_in_bytes, desired_frame_size_bytes_);
   // Check if the temporary buffer can store the incoming buffer. If not,
   // move the remaining (old) bytes to the beginning of the temporary buffer
   // and start adding new samples after the old samples.
diff --git a/webrtc/modules/audio_device/fine_audio_buffer.h b/webrtc/modules/audio_device/fine_audio_buffer.h
index 14d5e0c..4ab5cd2 100644
--- a/webrtc/modules/audio_device/fine_audio_buffer.h
+++ b/webrtc/modules/audio_device/fine_audio_buffer.h
@@ -58,7 +58,8 @@
   // They can be fixed values on most platforms and they are ignored if an
   // external (hardware/built-in) AEC is used.
   // The size of |buffer| is given by |size_in_bytes| and must be equal to
-  // |desired_frame_size_bytes_|. A CHECK will be hit if this is not the case.
+  // |desired_frame_size_bytes_|. A RTC_CHECK will be hit if this is not the
+  // case.
   // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
   // 5ms of data and sends a total of 10ms to WebRTC and clears the intenal
   // cache. Call #3 restarts the scheme above.
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h
index 6fa2d4a..eb8b876 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -28,8 +28,8 @@
 //
 // An instance must be created and destroyed on one and the same thread.
 // All supported public methods must also be called on the same thread.
-// A thread checker will DCHECK if any supported method is called on an invalid
-// thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
 //
 // Recorded audio will be delivered on a real-time internal I/O thread in the
 // audio unit. The audio unit will also ask for audio data to play out on this
@@ -218,7 +218,7 @@
   // audio session is activated and we verify that the preferred parameters
   // were granted by the OS. At this stage it is also possible to add a third
   // component to the parameters; the native I/O buffer duration.
-  // A CHECK will be hit if we for some reason fail to open an audio session
+  // A RTC_CHECK will be hit if we for some reason fail to open an audio session
   // using the specified parameters.
   AudioParameters _playoutParameters;
   AudioParameters _recordParameters;
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
index 5a6047c..b134143 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -55,7 +55,7 @@
 // mono natively for built-in microphones and for BT headsets but not for
 // wired headsets. Wired headsets only support stereo as native channel format
 // but it is a low cost operation to do a format conversion to mono in the
-// audio unit. Hence, we will not hit a CHECK in
+// audio unit. Hence, we will not hit a RTC_CHECK in
 // VerifyAudioParametersForActiveAudioSession() for a mismatch between the
 // preferred number of channels and the actual number of channels.
 const int kPreferredNumberOfChannels = 1;
@@ -80,7 +80,7 @@
     // Deactivate the audio session and return if |activate| is false.
     if (!activate) {
       success = [session setActive:NO error:&error];
-      DCHECK(CheckAndLogError(success, error));
+      RTC_DCHECK(CheckAndLogError(success, error));
       return;
     }
     // Use a category which supports simultaneous recording and playback.
@@ -91,13 +91,13 @@
       error = nil;
       success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
                                error:&error];
-      DCHECK(CheckAndLogError(success, error));
+      RTC_DCHECK(CheckAndLogError(success, error));
     }
     // Specify mode for two-way voice communication (e.g. VoIP).
     if (session.mode != AVAudioSessionModeVoiceChat) {
       error = nil;
       success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
-      DCHECK(CheckAndLogError(success, error));
+      RTC_DCHECK(CheckAndLogError(success, error));
     }
     // Set the session's sample rate or the hardware sample rate.
     // It is essential that we use the same sample rate as stream format
@@ -105,13 +105,13 @@
     error = nil;
     success =
         [session setPreferredSampleRate:kPreferredSampleRate error:&error];
-    DCHECK(CheckAndLogError(success, error));
+    RTC_DCHECK(CheckAndLogError(success, error));
     // Set the preferred audio I/O buffer duration, in seconds.
     // TODO(henrika): add more comments here.
     error = nil;
     success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
                                               error:&error];
-    DCHECK(CheckAndLogError(success, error));
+    RTC_DCHECK(CheckAndLogError(success, error));
 
     // TODO(henrika): add observers here...
 
@@ -119,12 +119,12 @@
     // session (e.g. phone call) has higher priority than ours.
     error = nil;
     success = [session setActive:YES error:&error];
-    DCHECK(CheckAndLogError(success, error));
-    CHECK(session.isInputAvailable) << "No input path is available!";
+    RTC_DCHECK(CheckAndLogError(success, error));
+    RTC_CHECK(session.isInputAvailable) << "No input path is available!";
     // Ensure that category and mode are actually activated.
-    DCHECK(
+    RTC_DCHECK(
         [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
-    DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
+    RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
     // Try to set the preferred number of hardware audio channels. These calls
     // must be done after setting the audio session’s category and mode and
     // activating the session.
@@ -136,12 +136,12 @@
     success =
         [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
                                              error:&error];
-    DCHECK(CheckAndLogError(success, error));
+    RTC_DCHECK(CheckAndLogError(success, error));
     error = nil;
     success =
         [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
                                               error:&error];
-    DCHECK(CheckAndLogError(success, error));
+    RTC_DCHECK(CheckAndLogError(success, error));
   }
 }
 
@@ -190,20 +190,20 @@
 
 AudioDeviceIOS::~AudioDeviceIOS() {
   LOGI() << "~dtor";
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   Terminate();
 }
 
 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   LOGI() << "AttachAudioBuffer";
-  DCHECK(audioBuffer);
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(audioBuffer);
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   _audioDeviceBuffer = audioBuffer;
 }
 
 int32_t AudioDeviceIOS::Init() {
   LOGI() << "Init";
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   if (_initialized) {
     return 0;
   }
@@ -227,7 +227,7 @@
 
 int32_t AudioDeviceIOS::Terminate() {
   LOGI() << "Terminate";
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   if (!_initialized) {
     return 0;
   }
@@ -238,10 +238,10 @@
 
 int32_t AudioDeviceIOS::InitPlayout() {
   LOGI() << "InitPlayout";
-  DCHECK(_threadChecker.CalledOnValidThread());
-  DCHECK(_initialized);
-  DCHECK(!_playIsInitialized);
-  DCHECK(!_playing);
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_initialized);
+  RTC_DCHECK(!_playIsInitialized);
+  RTC_DCHECK(!_playing);
   if (!_recIsInitialized) {
     if (!InitPlayOrRecord()) {
       LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
@@ -254,10 +254,10 @@
 
 int32_t AudioDeviceIOS::InitRecording() {
   LOGI() << "InitRecording";
-  DCHECK(_threadChecker.CalledOnValidThread());
-  DCHECK(_initialized);
-  DCHECK(!_recIsInitialized);
-  DCHECK(!_recording);
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_initialized);
+  RTC_DCHECK(!_recIsInitialized);
+  RTC_DCHECK(!_recording);
   if (!_playIsInitialized) {
     if (!InitPlayOrRecord()) {
       LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
@@ -270,9 +270,9 @@
 
 int32_t AudioDeviceIOS::StartPlayout() {
   LOGI() << "StartPlayout";
-  DCHECK(_threadChecker.CalledOnValidThread());
-  DCHECK(_playIsInitialized);
-  DCHECK(!_playing);
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_playIsInitialized);
+  RTC_DCHECK(!_playing);
   _fineAudioBuffer->ResetPlayout();
   if (!_recording) {
     OSStatus result = AudioOutputUnitStart(_vpioUnit);
@@ -287,7 +287,7 @@
 
 int32_t AudioDeviceIOS::StopPlayout() {
   LOGI() << "StopPlayout";
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   if (!_playIsInitialized || !_playing) {
     return 0;
   }
@@ -301,9 +301,9 @@
 
 int32_t AudioDeviceIOS::StartRecording() {
   LOGI() << "StartRecording";
-  DCHECK(_threadChecker.CalledOnValidThread());
-  DCHECK(_recIsInitialized);
-  DCHECK(!_recording);
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_recIsInitialized);
+  RTC_DCHECK(!_recording);
   _fineAudioBuffer->ResetRecord();
   if (!_playing) {
     OSStatus result = AudioOutputUnitStart(_vpioUnit);
@@ -318,7 +318,7 @@
 
 int32_t AudioDeviceIOS::StopRecording() {
   LOGI() << "StopRecording";
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   if (!_recIsInitialized || !_recording) {
     return 0;
   }
@@ -377,16 +377,16 @@
 
 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
   LOGI() << "GetPlayoutAudioParameters";
-  DCHECK(_playoutParameters.is_valid());
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_playoutParameters.is_valid());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   *params = _playoutParameters;
   return 0;
 }
 
 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
   LOGI() << "GetRecordAudioParameters";
-  DCHECK(_recordParameters.is_valid());
-  DCHECK(_threadChecker.CalledOnValidThread());
+  RTC_DCHECK(_recordParameters.is_valid());
+  RTC_DCHECK(_threadChecker.CalledOnValidThread());
   *params = _recordParameters;
   return 0;
 }
@@ -395,7 +395,7 @@
   LOGI() << "UpdateAudioDevicebuffer";
   // AttachAudioBuffer() is called at construction by the main class but check
   // just in case.
-  DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first";
+  RTC_DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first";
   // Inform the audio device buffer (ADB) about the new audio format.
   _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate());
   _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels());
@@ -428,16 +428,16 @@
   // Hence, 128 is the size we expect to see in upcoming render callbacks.
   _playoutParameters.reset(session.sampleRate, _playoutParameters.channels(),
                            session.IOBufferDuration);
-  DCHECK(_playoutParameters.is_complete());
+  RTC_DCHECK(_playoutParameters.is_complete());
   _recordParameters.reset(session.sampleRate, _recordParameters.channels(),
                           session.IOBufferDuration);
-  DCHECK(_recordParameters.is_complete());
+  RTC_DCHECK(_recordParameters.is_complete());
   LOG(LS_INFO) << " frames per I/O buffer: "
                << _playoutParameters.frames_per_buffer();
   LOG(LS_INFO) << " bytes per I/O buffer: "
                << _playoutParameters.GetBytesPerBuffer();
-  DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(),
-            _recordParameters.GetBytesPerBuffer());
+  RTC_DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(),
+                _recordParameters.GetBytesPerBuffer());
 
   // Update the ADB parameters since the sample rate might have changed.
   UpdateAudioDeviceBuffer();
@@ -445,7 +445,7 @@
   // Create a modified audio buffer class which allows us to ask for,
   // or deliver, any number of samples (and not only multiple of 10ms) to match
   // the native audio unit buffer size.
-  DCHECK(_audioDeviceBuffer);
+  RTC_DCHECK(_audioDeviceBuffer);
   _fineAudioBuffer.reset(new FineAudioBuffer(
       _audioDeviceBuffer, _playoutParameters.GetBytesPerBuffer(),
       _playoutParameters.sample_rate()));
@@ -474,7 +474,7 @@
 
 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
   LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
-  DCHECK(!_vpioUnit);
+  RTC_DCHECK(!_vpioUnit);
   // Create an audio component description to identify the Voice-Processing
   // I/O audio unit.
   AudioComponentDescription vpioUnitDescription;
@@ -519,8 +519,9 @@
   // - no need to specify interleaving since only mono is supported
   AudioStreamBasicDescription applicationFormat = {0};
   UInt32 size = sizeof(applicationFormat);
-  DCHECK_EQ(_playoutParameters.sample_rate(), _recordParameters.sample_rate());
-  DCHECK_EQ(1, kPreferredNumberOfChannels);
+  RTC_DCHECK_EQ(_playoutParameters.sample_rate(),
+                _recordParameters.sample_rate());
+  RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
   applicationFormat.mSampleRate = _playoutParameters.sample_rate();
   applicationFormat.mFormatID = kAudioFormatLinearPCM;
   applicationFormat.mFormatFlags =
@@ -680,8 +681,8 @@
     UInt32 inBusNumber,
     UInt32 inNumberFrames,
     AudioBufferList* ioData) {
-  DCHECK_EQ(1u, inBusNumber);
-  DCHECK(!ioData);  // no buffer should be allocated for input at this stage
+  RTC_DCHECK_EQ(1u, inBusNumber);
+  RTC_DCHECK(!ioData);  // no buffer should be allocated for input at this stage
   AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon);
   return audio_device_ios->OnRecordedDataIsAvailable(
       ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames);
@@ -692,7 +693,7 @@
     const AudioTimeStamp* inTimeStamp,
     UInt32 inBusNumber,
     UInt32 inNumberFrames) {
-  DCHECK_EQ(_recordParameters.frames_per_buffer(), inNumberFrames);
+  RTC_DCHECK_EQ(_recordParameters.frames_per_buffer(), inNumberFrames);
   OSStatus result = noErr;
   // Simply return if recording is not enabled.
   if (!rtc::AtomicOps::AcquireLoad(&_recording))
@@ -712,7 +713,7 @@
   // Use the FineAudioBuffer instance to convert between native buffer size
   // and the 10ms buffer size used by WebRTC.
   const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize;
-  CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
+  RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
   SInt8* data = static_cast<SInt8*>(ioData->mBuffers[0].mData);
   _fineAudioBuffer->DeliverRecordedData(data, dataSizeInBytes,
                                         kFixedPlayoutDelayEstimate,
@@ -727,8 +728,8 @@
     UInt32 inBusNumber,
     UInt32 inNumberFrames,
     AudioBufferList* ioData) {
-  DCHECK_EQ(0u, inBusNumber);
-  DCHECK(ioData);
+  RTC_DCHECK_EQ(0u, inBusNumber);
+  RTC_DCHECK(ioData);
   AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon);
   return audio_device_ios->OnGetPlayoutData(ioActionFlags, inNumberFrames,
                                             ioData);
@@ -739,12 +740,12 @@
     UInt32 inNumberFrames,
     AudioBufferList* ioData) {
   // Verify 16-bit, noninterleaved mono PCM signal format.
-  DCHECK_EQ(1u, ioData->mNumberBuffers);
-  DCHECK_EQ(1u, ioData->mBuffers[0].mNumberChannels);
+  RTC_DCHECK_EQ(1u, ioData->mNumberBuffers);
+  RTC_DCHECK_EQ(1u, ioData->mBuffers[0].mNumberChannels);
   // Get pointer to internal audio buffer to which new audio data shall be
   // written.
   const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize;
-  CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
+  RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
   SInt8* destination = static_cast<SInt8*>(ioData->mBuffers[0].mData);
   // Produce silence and give audio unit a hint about it if playout is not
   // activated.
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index 211be03..d639fea 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -627,7 +627,8 @@
 
 // Verify that calling StopPlayout() will leave us in an uninitialized state
 // which will require a new call to InitPlayout(). This test does not call
-// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK.
 TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
   EXPECT_EQ(0, audio_device()->InitPlayout());
   EXPECT_EQ(0, audio_device()->StartPlayout());
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
index 3bbc185..7bb7347 100644
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -106,7 +106,7 @@
 {
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                  "%s destroyed", __FUNCTION__);
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     Terminate();
 
     if (_recBuffer)
@@ -139,7 +139,7 @@
 
 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
     _ptrAudioBuffer = audioBuffer;
 
@@ -165,7 +165,7 @@
 
 int32_t AudioDeviceLinuxPulse::Init()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_initialized)
     {
         return 0;
@@ -235,7 +235,7 @@
 
 int32_t AudioDeviceLinuxPulse::Terminate()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (!_initialized)
     {
         return 0;
@@ -286,13 +286,13 @@
 
 bool AudioDeviceLinuxPulse::Initialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_initialized);
 }
 
 int32_t AudioDeviceLinuxPulse::InitSpeaker()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
     if (_playing)
     {
@@ -336,7 +336,7 @@
 
 int32_t AudioDeviceLinuxPulse::InitMicrophone()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_recording)
     {
         return -1;
@@ -379,19 +379,19 @@
 
 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_mixerManager.SpeakerIsInitialized());
 }
 
 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_mixerManager.MicrophoneIsInitialized());
 }
 
 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
 
     // Make an attempt to open up the
@@ -418,7 +418,7 @@
 
 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (!_playing) {
       // Only update the volume if it's been set while we weren't playing.
       update_speaker_volume_at_startup_ = true;
@@ -428,7 +428,7 @@
 
 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     uint32_t level(0);
 
     if (_mixerManager.SpeakerVolume(level) == -1)
@@ -464,7 +464,7 @@
 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
     uint32_t& maxVolume) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     uint32_t maxVol(0);
 
     if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
@@ -480,7 +480,7 @@
 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
     uint32_t& minVolume) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     uint32_t minVol(0);
 
     if (_mixerManager.MinSpeakerVolume(minVol) == -1)
@@ -496,7 +496,7 @@
 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
     uint16_t& stepSize) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     uint16_t delta(0);
 
     if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
@@ -511,7 +511,7 @@
 
 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool isAvailable(false);
     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
 
@@ -543,13 +543,13 @@
 
 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_mixerManager.SetSpeakerMute(enable));
 }
 
 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool muted(0);
     if (_mixerManager.SpeakerMute(muted) == -1)
     {
@@ -562,7 +562,7 @@
 
 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool isAvailable(false);
     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
 
@@ -595,13 +595,13 @@
 
 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_mixerManager.SetMicrophoneMute(enable));
 }
 
 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool muted(0);
     if (_mixerManager.MicrophoneMute(muted) == -1)
     {
@@ -614,7 +614,7 @@
 
 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool isAvailable(false);
     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
 
@@ -645,13 +645,13 @@
 
 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_mixerManager.SetMicrophoneBoost(enable));
 }
 
 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool onOff(0);
 
     if (_mixerManager.MicrophoneBoost(onOff) == -1)
@@ -666,7 +666,7 @@
 
 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_recChannels == 2 && _recording) {
       available = true;
       return 0;
@@ -700,7 +700,7 @@
 
 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (enable)
         _recChannels = 2;
     else
@@ -711,7 +711,7 @@
 
 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_recChannels == 2)
         enabled = true;
     else
@@ -722,7 +722,7 @@
 
 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_playChannels == 2 && _playing) {
       available = true;
       return 0;
@@ -755,7 +755,7 @@
 
 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (enable)
         _playChannels = 2;
     else
@@ -766,7 +766,7 @@
 
 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_playChannels == 2)
         enabled = true;
     else
@@ -792,7 +792,7 @@
 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
     bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
 
     // Make an attempt to open up the
@@ -876,7 +876,7 @@
 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
     uint16_t& stepSize) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     uint16_t delta(0);
 
     if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
@@ -910,7 +910,7 @@
 
 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_playIsInitialized)
     {
         return -1;
@@ -947,7 +947,7 @@
     char name[kAdmMaxDeviceNameSize],
     char guid[kAdmMaxGuidSize])
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     const uint16_t nDevices = PlayoutDevices();
 
     if ((index > (nDevices - 1)) || (name == NULL))
@@ -989,7 +989,7 @@
     char name[kAdmMaxDeviceNameSize],
     char guid[kAdmMaxGuidSize])
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     const uint16_t nDevices(RecordingDevices());
 
     if ((index > (nDevices - 1)) || (name == NULL))
@@ -1047,7 +1047,7 @@
 
 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_recIsInitialized)
     {
         return -1;
@@ -1081,7 +1081,7 @@
 
 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     available = false;
 
     // Try to initialize the playout side
@@ -1100,7 +1100,7 @@
 
 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     available = false;
 
     // Try to initialize the playout side
@@ -1119,7 +1119,7 @@
 
 int32_t AudioDeviceLinuxPulse::InitPlayout()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
     if (_playing)
     {
@@ -1241,7 +1241,7 @@
 
 int32_t AudioDeviceLinuxPulse::InitRecording()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
     if (_recording)
     {
@@ -1353,7 +1353,7 @@
 
 int32_t AudioDeviceLinuxPulse::StartRecording()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (!_recIsInitialized)
     {
         return -1;
@@ -1400,7 +1400,7 @@
 
 int32_t AudioDeviceLinuxPulse::StopRecording()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     CriticalSectionScoped lock(&_critSect);
 
     if (!_recIsInitialized)
@@ -1463,25 +1463,25 @@
 
 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_recIsInitialized);
 }
 
 bool AudioDeviceLinuxPulse::Recording() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_recording);
 }
 
 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_playIsInitialized);
 }
 
 int32_t AudioDeviceLinuxPulse::StartPlayout()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
     if (!_playIsInitialized)
     {
@@ -1535,7 +1535,7 @@
 
 int32_t AudioDeviceLinuxPulse::StopPlayout()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     CriticalSectionScoped lock(&_critSect);
 
     if (!_playIsInitialized)
@@ -1607,14 +1607,14 @@
 
 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     delayMS = (uint16_t) _sndCardRecDelay;
     return 0;
 }
 
 bool AudioDeviceLinuxPulse::Playing() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     return (_playing);
 }
 
@@ -1622,7 +1622,7 @@
     const AudioDeviceModule::BufferType type,
     uint16_t sizeMS)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (type != AudioDeviceModule::kFixedBufferSize)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -1640,7 +1640,7 @@
     AudioDeviceModule::BufferType& type,
     uint16_t& sizeMS) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     type = _playBufType;
     sizeMS = _playBufDelayFixed;
 
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
index 418dd3d..495a7eb 100644
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -304,7 +304,7 @@
     // Stores thread ID in constructor.
     // We can then use ThreadChecker::CalledOnValidThread() to ensure that
     // other methods are called from the same thread.
-    // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+    // Currently only does RTC_DCHECK(thread_checker_.CalledOnValidThread()).
     rtc::ThreadChecker thread_checker_;
 
     bool _initialized;
diff --git a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
index 4df2d94..bc2662e 100644
--- a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
+++ b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -63,7 +63,7 @@
 
 AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                  "%s destructed", __FUNCTION__);
 
@@ -78,7 +78,7 @@
     pa_threaded_mainloop* mainloop,
     pa_context* context)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -101,7 +101,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::Close()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -118,7 +118,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -131,7 +131,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -144,7 +144,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)");
 
@@ -154,7 +154,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetRecStream(recStream)");
 
@@ -165,7 +165,7 @@
 int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
     uint16_t deviceIndex)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)",
                  deviceIndex);
@@ -192,7 +192,7 @@
 int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
     uint16_t deviceIndex)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::OpenMicrophone"
                  "(deviceIndex=%d)", deviceIndex);
@@ -218,7 +218,7 @@
 
 bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -227,7 +227,7 @@
 
 bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
                  __FUNCTION__);
 
@@ -237,7 +237,7 @@
 int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
     uint32_t volume)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)",
                  volume);
@@ -372,7 +372,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paOutputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -394,7 +394,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paOutputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -411,7 +411,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paOutputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -427,7 +427,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)",
                  enable);
@@ -512,7 +512,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paOutputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -546,7 +546,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -590,7 +590,7 @@
 int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
     bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -606,7 +606,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)",
                  enable);
@@ -661,7 +661,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -698,7 +698,7 @@
 int32_t
 AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -716,7 +716,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                  "AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)",
                  enable);
@@ -745,7 +745,7 @@
 
 int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -762,7 +762,7 @@
 int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
     bool& available)
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -931,7 +931,7 @@
 int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
     uint16_t& stepSize) const
 {
-    DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
     if (_paInputDeviceIndex == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
diff --git a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
index 8567631..cb3d632 100644
--- a/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
@@ -111,7 +111,7 @@
     // Stores thread ID in constructor.
     // We can then use ThreadChecker::CalledOnValidThread() to ensure that
     // other methods are called from the same thread.
-    // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+    // Currently only does RTC_DCHECK(thread_checker_.CalledOnValidThread()).
     rtc::ThreadChecker thread_checker_;
 };
 
diff --git a/webrtc/modules/audio_device/mac/audio_device_mac.cc b/webrtc/modules/audio_device/mac/audio_device_mac.cc
index 90e32dc..77dab0b 100644
--- a/webrtc/modules/audio_device/mac/audio_device_mac.cc
+++ b/webrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -91,8 +91,8 @@
                               const int32_t id, const char *msg,
                               const char *err)
 {
-    DCHECK(msg != NULL);
-    DCHECK(err != NULL);
+    RTC_DCHECK(msg != NULL);
+    RTC_DCHECK(err != NULL);
 
 #ifdef WEBRTC_ARCH_BIG_ENDIAN
     WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
@@ -154,8 +154,8 @@
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                  "%s created", __FUNCTION__);
 
-    DCHECK(&_stopEvent != NULL);
-    DCHECK(&_stopEventRec != NULL);
+    RTC_DCHECK(&_stopEvent != NULL);
+    RTC_DCHECK(&_stopEventRec != NULL);
 
     memset(_renderConvertData, 0, sizeof(_renderConvertData));
     memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
@@ -175,8 +175,8 @@
         Terminate();
     }
 
-    DCHECK(!capture_worker_thread_.get());
-    DCHECK(!render_worker_thread_.get());
+    RTC_DCHECK(!capture_worker_thread_.get());
+    RTC_DCHECK(!render_worker_thread_.get());
 
     if (_paRenderBuffer)
     {
@@ -1664,10 +1664,10 @@
         return -1;
     }
 
-    DCHECK(!capture_worker_thread_.get());
+    RTC_DCHECK(!capture_worker_thread_.get());
     capture_worker_thread_ =
         ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
-    DCHECK(capture_worker_thread_.get());
+    RTC_DCHECK(capture_worker_thread_.get());
     capture_worker_thread_->Start();
     capture_worker_thread_->SetPriority(kRealtimePriority);
 
@@ -1819,7 +1819,7 @@
         return 0;
     }
 
-    DCHECK(!render_worker_thread_.get());
+    RTC_DCHECK(!render_worker_thread_.get());
     render_worker_thread_ =
         ThreadWrapper::CreateThread(RunRender, this, "RenderWorkerThread");
     render_worker_thread_->Start();
@@ -2466,7 +2466,7 @@
     void* clientData)
 {
     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
-    DCHECK(ptrThis != NULL);
+    RTC_DCHECK(ptrThis != NULL);
 
     ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
 
@@ -2752,7 +2752,7 @@
                                       void *clientData)
 {
     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
-    DCHECK(ptrThis != NULL);
+    RTC_DCHECK(ptrThis != NULL);
 
     ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
 
@@ -2767,7 +2767,7 @@
                                           void *userData)
 {
     AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
-    DCHECK(ptrThis != NULL);
+    RTC_DCHECK(ptrThis != NULL);
 
     return ptrThis->implOutConverterProc(numberDataPackets, data);
 }
@@ -2779,7 +2779,7 @@
                                         const AudioTimeStamp*, void* clientData)
 {
     AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
-    DCHECK(ptrThis != NULL);
+    RTC_DCHECK(ptrThis != NULL);
 
     ptrThis->implInDeviceIOProc(inputData, inputTime);
 
@@ -2795,7 +2795,7 @@
     void *userData)
 {
     AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
-    DCHECK(ptrThis != NULL);
+    RTC_DCHECK(ptrThis != NULL);
 
     return ptrThis->implInConverterProc(numberDataPackets, data);
 }
@@ -2852,7 +2852,7 @@
         return 0;
     }
 
-    DCHECK(_outStreamFormat.mBytesPerFrame != 0);
+    RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
     UInt32 size = outputData->mBuffers->mDataByteSize
         / _outStreamFormat.mBytesPerFrame;
 
@@ -2893,7 +2893,7 @@
 OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
                                               AudioBufferList *data)
 {
-    DCHECK(data->mNumberBuffers == 1);
+    RTC_DCHECK(data->mNumberBuffers == 1);
     PaRingBufferSize numSamples = *numberDataPackets
         * _outDesiredFormat.mChannelsPerFrame;
 
@@ -2967,7 +2967,7 @@
 
     AtomicSet32(&_captureDelayUs, captureDelayUs);
 
-    DCHECK(inputData->mNumberBuffers == 1);
+    RTC_DCHECK(inputData->mNumberBuffers == 1);
     PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize
         * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
     PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
@@ -2986,7 +2986,7 @@
 OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
                                              AudioBufferList *data)
 {
-    DCHECK(data->mNumberBuffers == 1);
+    RTC_DCHECK(data->mNumberBuffers == 1);
     PaRingBufferSize numSamples = *numberDataPackets
         * _inStreamFormat.mChannelsPerFrame;
 
diff --git a/webrtc/modules/audio_processing/agc/agc.cc b/webrtc/modules/audio_processing/agc/agc.cc
index 9786d7b..706b963 100644
--- a/webrtc/modules/audio_processing/agc/agc.cc
+++ b/webrtc/modules/audio_processing/agc/agc.cc
@@ -54,7 +54,7 @@
   const std::vector<double>& rms = vad_.chunkwise_rms();
   const std::vector<double>& probabilities =
       vad_.chunkwise_voice_probabilities();
-  DCHECK_EQ(rms.size(), probabilities.size());
+  RTC_DCHECK_EQ(rms.size(), probabilities.size());
   for (size_t i = 0; i < rms.size(); ++i) {
     histogram_->Update(rms[i], probabilities[i]);
   }
diff --git a/webrtc/modules/audio_processing/beamformer/complex_matrix.h b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
index f5be2b2..bfa3563 100644
--- a/webrtc/modules/audio_processing/beamformer/complex_matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
@@ -59,8 +59,8 @@
   }
 
   ComplexMatrix& ConjugateTranspose(const ComplexMatrix& operand) {
-    CHECK_EQ(operand.num_rows(), this->num_columns());
-    CHECK_EQ(operand.num_columns(), this->num_rows());
+    RTC_CHECK_EQ(operand.num_rows(), this->num_columns());
+    RTC_CHECK_EQ(operand.num_columns(), this->num_rows());
     return ConjugateTranspose(operand.elements());
   }
 
diff --git a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
index ed81247..efc5b0f 100644
--- a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
+++ b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -32,8 +32,8 @@
     float wave_number,
     const std::vector<Point>& geometry,
     ComplexMatrix<float>* mat) {
-  CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
-  CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
+  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
 
   complex<float>* const* mat_els = mat->elements();
   for (size_t i = 0; i < geometry.size(); ++i) {
@@ -57,8 +57,8 @@
     int sample_rate,
     const std::vector<Point>& geometry,
     ComplexMatrix<float>* mat) {
-  CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
-  CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
+  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
 
   ComplexMatrix<float> interf_cov_vector(1, geometry.size());
   ComplexMatrix<float> interf_cov_vector_transposed(geometry.size(), 1);
@@ -82,8 +82,8 @@
     const std::vector<Point>& geometry,
     float angle,
     ComplexMatrix<float>* mat) {
-  CHECK_EQ(1, mat->num_rows());
-  CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+  RTC_CHECK_EQ(1, mat->num_rows());
+  RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
 
   float freq_in_hertz =
       (static_cast<float>(frequency_bin) / fft_size) * sample_rate;
diff --git a/webrtc/modules/audio_processing/beamformer/matrix.h b/webrtc/modules/audio_processing/beamformer/matrix.h
index 442ddce..162aef1 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix.h
@@ -121,7 +121,7 @@
   const T* const* elements() const { return &elements_[0]; }
 
   T Trace() {
-    CHECK_EQ(num_rows_, num_columns_);
+    RTC_CHECK_EQ(num_rows_, num_columns_);
 
     T trace = 0;
     for (int i = 0; i < num_rows_; ++i) {
@@ -138,8 +138,8 @@
   }
 
   Matrix& Transpose(const Matrix& operand) {
-    CHECK_EQ(operand.num_rows_, num_columns_);
-    CHECK_EQ(operand.num_columns_, num_rows_);
+    RTC_CHECK_EQ(operand.num_rows_, num_columns_);
+    RTC_CHECK_EQ(operand.num_columns_, num_rows_);
 
     return Transpose(operand.elements());
   }
@@ -160,8 +160,8 @@
   }
 
   Matrix& Add(const Matrix& operand) {
-    CHECK_EQ(num_rows_, operand.num_rows_);
-    CHECK_EQ(num_columns_, operand.num_columns_);
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
 
     for (size_t i = 0; i < data_.size(); ++i) {
       data_[i] += operand.data_[i];
@@ -176,8 +176,8 @@
   }
 
   Matrix& Subtract(const Matrix& operand) {
-    CHECK_EQ(num_rows_, operand.num_rows_);
-    CHECK_EQ(num_columns_, operand.num_columns_);
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
 
     for (size_t i = 0; i < data_.size(); ++i) {
       data_[i] -= operand.data_[i];
@@ -192,8 +192,8 @@
   }
 
   Matrix& PointwiseMultiply(const Matrix& operand) {
-    CHECK_EQ(num_rows_, operand.num_rows_);
-    CHECK_EQ(num_columns_, operand.num_columns_);
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
 
     for (size_t i = 0; i < data_.size(); ++i) {
       data_[i] *= operand.data_[i];
@@ -208,8 +208,8 @@
   }
 
   Matrix& PointwiseDivide(const Matrix& operand) {
-    CHECK_EQ(num_rows_, operand.num_rows_);
-    CHECK_EQ(num_columns_, operand.num_columns_);
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
 
     for (size_t i = 0; i < data_.size(); ++i) {
       data_[i] /= operand.data_[i];
@@ -263,15 +263,15 @@
   }
 
   Matrix& Multiply(const Matrix& lhs, const Matrix& rhs) {
-    CHECK_EQ(lhs.num_columns_, rhs.num_rows_);
-    CHECK_EQ(num_rows_, lhs.num_rows_);
-    CHECK_EQ(num_columns_, rhs.num_columns_);
+    RTC_CHECK_EQ(lhs.num_columns_, rhs.num_rows_);
+    RTC_CHECK_EQ(num_rows_, lhs.num_rows_);
+    RTC_CHECK_EQ(num_columns_, rhs.num_columns_);
 
     return Multiply(lhs.elements(), rhs.num_rows_, rhs.elements());
   }
 
   Matrix& Multiply(const Matrix& rhs) {
-    CHECK_EQ(num_columns_, rhs.num_rows_);
+    RTC_CHECK_EQ(num_columns_, rhs.num_rows_);
 
     CopyDataToScratch();
     Resize(num_rows_, rhs.num_columns_);
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index f7e80b5..da7ad0d 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -80,9 +80,9 @@
 // The returned norm is clamped to be non-negative.
 float Norm(const ComplexMatrix<float>& mat,
            const ComplexMatrix<float>& norm_mat) {
-  CHECK_EQ(norm_mat.num_rows(), 1);
-  CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
-  CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
+  RTC_CHECK_EQ(norm_mat.num_rows(), 1);
+  RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
+  RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
 
   complex<float> first_product = complex<float>(0.f, 0.f);
   complex<float> second_product = complex<float>(0.f, 0.f);
@@ -103,9 +103,9 @@
 // Does conjugate(|lhs|) * |rhs| for row vectors |lhs| and |rhs|.
 complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
                                    const ComplexMatrix<float>& rhs) {
-  CHECK_EQ(lhs.num_rows(), 1);
-  CHECK_EQ(rhs.num_rows(), 1);
-  CHECK_EQ(lhs.num_columns(), rhs.num_columns());
+  RTC_CHECK_EQ(lhs.num_rows(), 1);
+  RTC_CHECK_EQ(rhs.num_rows(), 1);
+  RTC_CHECK_EQ(lhs.num_columns(), rhs.num_columns());
 
   const complex<float>* const* lhs_elements = lhs.elements();
   const complex<float>* const* rhs_elements = rhs.elements();
@@ -151,9 +151,9 @@
 // Does |out| = |in|.' * conj(|in|) for row vector |in|.
 void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
                                  ComplexMatrix<float>* out) {
-  CHECK_EQ(in.num_rows(), 1);
-  CHECK_EQ(out->num_rows(), in.num_columns());
-  CHECK_EQ(out->num_columns(), in.num_columns());
+  RTC_CHECK_EQ(in.num_rows(), 1);
+  RTC_CHECK_EQ(out->num_rows(), in.num_columns());
+  RTC_CHECK_EQ(out->num_columns(), in.num_columns());
   const complex<float>* in_elements = in.elements()[0];
   complex<float>* const* out_elements = out->elements();
   for (int i = 0; i < out->num_rows(); ++i) {
@@ -207,11 +207,11 @@
   //   constant               ^                        ^
   //             low_mean_end_bin_       high_mean_end_bin_
   //
-  DCHECK_GT(low_mean_start_bin_, 0U);
-  DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
-  DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
-  DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
-  DCHECK_LT(high_mean_end_bin_, kNumFreqBins - 1);
+  RTC_DCHECK_GT(low_mean_start_bin_, 0U);
+  RTC_DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
+  RTC_DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
+  RTC_DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
+  RTC_DCHECK_LT(high_mean_end_bin_, kNumFreqBins - 1);
 
   high_pass_postfilter_mask_ = 1.f;
   is_target_present_ = false;
@@ -312,8 +312,8 @@
 
 void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
                                        ChannelBuffer<float>* output) {
-  DCHECK_EQ(input.num_channels(), num_input_channels_);
-  DCHECK_EQ(input.num_frames_per_band(), chunk_length_);
+  RTC_DCHECK_EQ(input.num_channels(), num_input_channels_);
+  RTC_DCHECK_EQ(input.num_frames_per_band(), chunk_length_);
 
   float old_high_pass_mask = high_pass_postfilter_mask_;
   lapped_transform_->ProcessChunk(input.channels(0), output->channels(0));
@@ -352,9 +352,9 @@
                                             size_t num_freq_bins,
                                             int num_output_channels,
                                             complex_f* const* output) {
-  CHECK_EQ(num_freq_bins, kNumFreqBins);
-  CHECK_EQ(num_input_channels, num_input_channels_);
-  CHECK_EQ(num_output_channels, 1);
+  RTC_CHECK_EQ(num_freq_bins, kNumFreqBins);
+  RTC_CHECK_EQ(num_input_channels, num_input_channels_);
+  RTC_CHECK_EQ(num_output_channels, 1);
 
   // Calculating the post-filter masks. Note that we need two for each
   // frequency bin to account for the positive and negative interferer
@@ -493,7 +493,7 @@
 
 // Compute mean over the given range of time_smooth_mask_, [first, last).
 float NonlinearBeamformer::MaskRangeMean(size_t first, size_t last) {
-  DCHECK_GT(last, first);
+  RTC_DCHECK_GT(last, first);
   const float sum = std::accumulate(time_smooth_mask_ + first,
                                     time_smooth_mask_ + last, 0.f);
   return sum / (last - first);
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
index 82a6cb0..cc75248 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -47,7 +47,7 @@
   const size_t num_mics = in_file.num_channels();
   const std::vector<Point> array_geometry =
       ParseArrayGeometry(FLAGS_mic_positions, num_mics);
-  CHECK_EQ(array_geometry.size(), num_mics);
+  RTC_CHECK_EQ(array_geometry.size(), num_mics);
 
   NonlinearBeamformer bf(array_geometry);
   bf.Initialize(kChunkSizeMs, in_file.sample_rate());
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index 33ff5cd..d014ce0 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -58,7 +58,7 @@
     size_t frames,
     int /* out_channels */,
     complex<float>* const* out_block) {
-  DCHECK_EQ(parent_->freqs_, frames);
+  RTC_DCHECK_EQ(parent_->freqs_, frames);
   for (int i = 0; i < in_channels; ++i) {
     parent_->DispatchAudio(source_, in_block[i], out_block[i]);
   }
@@ -103,7 +103,7 @@
       capture_callback_(this, AudioSource::kCaptureStream),
       block_count_(0),
       analysis_step_(0) {
-  DCHECK_LE(config.rho, 1.0f);
+  RTC_DCHECK_LE(config.rho, 1.0f);
 
   CreateErbBank();
 
@@ -130,8 +130,8 @@
 void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
                                                  int sample_rate_hz,
                                                  int num_channels) {
-  CHECK_EQ(sample_rate_hz_, sample_rate_hz);
-  CHECK_EQ(num_render_channels_, num_channels);
+  RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+  RTC_CHECK_EQ(num_render_channels_, num_channels);
 
   if (active_) {
     render_mangler_->ProcessChunk(audio, temp_render_out_buffer_.channels());
@@ -148,8 +148,8 @@
 void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio,
                                                   int sample_rate_hz,
                                                   int num_channels) {
-  CHECK_EQ(sample_rate_hz_, sample_rate_hz);
-  CHECK_EQ(num_capture_channels_, num_channels);
+  RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+  RTC_CHECK_EQ(num_capture_channels_, num_channels);
 
   capture_mangler_->ProcessChunk(audio, temp_capture_out_buffer_.channels());
 }
@@ -357,7 +357,7 @@
 }
 
 void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) {
-  DCHECK_GT(freqs_, 0u);
+  RTC_DCHECK_GT(freqs_, 0u);
   for (size_t i = 0; i < bank_size_; ++i) {
     result[i] = DotProduct(&filter_bank_[i][0], var, freqs_);
   }
diff --git a/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc b/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc
index c35ddb4..3a43471 100644
--- a/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc
+++ b/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc
@@ -34,9 +34,9 @@
                               instance_index, process_rate);
 
   // Ensure there was no buffer output error.
-  DCHECK_GE(written, 0);
+  RTC_DCHECK_GE(written, 0);
   // Ensure that the buffer size was sufficient.
-  DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
+  RTC_DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
 
   *wav_file = rtc_WavOpen(filename, sample_rate, 1);
 }
@@ -47,9 +47,9 @@
                               instance_index);
 
   // Ensure there was no buffer output error.
-  DCHECK_GE(written, 0);
+  RTC_DCHECK_GE(written, 0);
   // Ensure that the buffer size was sufficient.
-  DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
+  RTC_DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
 
   *file = fopen(filename, "wb");
 }
diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc
index 06af56e..60427e2 100644
--- a/webrtc/modules/audio_processing/splitting_filter.cc
+++ b/webrtc/modules/audio_processing/splitting_filter.cc
@@ -20,7 +20,7 @@
                                  size_t num_bands,
                                  size_t num_frames)
     : num_bands_(num_bands) {
-  CHECK(num_bands_ == 2 || num_bands_ == 3);
+  RTC_CHECK(num_bands_ == 2 || num_bands_ == 3);
   if (num_bands_ == 2) {
     two_bands_states_.resize(num_channels);
   } else if (num_bands_ == 3) {
@@ -32,10 +32,10 @@
 
 void SplittingFilter::Analysis(const IFChannelBuffer* data,
                                IFChannelBuffer* bands) {
-  DCHECK_EQ(num_bands_, bands->num_bands());
-  DCHECK_EQ(data->num_channels(), bands->num_channels());
-  DCHECK_EQ(data->num_frames(),
-            bands->num_frames_per_band() * bands->num_bands());
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
   if (bands->num_bands() == 2) {
     TwoBandsAnalysis(data, bands);
   } else if (bands->num_bands() == 3) {
@@ -45,10 +45,10 @@
 
 void SplittingFilter::Synthesis(const IFChannelBuffer* bands,
                                 IFChannelBuffer* data) {
-  DCHECK_EQ(num_bands_, bands->num_bands());
-  DCHECK_EQ(data->num_channels(), bands->num_channels());
-  DCHECK_EQ(data->num_frames(),
-            bands->num_frames_per_band() * bands->num_bands());
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
   if (bands->num_bands() == 2) {
     TwoBandsSynthesis(bands, data);
   } else if (bands->num_bands() == 3) {
@@ -58,7 +58,8 @@
 
 void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
                                        IFChannelBuffer* bands) {
-  DCHECK_EQ(static_cast<int>(two_bands_states_.size()), data->num_channels());
+  RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
+                data->num_channels());
   for (size_t i = 0; i < two_bands_states_.size(); ++i) {
     WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i],
                           data->num_frames(),
@@ -71,7 +72,8 @@
 
 void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
                                         IFChannelBuffer* data) {
-  DCHECK_EQ(static_cast<int>(two_bands_states_.size()), data->num_channels());
+  RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
+                data->num_channels());
   for (size_t i = 0; i < two_bands_states_.size(); ++i) {
     WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
                            bands->ibuf_const()->channels(1)[i],
@@ -84,8 +86,8 @@
 
 void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
                                          IFChannelBuffer* bands) {
-  DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
-            data->num_channels());
+  RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
+                data->num_channels());
   for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
     three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i],
                                           data->num_frames(),
@@ -95,8 +97,8 @@
 
 void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
                                           IFChannelBuffer* data) {
-  DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
-            data->num_channels());
+  RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
+                data->num_channels());
   for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
     three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
                                            bands->num_frames_per_band(),
diff --git a/webrtc/modules/audio_processing/test/audioproc_float.cc b/webrtc/modules/audio_processing/test/audioproc_float.cc
index f4aab32..9c44d76 100644
--- a/webrtc/modules/audio_processing/test/audioproc_float.cc
+++ b/webrtc/modules/audio_processing/test/audioproc_float.cc
@@ -105,26 +105,29 @@
     const size_t num_mics = in_file.num_channels();
     const std::vector<Point> array_geometry =
         ParseArrayGeometry(FLAGS_mic_positions, num_mics);
-    CHECK_EQ(array_geometry.size(), num_mics);
+    RTC_CHECK_EQ(array_geometry.size(), num_mics);
 
     config.Set<Beamforming>(new Beamforming(true, array_geometry));
   }
 
   rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
   if (!FLAGS_dump.empty()) {
-    CHECK_EQ(kNoErr, ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
+    RTC_CHECK_EQ(kNoErr,
+                 ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
   } else if (FLAGS_aec) {
     fprintf(stderr, "-aec requires a -dump file.\n");
     return -1;
   }
   bool process_reverse = !FLAGS_i_rev.empty();
-  CHECK_EQ(kNoErr, ap->gain_control()->Enable(FLAGS_agc || FLAGS_all));
-  CHECK_EQ(kNoErr, ap->gain_control()->set_mode(GainControl::kFixedDigital));
-  CHECK_EQ(kNoErr, ap->high_pass_filter()->Enable(FLAGS_hpf || FLAGS_all));
-  CHECK_EQ(kNoErr, ap->noise_suppression()->Enable(FLAGS_ns || FLAGS_all));
+  RTC_CHECK_EQ(kNoErr, ap->gain_control()->Enable(FLAGS_agc || FLAGS_all));
+  RTC_CHECK_EQ(kNoErr,
+               ap->gain_control()->set_mode(GainControl::kFixedDigital));
+  RTC_CHECK_EQ(kNoErr, ap->high_pass_filter()->Enable(FLAGS_hpf || FLAGS_all));
+  RTC_CHECK_EQ(kNoErr, ap->noise_suppression()->Enable(FLAGS_ns || FLAGS_all));
   if (FLAGS_ns_level != -1)
-    CHECK_EQ(kNoErr, ap->noise_suppression()->set_level(
-        static_cast<NoiseSuppression::Level>(FLAGS_ns_level)));
+    RTC_CHECK_EQ(kNoErr,
+                 ap->noise_suppression()->set_level(
+                     static_cast<NoiseSuppression::Level>(FLAGS_ns_level)));
 
   printf("Input file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
          FLAGS_i.c_str(), in_file.num_channels(), in_file.sample_rate());
@@ -196,12 +199,12 @@
     if (FLAGS_perf) {
       processing_start_time = TickTime::Now();
     }
-    CHECK_EQ(kNoErr, ap->ProcessStream(in_buf.channels(), input_config,
-                                       output_config, out_buf.channels()));
+    RTC_CHECK_EQ(kNoErr, ap->ProcessStream(in_buf.channels(), input_config,
+                                           output_config, out_buf.channels()));
     if (process_reverse) {
-      CHECK_EQ(kNoErr, ap->ProcessReverseStream(
-                           in_rev_buf->channels(), reverse_input_config,
-                           reverse_output_config, out_rev_buf->channels()));
+      RTC_CHECK_EQ(kNoErr, ap->ProcessReverseStream(
+                               in_rev_buf->channels(), reverse_input_config,
+                               reverse_output_config, out_rev_buf->channels()));
     }
     if (FLAGS_perf) {
       accumulated_time += TickTime::Now() - processing_start_time;
diff --git a/webrtc/modules/audio_processing/test/test_utils.cc b/webrtc/modules/audio_processing/test/test_utils.cc
index fe33ec0..1b9ac3c 100644
--- a/webrtc/modules/audio_processing/test/test_utils.cc
+++ b/webrtc/modules/audio_processing/test/test_utils.cc
@@ -100,8 +100,8 @@
 std::vector<Point> ParseArrayGeometry(const std::string& mic_positions,
                                       size_t num_mics) {
   const std::vector<float> values = ParseList<float>(mic_positions);
-  CHECK_EQ(values.size(), 3 * num_mics) <<
-      "Could not parse mic_positions or incorrect number of points.";
+  RTC_CHECK_EQ(values.size(), 3 * num_mics)
+      << "Could not parse mic_positions or incorrect number of points.";
 
   std::vector<Point> result;
   result.reserve(num_mics);
diff --git a/webrtc/modules/audio_processing/three_band_filter_bank.cc b/webrtc/modules/audio_processing/three_band_filter_bank.cc
index e81e519..91e58df 100644
--- a/webrtc/modules/audio_processing/three_band_filter_bank.cc
+++ b/webrtc/modules/audio_processing/three_band_filter_bank.cc
@@ -138,7 +138,7 @@
 void ThreeBandFilterBank::Analysis(const float* in,
                                    size_t length,
                                    float* const* out) {
-  CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
+  RTC_CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
   for (size_t i = 0; i < kNumBands; ++i) {
     memset(out[i], 0, in_buffer_.size() * sizeof(*out[i]));
   }
@@ -163,7 +163,7 @@
 void ThreeBandFilterBank::Synthesis(const float* const* in,
                                     size_t split_length,
                                     float* out) {
-  CHECK_EQ(in_buffer_.size(), split_length);
+  RTC_CHECK_EQ(in_buffer_.size(), split_length);
   memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out));
   for (size_t i = 0; i < kNumBands; ++i) {
     for (size_t j = 0; j < kSparsity; ++j) {
diff --git a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
index c5c8498..ef56a35 100644
--- a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -37,23 +37,23 @@
 void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
                                          size_t length,
                                          int sample_rate_hz) {
-  DCHECK_EQ(static_cast<int>(length), sample_rate_hz / 100);
-  DCHECK_LE(length, kMaxLength);
+  RTC_DCHECK_EQ(static_cast<int>(length), sample_rate_hz / 100);
+  RTC_DCHECK_LE(length, kMaxLength);
   // Resample to the required rate.
   const int16_t* resampled_ptr = audio;
   if (sample_rate_hz != kSampleRateHz) {
-    CHECK_EQ(
+    RTC_CHECK_EQ(
         resampler_.ResetIfNeeded(sample_rate_hz, kSampleRateHz, kNumChannels),
         0);
     resampler_.Push(audio, length, resampled_, kLength10Ms, length);
     resampled_ptr = resampled_;
   }
-  DCHECK_EQ(length, kLength10Ms);
+  RTC_DCHECK_EQ(length, kLength10Ms);
 
   // Each chunk needs to be passed into |standalone_vad_|, because internally it
   // buffers the audio and processes it all at once when GetActivity() is
   // called.
-  CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
+  RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
 
   audio_processing_.ExtractFeatures(resampled_ptr, length, &features_);
 
@@ -70,13 +70,13 @@
     } else {
       std::fill(chunkwise_voice_probabilities_.begin(),
                 chunkwise_voice_probabilities_.end(), kNeutralProbability);
-      CHECK_GE(
+      RTC_CHECK_GE(
           standalone_vad_->GetActivity(&chunkwise_voice_probabilities_[0],
                                        chunkwise_voice_probabilities_.size()),
           0);
-      CHECK_GE(pitch_based_vad_.VoicingProbability(
-                   features_, &chunkwise_voice_probabilities_[0]),
-               0);
+      RTC_CHECK_GE(pitch_based_vad_.VoicingProbability(
+                       features_, &chunkwise_voice_probabilities_[0]),
+                   0);
     }
     last_voice_probability_ = chunkwise_voice_probabilities_.back();
   }
diff --git a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
index 10deb28..8505e7f 100644
--- a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
+++ b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
@@ -88,7 +88,7 @@
 SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {}
 
 void SendSideBandwidthEstimation::SetSendBitrate(int bitrate) {
-  DCHECK_GT(bitrate, 0);
+  RTC_DCHECK_GT(bitrate, 0);
   bitrate_ = bitrate;
 
   // Clear last sent bitrate history so the new value can be used directly
@@ -98,7 +98,7 @@
 
 void SendSideBandwidthEstimation::SetMinMaxBitrate(int min_bitrate,
                                                    int max_bitrate) {
-  DCHECK_GE(min_bitrate, 0);
+  RTC_DCHECK_GE(min_bitrate, 0);
   min_bitrate_configured_ = std::max(min_bitrate, kDefaultMinBitrateBps);
   if (max_bitrate > 0) {
     max_bitrate_configured_ =
diff --git a/webrtc/modules/desktop_capture/screen_capturer_x11.cc b/webrtc/modules/desktop_capture/screen_capturer_x11.cc
index 714583b..7565576 100644
--- a/webrtc/modules/desktop_capture/screen_capturer_x11.cc
+++ b/webrtc/modules/desktop_capture/screen_capturer_x11.cc
@@ -30,9 +30,12 @@
 
 // TODO(sergeyu): Move this to a header where it can be shared.
 #if defined(NDEBUG)
-#define DCHECK(condition) (void)(condition)
+#define RTC_DCHECK(condition) (void)(condition)
 #else  // NDEBUG
-#define DCHECK(condition) if (!(condition)) {abort();}
+#define RTC_DCHECK(condition) \
+  if (!(condition)) {         \
+    abort();                  \
+  }
 #endif
 
 namespace webrtc {
@@ -233,8 +236,8 @@
 }
 
 void ScreenCapturerLinux::Start(Callback* callback) {
-  DCHECK(!callback_);
-  DCHECK(callback);
+  RTC_DCHECK(!callback_);
+  RTC_DCHECK(callback);
 
   callback_ = callback;
 }
@@ -285,7 +288,7 @@
 }
 
 bool ScreenCapturerLinux::GetScreenList(ScreenList* screens) {
-  DCHECK(screens->size() == 0);
+  RTC_DCHECK(screens->size() == 0);
   // TODO(jiayl): implement screen enumeration.
   Screen default_screen;
   default_screen.id = 0;
@@ -304,7 +307,7 @@
         reinterpret_cast<const XDamageNotifyEvent*>(&event);
     if (damage_event->damage != damage_handle_)
       return false;
-    DCHECK(damage_event->level == XDamageReportNonEmpty);
+    RTC_DCHECK(damage_event->level == XDamageReportNonEmpty);
     return true;
   } else if (event.type == ConfigureNotify) {
     ScreenConfigurationChanged();
@@ -367,8 +370,8 @@
     if (queue_.previous_frame()) {
       // Full-screen polling, so calculate the invalid rects here, based on the
       // changed pixels between current and previous buffers.
-      DCHECK(differ_.get() != NULL);
-      DCHECK(queue_.previous_frame()->data());
+      RTC_DCHECK(differ_.get() != NULL);
+      RTC_DCHECK(queue_.previous_frame()->data());
       differ_->CalcDirtyRegion(queue_.previous_frame()->data(),
                                frame->data(), updated_region);
     } else {
@@ -403,11 +406,11 @@
   // TODO(hclam): We can reduce the amount of copying here by subtracting
   // |capturer_helper_|s region from |last_invalid_region_|.
   // http://crbug.com/92354
-  DCHECK(queue_.previous_frame());
+  RTC_DCHECK(queue_.previous_frame());
 
   DesktopFrame* current = queue_.current_frame();
   DesktopFrame* last = queue_.previous_frame();
-  DCHECK(current != last);
+  RTC_DCHECK(current != last);
   for (DesktopRegion::Iterator it(last_invalid_region_);
        !it.IsAtEnd(); it.Advance()) {
     current->CopyPixelsFrom(*last, it.rect().top_left(), it.rect());
diff --git a/webrtc/modules/pacing/packet_router.cc b/webrtc/modules/pacing/packet_router.cc
index ac11903..563773b 100644
--- a/webrtc/modules/pacing/packet_router.cc
+++ b/webrtc/modules/pacing/packet_router.cc
@@ -22,20 +22,20 @@
 }
 
 PacketRouter::~PacketRouter() {
-  DCHECK(rtp_modules_.empty());
+  RTC_DCHECK(rtp_modules_.empty());
 }
 
 void PacketRouter::AddRtpModule(RtpRtcp* rtp_module) {
   rtc::CritScope cs(&modules_lock_);
-  DCHECK(std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module) ==
-         rtp_modules_.end());
+  RTC_DCHECK(std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module) ==
+             rtp_modules_.end());
   rtp_modules_.push_back(rtp_module);
 }
 
 void PacketRouter::RemoveRtpModule(RtpRtcp* rtp_module) {
   rtc::CritScope cs(&modules_lock_);
   auto it = std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module);
-  DCHECK(it != rtp_modules_.end());
+  RTC_DCHECK(it != rtp_modules_.end());
   rtp_modules_.erase(it);
 }
 
diff --git a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
index 9bac153..6771c45 100644
--- a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
+++ b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
@@ -104,7 +104,7 @@
   // second.
   if (!bitrate_is_initialized_) {
     const int64_t kInitializationTimeMs = 5000;
-    DCHECK_LE(kBitrateWindowMs, kInitializationTimeMs);
+    RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTimeMs);
     if (time_first_incoming_estimate_ < 0) {
       if (input->_incomingBitRate > 0) {
         time_first_incoming_estimate_ = now_ms;
diff --git a/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc b/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
index b21933a..62bb2e1 100644
--- a/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -143,7 +143,7 @@
 }
 
 void OveruseDetector::InitializeExperiment() {
-  DCHECK(in_experiment_);
+  RTC_DCHECK(in_experiment_);
   double k_up = 0.0;
   double k_down = 0.0;
   overusing_time_threshold_ = kOverUsingTimeThreshold;
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
index bfbe36a..a7086f3 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
@@ -46,12 +46,12 @@
         num_above_min_delta(0) {}
 
   int GetSendBitrateBps() const {
-    CHECK_GT(send_mean_ms, 0.0f);
+    RTC_CHECK_GT(send_mean_ms, 0.0f);
     return mean_size * 8 * 1000 / send_mean_ms;
   }
 
   int GetRecvBitrateBps() const {
-    CHECK_GT(recv_mean_ms, 0.0f);
+    RTC_CHECK_GT(recv_mean_ms, 0.0f);
     return mean_size * 8 * 1000 / recv_mean_ms;
   }
 
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
index 3ded0df..e91f1c0 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
@@ -45,7 +45,7 @@
                                           size_t payload_size,
                                           const RTPHeader& header,
                                           bool was_paced) {
-  DCHECK(header.extension.hasTransportSequenceNumber);
+  RTC_DCHECK(header.extension.hasTransportSequenceNumber);
   rtc::CritScope cs(&lock_);
   media_ssrc_ = header.ssrc;
   OnPacketArrival(header.extension.transportSequenceNumber, arrival_time_ms);
@@ -87,7 +87,7 @@
   while (more_to_build) {
     rtcp::TransportFeedback feedback_packet;
     if (BuildFeedbackPacket(&feedback_packet)) {
-      DCHECK(packet_router_ != nullptr);
+      RTC_DCHECK(packet_router_ != nullptr);
       packet_router_->SendFeedback(&feedback_packet);
     } else {
       more_to_build = false;
@@ -115,7 +115,7 @@
     window_start_seq_ = seq;
   }
 
-  DCHECK(packet_arrival_times_.end() == packet_arrival_times_.find(seq));
+  RTC_DCHECK(packet_arrival_times_.end() == packet_arrival_times_.find(seq));
   packet_arrival_times_[seq] = arrival_time;
 }
 
@@ -129,7 +129,7 @@
   // feedback packet. Some older may still be in the map, in case a reordering
   // happens and we need to retransmit them.
   auto it = packet_arrival_times_.find(window_start_seq_);
-  DCHECK(it != packet_arrival_times_.end());
+  RTC_DCHECK(it != packet_arrival_times_.end());
 
   // TODO(sprang): Measure receive times in microseconds and remove the
   // conversions below.
@@ -142,7 +142,7 @@
             static_cast<uint16_t>(it->first & 0xFFFF), it->second * 1000)) {
       // If we can't even add the first seq to the feedback packet, we won't be
       // able to build it at all.
-      CHECK_NE(window_start_seq_, it->first);
+      RTC_CHECK_NE(window_start_seq_, it->first);
 
       // Could not add timestamp, feedback packet might be full. Return and
       // try again with a fresh packet.
diff --git a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
index cde93a1..21c2f36 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
@@ -402,7 +402,7 @@
 
 void TcpSender::UpdateCongestionControl(const FeedbackPacket* fb) {
   const TcpFeedback* tcp_fb = static_cast<const TcpFeedback*>(fb);
-  DCHECK(!tcp_fb->acked_packets().empty());
+  RTC_DCHECK(!tcp_fb->acked_packets().empty());
   ack_received_ = true;
 
   uint16_t expected = tcp_fb->acked_packets().back() - last_acked_seq_num_;
diff --git a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
index c6e34f2..4c01098 100644
--- a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
+++ b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
@@ -88,7 +88,7 @@
     int64_t offset_us = 0;
     for (auto symbol : feedback.GetStatusVector()) {
       if (symbol != rtcp::TransportFeedback::StatusSymbol::kNotReceived) {
-        DCHECK(delta_it != delta_vec.end());
+        RTC_DCHECK(delta_it != delta_vec.end());
         offset_us += *(delta_it++);
         int64_t timestamp_ms = current_offset_ms_ + (offset_us / 1000);
         PacketInfo info = {timestamp_ms, 0, sequence_number, 0, false};
@@ -100,14 +100,14 @@
       }
       ++sequence_number;
     }
-    DCHECK(delta_it == delta_vec.end());
+    RTC_DCHECK(delta_it == delta_vec.end());
     if (failed_lookups > 0) {
       LOG(LS_WARNING) << "Failed to lookup send time for " << failed_lookups
                       << " packet" << (failed_lookups > 1 ? "s" : "")
                       << ". Send time history too small?";
     }
   }
-  DCHECK(bitrate_estimator_.get() != nullptr);
+  RTC_DCHECK(bitrate_estimator_.get() != nullptr);
   bitrate_estimator_->IncomingPacketFeedbackVector(packet_feedback_vector);
 }
 
@@ -119,7 +119,7 @@
 
 void TransportFeedbackAdapter::OnRttUpdate(int64_t avg_rtt_ms,
                                            int64_t max_rtt_ms) {
-  DCHECK(bitrate_estimator_.get() != nullptr);
+  RTC_DCHECK(bitrate_estimator_.get() != nullptr);
   bitrate_estimator_->OnRttUpdate(avg_rtt_ms, max_rtt_ms);
 }
 
diff --git a/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc b/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
index 4ab3864..1def671 100644
--- a/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
+++ b/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
@@ -69,7 +69,7 @@
   *out_multiple_loss_event_count = multiple_loss_historic_event_count_;
   *out_multiple_loss_packet_count = multiple_loss_historic_packet_count_;
   if (lost_packets_buffer_.empty()) {
-    DCHECK(lost_packets_wrapped_buffer_.empty());
+    RTC_DCHECK(lost_packets_wrapped_buffer_.empty());
     return;
   }
   uint16_t last_num = 0;
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
index e5ea37e..d25a754 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
@@ -670,7 +670,7 @@
         : called_(false), packet_(packet) {}
     virtual ~PacketVerifier() {}
     void OnPacketReady(uint8_t* data, size_t length) override {
-      CHECK(!called_) << "Fragmentation not supported.";
+      RTC_CHECK(!called_) << "Fragmentation not supported.";
       called_ = true;
       packet_->SetLength(length);
     }
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
index 9cd5ac3..fba4547 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
@@ -134,13 +134,13 @@
     buffer[0] = 0x80u;
     for (int i = 0; i < kSymbolsInFirstByte; ++i) {
       uint8_t encoded_symbol = EncodeSymbol(symbols_[i]);
-      DCHECK_LE(encoded_symbol, 1u);
+      RTC_DCHECK_LE(encoded_symbol, 1u);
       buffer[0] |= encoded_symbol << (kSymbolsInFirstByte - (i + 1));
     }
     buffer[1] = 0x00u;
     for (int i = 0; i < kSymbolsInSecondByte; ++i) {
       uint8_t encoded_symbol = EncodeSymbol(symbols_[i + kSymbolsInFirstByte]);
-      DCHECK_LE(encoded_symbol, 1u);
+      RTC_DCHECK_LE(encoded_symbol, 1u);
       buffer[1] |= encoded_symbol << (kSymbolsInSecondByte - (i + 1));
     }
   }
@@ -248,7 +248,7 @@
  public:
   RunLengthChunk(TransportFeedback::StatusSymbol symbol, size_t size)
       : symbol_(symbol), size_(size) {
-    DCHECK_LE(size, 0x1FFFu);
+    RTC_DCHECK_LE(size, 0x1FFFu);
   }
 
   virtual ~RunLengthChunk() {}
@@ -267,7 +267,7 @@
   }
 
   static RunLengthChunk* ParseFrom(const uint8_t* buffer) {
-    DCHECK_EQ(0, buffer[0] & 0x80);
+    RTC_DCHECK_EQ(0, buffer[0] & 0x80);
     TransportFeedback::StatusSymbol symbol =
         DecodeSymbol((buffer[0] >> 5) & 0x03);
     uint16_t count = (static_cast<uint16_t>(buffer[0] & 0x1F) << 8) | buffer[1];
@@ -314,8 +314,8 @@
 }
 void TransportFeedback::WithBase(uint16_t base_sequence,
                                  int64_t ref_timestamp_us) {
-  DCHECK_EQ(-1, base_seq_);
-  DCHECK_NE(-1, ref_timestamp_us);
+  RTC_DCHECK_EQ(-1, base_seq_);
+  RTC_DCHECK_NE(-1, ref_timestamp_us);
   base_seq_ = base_sequence;
   last_seq_ = base_sequence;
   base_time_ = ref_timestamp_us / kBaseScaleFactor;
@@ -328,7 +328,7 @@
 
 bool TransportFeedback::WithReceivedPacket(uint16_t sequence_number,
                                            int64_t timestamp) {
-  DCHECK_NE(-1, base_seq_);
+  RTC_DCHECK_NE(-1, base_seq_);
   int64_t seq = Unwrap(sequence_number);
   if (seq != base_seq_ && seq <= last_seq_)
     return false;
@@ -520,7 +520,7 @@
 }
 
 void TransportFeedback::EmitRunLengthChunk() {
-  DCHECK_GE(first_symbol_cardinality_, symbol_vec_.size());
+  RTC_DCHECK_GE(first_symbol_cardinality_, symbol_vec_.size());
   status_chunks_.push_back(
       new RunLengthChunk(symbol_vec_.front(), first_symbol_cardinality_));
   symbol_vec_.clear();
@@ -585,12 +585,12 @@
   ByteWriter<uint32_t>::WriteBigEndian(&packet[*position], media_source_ssrc_);
   *position += 4;
 
-  DCHECK_LE(base_seq_, 0xFFFF);
+  RTC_DCHECK_LE(base_seq_, 0xFFFF);
   ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], base_seq_);
   *position += 2;
 
   int64_t status_count = last_seq_ - base_seq_ + 1;
-  DCHECK_LE(status_count, 0xFFFF);
+  RTC_DCHECK_LE(status_count, 0xFFFF);
   ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], status_count);
   *position += 2;
 
@@ -714,7 +714,7 @@
 
   std::vector<StatusSymbol> symbols = packet->GetStatusVector();
 
-  DCHECK_EQ(num_packets, symbols.size());
+  RTC_DCHECK_EQ(num_packets, symbols.size());
 
   for (StatusSymbol symbol : symbols) {
     switch (symbol) {
@@ -740,8 +740,8 @@
     }
   }
 
-  DCHECK_GE(index, end_index - 3);
-  DCHECK_LE(index, end_index);
+  RTC_DCHECK_GE(index, end_index - 3);
+  RTC_DCHECK_LE(index, end_index);
 
   return packet;
 }
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
index 732772c..f9dc96e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -1339,7 +1339,7 @@
     // report can generate several RTCP packets, based on number relayed/mixed
     // a send report block should go out to all receivers.
     if (_cbRtcpIntraFrameObserver) {
-      DCHECK(!receiver_only_);
+      RTC_DCHECK(!receiver_only_);
       if ((rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpPli) ||
           (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpFir)) {
         if (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpPli) {
@@ -1361,7 +1361,7 @@
       }
     }
     if (_cbRtcpBandwidthObserver) {
-      DCHECK(!receiver_only_);
+      RTC_DCHECK(!receiver_only_);
       if (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpRemb) {
         LOG(LS_VERBOSE) << "Incoming REMB: "
                         << rtcpPacketInformation.receiverEstimatedMaxBitrate;
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
index 6040805..ea7931f 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -94,7 +94,7 @@
         position(0) {}
 
   uint8_t* AllocateData(uint32_t bytes) {
-    DCHECK_LE(position + bytes, buffer_size);
+    RTC_DCHECK_LE(position + bytes, buffer_size);
     uint8_t* ptr = &buffer[position];
     position += bytes;
     return ptr;
@@ -319,7 +319,7 @@
   if (!c_name)
     return -1;
 
-  DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
+  RTC_DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
   CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
   cname_ = c_name;
   return 0;
@@ -327,7 +327,7 @@
 
 int32_t RTCPSender::AddMixedCNAME(uint32_t SSRC, const char* c_name) {
   assert(c_name);
-  DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
+  RTC_DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
   CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
   if (csrc_cnames_.size() >= kRtpCsrcSize)
     return -1;
@@ -516,7 +516,7 @@
 
 RTCPSender::BuildResult RTCPSender::BuildSDES(RtcpContext* ctx) {
   size_t length_cname = cname_.length();
-  CHECK_LT(length_cname, static_cast<size_t>(RTCP_CNAME_SIZE));
+  RTC_CHECK_LT(length_cname, static_cast<size_t>(RTCP_CNAME_SIZE));
 
   rtcp::Sdes sdes;
   sdes.WithCName(ssrc_, cname_);
@@ -982,7 +982,7 @@
   if (IsFlagPresent(kRtcpSr) || IsFlagPresent(kRtcpRr)) {
     // Report type already explicitly set, don't automatically populate.
     generate_report = true;
-    DCHECK(ConsumeFlag(kRtcpReport) == false);
+    RTC_DCHECK(ConsumeFlag(kRtcpReport) == false);
   } else {
     generate_report =
         (ConsumeFlag(kRtcpReport) && method_ == kRtcpNonCompound) ||
@@ -1041,7 +1041,7 @@
   auto it = report_flags_.begin();
   while (it != report_flags_.end()) {
     auto builder = builders_.find(it->type);
-    DCHECK(builder != builders_.end());
+    RTC_DCHECK(builder != builders_.end());
     if (it->is_volatile) {
       report_flags_.erase(it++);
     } else {
@@ -1070,7 +1070,7 @@
         remote_ssrc_, packet_type_counter_);
   }
 
-  DCHECK(AllVolatileFlagsConsumed());
+  RTC_DCHECK(AllVolatileFlagsConsumed());
 
   return context.position;
 }
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc b/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
index 47a6331..caffb63 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
@@ -465,7 +465,7 @@
 bool RTCPUtility::RtcpParseCommonHeader(const uint8_t* packet,
                                         size_t size_bytes,
                                         RtcpCommonHeader* parsed_header) {
-  DCHECK(parsed_header != nullptr);
+  RTC_DCHECK(parsed_header != nullptr);
   if (size_bytes < RtcpCommonHeader::kHeaderSizeBytes) {
     LOG(LS_WARNING) << "Too little data (" << size_bytes << " byte"
                     << (size_bytes != 1 ? "s" : "")
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
index 2f5e2e9..ed30fc1 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
@@ -106,8 +106,8 @@
   if (!hdr.inter_pic_predicted || !hdr.flexible_mode)
     return 0;
 
-  DCHECK_GT(hdr.num_ref_pics, 0U);
-  DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics);
+  RTC_DCHECK_GT(hdr.num_ref_pics, 0U);
+  RTC_DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics);
   size_t length = 0;
   for (size_t i = 0; i < hdr.num_ref_pics; ++i) {
     length += hdr.pid_diff[i] > 0x3F ? 2 : 1;   // P_DIFF > 6 bits => extended
@@ -137,10 +137,10 @@
   if (!hdr.ss_data_available)
     return 0;
 
-  DCHECK_GT(hdr.num_spatial_layers, 0U);
-  DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
-  DCHECK_GT(hdr.gof.num_frames_in_gof, 0U);
-  DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+  RTC_DCHECK_GT(hdr.num_spatial_layers, 0U);
+  RTC_DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+  RTC_DCHECK_GT(hdr.gof.num_frames_in_gof, 0U);
+  RTC_DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof);
   size_t length = 1;                           // V
   if (hdr.spatial_layer_resolution_present) {
     length += 4 * hdr.num_spatial_layers;      // Y
@@ -148,7 +148,7 @@
   // N_G
   length += hdr.gof.num_frames_in_gof;  // T, U, R
   for (size_t i = 0; i < hdr.gof.num_frames_in_gof; ++i) {
-    DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics);
+    RTC_DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics);
     length += hdr.gof.num_ref_pics[i];  // R times
   }
   return length;
@@ -286,10 +286,10 @@
 //      +-+-+-+-+-+-+-+-+              -|           -|
 //
 bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) {
-  DCHECK_GT(vp9.num_spatial_layers, 0U);
-  DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
-  DCHECK_GT(vp9.gof.num_frames_in_gof, 0U);
-  DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+  RTC_DCHECK_GT(vp9.num_spatial_layers, 0U);
+  RTC_DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+  RTC_DCHECK_GT(vp9.gof.num_frames_in_gof, 0U);
+  RTC_DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof);
 
   RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.num_spatial_layers - 1, 3));
   RETURN_FALSE_ON_ERROR(
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
index ff64e49..7537d8e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -61,7 +61,7 @@
                rtp_header->header.timestamp);
   rtp_header->type.Video.codec = specific_payload.Video.videoCodecType;
 
-  DCHECK_GE(payload_length, rtp_header->header.paddingLength);
+  RTC_DCHECK_GE(payload_length, rtp_header->header.paddingLength);
   const size_t payload_data_length =
       payload_length - rtp_header->header.paddingLength;
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 5d15195..451360a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -475,7 +475,7 @@
 }
 
 int32_t ModuleRtpRtcpImpl::SetMaxTransferUnit(const uint16_t mtu) {
-  DCHECK_LE(mtu, IP_PACKET_SIZE) << "Invalid mtu: " << mtu;
+  RTC_DCHECK_LE(mtu, IP_PACKET_SIZE) << "Invalid mtu: " << mtu;
   return rtp_sender_.SetMaxPayloadLength(mtu - packet_overhead_,
                                          packet_overhead_);
 }
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 0b050b7..8e1f77a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -359,7 +359,7 @@
 int32_t RTPSender::SetMaxPayloadLength(size_t max_payload_length,
                                        uint16_t packet_over_head) {
   // Sanity check.
-  DCHECK(max_payload_length >= 100 && max_payload_length <= IP_PACKET_SIZE)
+  RTC_DCHECK(max_payload_length >= 100 && max_payload_length <= IP_PACKET_SIZE)
       << "Invalid max payload length: " << max_payload_length;
   CriticalSectionScoped cs(send_critsect_.get());
   max_payload_length_ = max_payload_length;
@@ -411,8 +411,8 @@
 void RTPSender::SetRtxPayloadType(int payload_type,
                                   int associated_payload_type) {
   CriticalSectionScoped cs(send_critsect_.get());
-  DCHECK_LE(payload_type, 127);
-  DCHECK_LE(associated_payload_type, 127);
+  RTC_DCHECK_LE(payload_type, 127);
+  RTC_DCHECK_LE(associated_payload_type, 127);
   if (payload_type < 0) {
     LOG(LS_ERROR) << "Invalid RTX payload type: " << payload_type;
     return;
@@ -1792,14 +1792,14 @@
 void RTPSender::SetGenericFECStatus(bool enable,
                                     uint8_t payload_type_red,
                                     uint8_t payload_type_fec) {
-  DCHECK(!audio_configured_);
+  RTC_DCHECK(!audio_configured_);
   video_->SetGenericFECStatus(enable, payload_type_red, payload_type_fec);
 }
 
 void RTPSender::GenericFECStatus(bool* enable,
                                     uint8_t* payload_type_red,
                                     uint8_t* payload_type_fec) const {
-  DCHECK(!audio_configured_);
+  RTC_DCHECK(!audio_configured_);
   video_->GenericFECStatus(*enable, *payload_type_red, *payload_type_fec);
 }
 
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
index 4c740e8..f44cda1 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -142,7 +142,7 @@
       fec_packets = producer_fec_.GetFecPackets(
           _payloadTypeRED, _payloadTypeFEC, next_fec_sequence_number,
           rtp_header_length);
-      DCHECK_EQ(num_fec_packets, fec_packets.size());
+      RTC_DCHECK_EQ(num_fec_packets, fec_packets.size());
       if (_retransmissionSettings & kRetransmitFECPackets)
         fec_storage = kAllowRetransmission;
     }
@@ -236,8 +236,8 @@
 void RTPSenderVideo::SetFecParameters(const FecProtectionParams* delta_params,
                                       const FecProtectionParams* key_params) {
   CriticalSectionScoped cs(crit_.get());
-  DCHECK(delta_params);
-  DCHECK(key_params);
+  RTC_DCHECK(delta_params);
+  RTC_DCHECK(key_params);
   delta_fec_params_ = *delta_params;
   key_fec_params_ = *key_params;
 }
@@ -313,7 +313,7 @@
     // value sent.
     // Here we are adding it to every packet of every frame at this point.
     if (!rtpHdr) {
-      DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
+      RTC_DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
           kRtpExtensionVideoRotation));
     } else if (cvo_mode == RTPSenderInterface::kCVOActivated) {
       // Checking whether CVO header extension is registered will require taking
diff --git a/webrtc/modules/utility/interface/helpers_android.h b/webrtc/modules/utility/interface/helpers_android.h
index 19ff098..5c73fe4 100644
--- a/webrtc/modules/utility/interface/helpers_android.h
+++ b/webrtc/modules/utility/interface/helpers_android.h
@@ -16,8 +16,8 @@
 
 // Abort the process if |jni| has a Java exception pending.
 // TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
-#define CHECK_EXCEPTION(jni)    \
-  CHECK(!jni->ExceptionCheck()) \
+#define CHECK_EXCEPTION(jni)        \
+  RTC_CHECK(!jni->ExceptionCheck()) \
       << (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
 
 namespace webrtc {
@@ -31,8 +31,8 @@
 jlong PointerTojlong(void* ptr);
 
 // JNIEnv-helper methods that wraps the API which uses the JNI interface
-// pointer (JNIEnv*). It allows us to CHECK success and that no Java exception
-// is thrown while calling the method.
+// pointer (JNIEnv*). It allows us to RTC_CHECK success and that no Java
+// exception is thrown while calling the method.
 jmethodID GetMethodID(
     JNIEnv* jni, jclass c, const char* name, const char* signature);
 
diff --git a/webrtc/modules/utility/source/helpers_android.cc b/webrtc/modules/utility/source/helpers_android.cc
index 175dd23..25652f2 100644
--- a/webrtc/modules/utility/source/helpers_android.cc
+++ b/webrtc/modules/utility/source/helpers_android.cc
@@ -25,8 +25,8 @@
 JNIEnv* GetEnv(JavaVM* jvm) {
   void* env = NULL;
   jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
-  CHECK(((env != NULL) && (status == JNI_OK)) ||
-        ((env == NULL) && (status == JNI_EDETACHED)))
+  RTC_CHECK(((env != NULL) && (status == JNI_OK)) ||
+            ((env == NULL) && (status == JNI_EDETACHED)))
       << "Unexpected GetEnv return: " << status << ":" << env;
   return reinterpret_cast<JNIEnv*>(env);
 }
@@ -41,7 +41,7 @@
   // conversion from pointer to integral type.  intptr_t to jlong is a standard
   // widening by the static_assert above.
   jlong ret = reinterpret_cast<intptr_t>(ptr);
-  DCHECK(reinterpret_cast<void*>(ret) == ptr);
+  RTC_DCHECK(reinterpret_cast<void*>(ret) == ptr);
   return ret;
 }
 
@@ -50,7 +50,7 @@
   jmethodID m = jni->GetMethodID(c, name, signature);
   CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
                        << signature;
-  CHECK(m) << name << ", " << signature;
+  RTC_CHECK(m) << name << ", " << signature;
   return m;
 }
 
@@ -59,21 +59,21 @@
   jmethodID m = jni->GetStaticMethodID(c, name, signature);
   CHECK_EXCEPTION(jni) << "Error during GetStaticMethodID: " << name << ", "
                        << signature;
-  CHECK(m) << name << ", " << signature;
+  RTC_CHECK(m) << name << ", " << signature;
   return m;
 }
 
 jclass FindClass(JNIEnv* jni, const char* name) {
   jclass c = jni->FindClass(name);
   CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
-  CHECK(c) << name;
+  RTC_CHECK(c) << name;
   return c;
 }
 
 jobject NewGlobalRef(JNIEnv* jni, jobject o) {
   jobject ret = jni->NewGlobalRef(o);
   CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
-  CHECK(ret);
+  RTC_CHECK(ret);
   return ret;
 }
 
@@ -85,8 +85,9 @@
 std::string GetThreadId() {
   char buf[21];  // Big enough to hold a kuint64max plus terminating NULL.
   int thread_id = gettid();
-  CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
-      static_cast<int>(sizeof(buf))) << "Thread id is bigger than uint64??";
+  RTC_CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
+               static_cast<int>(sizeof(buf)))
+      << "Thread id is bigger than uint64??";
   return std::string(buf);
 }
 
@@ -104,7 +105,7 @@
     ALOGD("Attaching thread to JVM%s", GetThreadInfo().c_str());
     jint res = jvm->AttachCurrentThread(&env_, NULL);
     attached_ = (res == JNI_OK);
-    CHECK(attached_) << "AttachCurrentThread failed: " << res;
+    RTC_CHECK(attached_) << "AttachCurrentThread failed: " << res;
   }
 }
 
@@ -112,8 +113,8 @@
   if (attached_) {
     ALOGD("Detaching thread from JVM%s", GetThreadInfo().c_str());
     jint res = jvm_->DetachCurrentThread();
-    CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
-    CHECK(!GetEnv(jvm_));
+    RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+    RTC_CHECK(!GetEnv(jvm_));
   }
 }
 
diff --git a/webrtc/modules/utility/source/jvm_android.cc b/webrtc/modules/utility/source/jvm_android.cc
index 777b8d5..648c168 100644
--- a/webrtc/modules/utility/source/jvm_android.cc
+++ b/webrtc/modules/utility/source/jvm_android.cc
@@ -41,10 +41,10 @@
   for (auto& c : loaded_classes) {
     jclass localRef = FindClass(jni, c.name);
     CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name;
-    CHECK(localRef) << c.name;
+    RTC_CHECK(localRef) << c.name;
     jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
     CHECK_EXCEPTION(jni) << "Error during NewGlobalRef: " << c.name;
-    CHECK(globalRef) << c.name;
+    RTC_CHECK(globalRef) << c.name;
     c.clazz = globalRef;
   }
 }
@@ -61,7 +61,7 @@
     if (strcmp(c.name, name) == 0)
       return c.clazz;
   }
-  CHECK(false) << "Unable to find class in lookup table";
+  RTC_CHECK(false) << "Unable to find class in lookup table";
   return 0;
 }
 
@@ -70,7 +70,7 @@
     : attached_(false) {
   ALOGD("AttachCurrentThreadIfNeeded::ctor%s", GetThreadInfo().c_str());
   JavaVM* jvm = JVM::GetInstance()->jvm();
-  CHECK(jvm);
+  RTC_CHECK(jvm);
   JNIEnv* jni = GetEnv(jvm);
   if (!jni) {
     ALOGD("Attaching thread to JVM");
@@ -82,11 +82,11 @@
 
 AttachCurrentThreadIfNeeded::~AttachCurrentThreadIfNeeded() {
   ALOGD("AttachCurrentThreadIfNeeded::dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (attached_) {
     ALOGD("Detaching thread from JVM");
     jint res = JVM::GetInstance()->jvm()->DetachCurrentThread();
-    CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+    RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
   }
 }
 
@@ -178,13 +178,13 @@
 
 JNIEnvironment::~JNIEnvironment() {
   ALOGD("JNIEnvironment::dtor%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 }
 
 rtc::scoped_ptr<NativeRegistration> JNIEnvironment::RegisterNatives(
     const char* name, const JNINativeMethod *methods, int num_methods) {
   ALOGD("JNIEnvironment::RegisterNatives(%s)", name);
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   jclass clazz = LookUpClass(name);
   jni_->RegisterNatives(clazz, methods, num_methods);
   CHECK_EXCEPTION(jni_) << "Error during RegisterNatives";
@@ -193,7 +193,7 @@
 }
 
 std::string JNIEnvironment::JavaToStdString(const jstring& j_string) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   const char* jchars = jni_->GetStringUTFChars(j_string, nullptr);
   CHECK_EXCEPTION(jni_);
   const int size = jni_->GetStringUTFLength(j_string);
@@ -207,35 +207,35 @@
 // static
 void JVM::Initialize(JavaVM* jvm, jobject context) {
   ALOGD("JVM::Initialize%s", GetThreadInfo().c_str());
-  CHECK(!g_jvm);
+  RTC_CHECK(!g_jvm);
   g_jvm = new JVM(jvm, context);
 }
 
 // static
 void JVM::Uninitialize() {
   ALOGD("JVM::Uninitialize%s", GetThreadInfo().c_str());
-  DCHECK(g_jvm);
+  RTC_DCHECK(g_jvm);
   delete g_jvm;
   g_jvm = nullptr;
 }
 
 // static
 JVM* JVM::GetInstance() {
-  DCHECK(g_jvm);
+  RTC_DCHECK(g_jvm);
   return g_jvm;
 }
 
 JVM::JVM(JavaVM* jvm, jobject context)
     : jvm_(jvm) {
   ALOGD("JVM::JVM%s", GetThreadInfo().c_str());
-  CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
+  RTC_CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
   context_ = NewGlobalRef(jni(), context);
   LoadClasses(jni());
 }
 
 JVM::~JVM() {
   ALOGD("JVM::~JVM%s", GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   FreeClassReferences(jni());
   DeleteGlobalRef(jni(), context_);
 }
@@ -257,7 +257,7 @@
 
 JavaClass JVM::GetClass(const char* name) {
   ALOGD("JVM::GetClass(%s)%s", name, GetThreadInfo().c_str());
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return JavaClass(jni(), LookUpClass(name));
 }
 
diff --git a/webrtc/modules/utility/source/process_thread_impl.cc b/webrtc/modules/utility/source/process_thread_impl.cc
index 51b7494..df56fe3 100644
--- a/webrtc/modules/utility/source/process_thread_impl.cc
+++ b/webrtc/modules/utility/source/process_thread_impl.cc
@@ -48,9 +48,9 @@
       thread_name_(thread_name) {}
 
 ProcessThreadImpl::~ProcessThreadImpl() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!thread_.get());
-  DCHECK(!stop_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!thread_.get());
+  RTC_DCHECK(!stop_);
 
   while (!queue_.empty()) {
     delete queue_.front();
@@ -59,12 +59,12 @@
 }
 
 void ProcessThreadImpl::Start() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!thread_.get());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!thread_.get());
   if (thread_.get())
     return;
 
-  DCHECK(!stop_);
+  RTC_DCHECK(!stop_);
 
   {
     // TODO(tommi): Since DeRegisterModule is currently being called from
@@ -78,11 +78,11 @@
 
   thread_ = ThreadWrapper::CreateThread(&ProcessThreadImpl::Run, this,
                                         thread_name_);
-  CHECK(thread_->Start());
+  RTC_CHECK(thread_->Start());
 }
 
 void ProcessThreadImpl::Stop() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if(!thread_.get())
     return;
 
@@ -93,7 +93,7 @@
 
   wake_up_->Set();
 
-  CHECK(thread_->Stop());
+  RTC_CHECK(thread_->Stop());
   stop_ = false;
 
   // TODO(tommi): Since DeRegisterModule is currently being called from
@@ -130,15 +130,15 @@
 }
 
 void ProcessThreadImpl::RegisterModule(Module* module) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(module);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(module);
 
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
   {
     // Catch programmer error.
     rtc::CritScope lock(&lock_);
     for (const ModuleCallback& mc : modules_)
-      DCHECK(mc.module != module);
+      RTC_DCHECK(mc.module != module);
   }
 #endif
 
@@ -162,7 +162,7 @@
 void ProcessThreadImpl::DeRegisterModule(Module* module) {
   // Allowed to be called on any thread.
   // TODO(tommi): Disallow this ^^^
-  DCHECK(module);
+  RTC_DCHECK(module);
 
   {
     rtc::CritScope lock(&lock_);
diff --git a/webrtc/modules/video_capture/ensure_initialized.cc b/webrtc/modules/video_capture/ensure_initialized.cc
index 68cac04..bc606bb 100644
--- a/webrtc/modules/video_capture/ensure_initialized.cc
+++ b/webrtc/modules/video_capture/ensure_initialized.cc
@@ -22,12 +22,10 @@
 
 #include <pthread.h>
 
-// Note: this dependency is dangerous since it reaches into Chromium's
-// base. You can't include anything in this file that includes WebRTC's
-// base/checks.h, for instance, since it will clash with Chromium's
-// logging.h. Therefore, the CHECKs in this file will actually use
-// Chromium's checks rather than the WebRTC ones.
+// Note: this dependency is dangerous since it reaches into Chromium's base.
+// There's a risk of e.g. macro clashes. This file may only be used in tests.
 #include "base/android/jni_android.h"
+#include "webrtc/base/checks.h"
 #include "webrtc/modules/video_capture/video_capture_internal.h"
 
 namespace webrtc {
@@ -39,12 +37,12 @@
   JNIEnv* jni = ::base::android::AttachCurrentThread();
   jobject context = ::base::android::GetApplicationContext();
   JavaVM* jvm = NULL;
-  CHECK_EQ(0, jni->GetJavaVM(&jvm));
-  CHECK_EQ(0, webrtc::SetCaptureAndroidVM(jvm, context));
+  RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
+  RTC_CHECK_EQ(0, webrtc::SetCaptureAndroidVM(jvm, context));
 }
 
 void EnsureInitialized() {
-  CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+  RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
 }
 
 }  // namespace videocapturemodule
diff --git a/webrtc/modules/video_coding/codecs/h264/h264.cc b/webrtc/modules/video_coding/codecs/h264/h264.cc
index d4123a2..645ed2c 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264.cc
@@ -36,7 +36,7 @@
 }
 
 H264Encoder* H264Encoder::Create() {
-  DCHECK(H264Encoder::IsSupported());
+  RTC_DCHECK(H264Encoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
   return new H264VideoToolboxEncoder();
 #else
@@ -50,7 +50,7 @@
 }
 
 H264Decoder* H264Decoder::Create() {
-  DCHECK(H264Decoder::IsSupported());
+  RTC_DCHECK(H264Decoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
   return new H264VideoToolboxDecoder();
 #else
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
index c80ccbb..36646a9 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -47,9 +47,9 @@
 // instead once the pipeline supports it.
 rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
     CVPixelBufferRef pixel_buffer) {
-  DCHECK(pixel_buffer);
-  DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
-         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+  RTC_DCHECK(pixel_buffer);
+  RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+             kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
   size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0);
   size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0);
   // TODO(tkchin): Use a frame buffer pool.
@@ -125,7 +125,7 @@
     const RTPFragmentationHeader* fragmentation,
     const CodecSpecificInfo* codec_specific_info,
     int64_t render_time_ms) {
-  DCHECK(input_image._buffer);
+  RTC_DCHECK(input_image._buffer);
 
   CMSampleBufferRef sample_buffer = nullptr;
   if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
@@ -134,7 +134,7 @@
                                         &sample_buffer)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  DCHECK(sample_buffer);
+  RTC_DCHECK(sample_buffer);
   // Check if the video format has changed, and reinitialize decoder if needed.
   CMVideoFormatDescriptionRef description =
       CMSampleBufferGetFormatDescription(sample_buffer);
@@ -160,7 +160,7 @@
 
 int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
     DecodedImageCallback* callback) {
-  DCHECK(!callback_);
+  RTC_DCHECK(!callback_);
   callback_ = callback;
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -238,7 +238,7 @@
 }
 
 void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
-  DCHECK(decompression_session_);
+  RTC_DCHECK(decompression_session_);
 #if defined(WEBRTC_IOS)
   VTSessionSetProperty(decompression_session_,
                        kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index 3dfd6cf..fec3226 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -35,7 +35,7 @@
 
 // Copies characters from a CFStringRef into a std::string.
 std::string CFStringToString(const CFStringRef cf_string) {
-  DCHECK(cf_string);
+  RTC_DCHECK(cf_string);
   std::string std_string;
   // Get the size needed for UTF8 plus terminating character.
   size_t buffer_size =
@@ -123,13 +123,13 @@
 // TODO(tkchin): See if encoder will accept i420 frames and compare performance.
 bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
                                  CVPixelBufferRef pixel_buffer) {
-  DCHECK(pixel_buffer);
-  DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
-         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
-  DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
-         static_cast<size_t>(frame.height()));
-  DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
-         static_cast<size_t>(frame.width()));
+  RTC_DCHECK(pixel_buffer);
+  RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+             kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+  RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
+             static_cast<size_t>(frame.height()));
+  RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
+             static_cast<size_t>(frame.width()));
 
   CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
   if (cvRet != kCVReturnSuccess) {
@@ -224,8 +224,8 @@
 int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
                                         int number_of_cores,
                                         size_t max_payload_size) {
-  DCHECK(codec_settings);
-  DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
+  RTC_DCHECK(codec_settings);
+  RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
   // TODO(tkchin): We may need to enforce width/height dimension restrictions
   // to match what the encoder supports.
   width_ = codec_settings->width;
@@ -266,7 +266,7 @@
     // that the pool is empty.
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  DCHECK(pixel_buffer);
+  RTC_DCHECK(pixel_buffer);
   if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
     LOG(LS_ERROR) << "Failed to copy frame data.";
     CVBufferRelease(pixel_buffer);
@@ -397,7 +397,7 @@
 }
 
 void H264VideoToolboxEncoder::ConfigureCompressionSession() {
-  DCHECK(compression_session_);
+  RTC_DCHECK(compression_session_);
   internal::SetVTSessionProperty(compression_session_,
                                  kVTCompressionPropertyKey_RealTime, true);
   internal::SetVTSessionProperty(compression_session_,
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
index 7d595a8..43a7de0 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -29,8 +29,8 @@
     bool is_keyframe,
     rtc::Buffer* annexb_buffer,
     webrtc::RTPFragmentationHeader** out_header) {
-  DCHECK(avcc_sample_buffer);
-  DCHECK(out_header);
+  RTC_DCHECK(avcc_sample_buffer);
+  RTC_DCHECK(out_header);
   *out_header = nullptr;
 
   // Get format description from the sample buffer.
@@ -51,8 +51,8 @@
     return false;
   }
   // TODO(tkchin): handle other potential sizes.
-  DCHECK_EQ(nalu_header_size, 4);
-  DCHECK_EQ(param_set_count, 2u);
+  RTC_DCHECK_EQ(nalu_header_size, 4);
+  RTC_DCHECK_EQ(param_set_count, 2u);
 
   // Truncate any previous data in the buffer without changing its capacity.
   annexb_buffer->SetSize(0);
@@ -122,7 +122,7 @@
     // The size type here must match |nalu_header_size|, we expect 4 bytes.
     // Read the length of the next packet of data. Must convert from big endian
     // to host endian.
-    DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+    RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
     uint32_t* uint32_data_ptr = reinterpret_cast<uint32*>(data_ptr);
     uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
     // Update buffer.
@@ -137,12 +137,12 @@
     bytes_remaining -= bytes_written;
     data_ptr += bytes_written;
   }
-  DCHECK_EQ(bytes_remaining, (size_t)0);
+  RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
 
   rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
   header.reset(new webrtc::RTPFragmentationHeader());
   header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
-  DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
+  RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
   for (size_t i = 0; i < frag_offsets.size(); ++i) {
     header->fragmentationOffset[i] = frag_offsets[i];
     header->fragmentationLength[i] = frag_lengths[i];
@@ -159,8 +159,8 @@
     size_t annexb_buffer_size,
     CMVideoFormatDescriptionRef video_format,
     CMSampleBufferRef* out_sample_buffer) {
-  DCHECK(annexb_buffer);
-  DCHECK(out_sample_buffer);
+  RTC_DCHECK(annexb_buffer);
+  RTC_DCHECK(out_sample_buffer);
   *out_sample_buffer = nullptr;
 
   // The buffer we receive via RTP has 00 00 00 01 start code artifically
@@ -193,7 +193,7 @@
       return false;
     }
   } else {
-    DCHECK(video_format);
+    RTC_DCHECK(video_format);
     description = video_format;
     // We don't need to retain, but it makes logic easier since we are creating
     // in the other block.
@@ -241,7 +241,7 @@
     CFRelease(contiguous_buffer);
     return false;
   }
-  DCHECK(block_buffer_size == reader.BytesRemaining());
+  RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
 
   // Write Avcc NALUs into block buffer memory.
   AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
@@ -272,7 +272,7 @@
 AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
                                        size_t length)
     : start_(annexb_buffer), offset_(0), next_offset_(0), length_(length) {
-  DCHECK(annexb_buffer);
+  RTC_DCHECK(annexb_buffer);
   offset_ = FindNextNaluHeader(start_, length_, 0);
   next_offset_ =
       FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
@@ -280,8 +280,8 @@
 
 bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
                                   size_t* out_length) {
-  DCHECK(out_nalu);
-  DCHECK(out_length);
+  RTC_DCHECK(out_nalu);
+  RTC_DCHECK(out_length);
   *out_nalu = nullptr;
   *out_length = 0;
 
@@ -304,7 +304,7 @@
 size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
                                               size_t length,
                                               size_t offset) const {
-  DCHECK(start);
+  RTC_DCHECK(start);
   if (offset + sizeof(kAnnexBHeaderBytes) > length) {
     return length;
   }
@@ -329,7 +329,7 @@
 
 AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
     : start_(avcc_buffer), offset_(0), length_(length) {
-  DCHECK(avcc_buffer);
+  RTC_DCHECK(avcc_buffer);
 }
 
 bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index f94dd55..0fbb2a6 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -220,14 +220,14 @@
     RTC_NOTREACHED();
     return false;
   }
-  DCHECK_NE(-1, layers_[0].last_qp);
+  RTC_DCHECK_NE(-1, layers_[0].last_qp);
   if (layers_[1].last_qp == -1) {
     // First frame in TL1 should only depend on TL0 since there are no
     // previous frames in TL1.
     return true;
   }
 
-  DCHECK_NE(-1, last_sync_timestamp_);
+  RTC_DCHECK_NE(-1, last_sync_timestamp_);
   int64_t timestamp_diff = timestamp - last_sync_timestamp_;
   if (timestamp_diff > kMaxTimeBetweenSyncs) {
     // After a certain time, force a sync frame.
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 3b6df75..48ed02a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -725,8 +725,8 @@
   // |raw_images_[0]|, the resolution of these frames must match. Note that
   // |input_image| might be scaled from |frame|. In that case, the resolution of
   // |raw_images_[0]| should have been updated in UpdateCodecFrameSize.
-  DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
-  DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
+  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
+  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
 
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index 6e16bc1..ce600ec 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -34,7 +34,7 @@
 
 bool Vp9FrameBufferPool::InitializeVpxUsePool(
     vpx_codec_ctx* vpx_codec_context) {
-  DCHECK(vpx_codec_context);
+  RTC_DCHECK(vpx_codec_context);
   // Tell libvpx to use this pool.
   if (vpx_codec_set_frame_buffer_functions(
           // In which context to use these callback functions.
@@ -53,7 +53,7 @@
 
 rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
 Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
-  DCHECK_GT(min_size, 0u);
+  RTC_DCHECK_GT(min_size, 0u);
   rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
   {
     rtc::CritScope cs(&buffers_lock_);
@@ -101,8 +101,8 @@
 int32 Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
                                             size_t min_size,
                                             vpx_codec_frame_buffer* fb) {
-  DCHECK(user_priv);
-  DCHECK(fb);
+  RTC_DCHECK(user_priv);
+  RTC_DCHECK(fb);
   Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
 
   rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
@@ -120,8 +120,8 @@
 // static
 int32 Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
                                                 vpx_codec_frame_buffer* fb) {
-  DCHECK(user_priv);
-  DCHECK(fb);
+  RTC_DCHECK(user_priv);
+  RTC_DCHECK(fb);
   Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
   if (buffer != nullptr) {
     buffer->Release();
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 0c4dee7..2a87fc1 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -441,8 +441,8 @@
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
   }
-  DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
-  DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
+  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
 
   // Set input image for use in the callback.
   // This was necessary since you need some information from input_image.
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/main/source/codec_database.cc
index c0ec2c8..14eea65 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -241,15 +241,15 @@
     int number_of_cores,
     size_t max_payload_size,
     VCMEncodedFrameCallback* encoded_frame_callback) {
-  DCHECK(send_codec);
+  RTC_DCHECK(send_codec);
   if (max_payload_size == 0) {
     max_payload_size = kDefaultPayloadSize;
   }
-  DCHECK_GE(number_of_cores, 1);
-  DCHECK_GE(send_codec->plType, 1);
+  RTC_DCHECK_GE(number_of_cores, 1);
+  RTC_DCHECK_GE(send_codec->plType, 1);
   // Make sure the start bit rate is sane...
-  DCHECK_LE(send_codec->startBitrate, 1000000u);
-  DCHECK(send_codec->codecType != kVideoCodecUnknown);
+  RTC_DCHECK_LE(send_codec->startBitrate, 1000000u);
+  RTC_DCHECK(send_codec->codecType != kVideoCodecUnknown);
   bool reset_required = pending_encoder_reset_;
   if (number_of_cores_ != number_of_cores) {
     number_of_cores_ = number_of_cores;
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
index 8bd3758..82a755a 100644
--- a/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -154,7 +154,7 @@
     // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
     // (HEVC)).
     if (packet.markerBit) {
-      DCHECK(!_rotation_set);
+      RTC_DCHECK(!_rotation_set);
       _rotation = packet.codecSpecificHeader.rotation;
       _rotation_set = true;
     }
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/main/source/generic_encoder.cc
index e4408d1..31c3f17 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -21,7 +21,7 @@
 // Map information from info into rtp. If no relevant information is found
 // in info, rtp is set to NULL.
 void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
-  DCHECK(info);
+  RTC_DCHECK(info);
   switch (info->codecType) {
     case kVideoCodecVP8: {
       rtp->codec = kRtpVideoVp8;
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
index dc63e81..eb5e471 100644
--- a/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
@@ -348,7 +348,7 @@
     bool frame_injected = false;
     while (!timestamps_.empty() &&
            timestamps_.front().arrive_time <= end_time) {
-      DCHECK(timestamps_.front().arrive_time >= start_time);
+      RTC_DCHECK(timestamps_.front().arrive_time >= start_time);
 
       SimulatedClock::AdvanceTimeMicroseconds(timestamps_.front().arrive_time -
                                               TimeInMicroseconds());
@@ -376,7 +376,7 @@
                  size_t size) {
     int64_t previous_arrive_timestamp = 0;
     for (size_t i = 0; i < size; i++) {
-      CHECK(arrive_timestamps[i] >= previous_arrive_timestamp);
+      RTC_CHECK(arrive_timestamps[i] >= previous_arrive_timestamp);
       timestamps_.push(TimestampPair(arrive_timestamps[i] * 1000,
                                      render_timestamps[i] * 1000));
       previous_arrive_timestamp = arrive_timestamps[i];
diff --git a/webrtc/modules/video_coding/main/source/video_receiver.cc b/webrtc/modules/video_coding/main/source/video_receiver.cc
index 8b0509e..7371f9d 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver.cc
+++ b/webrtc/modules/video_coding/main/source/video_receiver.cc
@@ -188,14 +188,14 @@
   _receiver.SetDecodeErrorMode(kNoErrors);
   switch (videoProtection) {
     case kProtectionNack: {
-      DCHECK(enable);
+      RTC_DCHECK(enable);
       _receiver.SetNackMode(kNack, -1, -1);
       break;
     }
 
     case kProtectionNackFEC: {
       CriticalSectionScoped cs(_receiveCritSect);
-      DCHECK(enable);
+      RTC_DCHECK(enable);
       _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
       _receiver.SetDecodeErrorMode(kNoErrors);
       break;
@@ -203,7 +203,7 @@
     case kProtectionFEC:
     case kProtectionNone:
       // No receiver-side protection.
-      DCHECK(enable);
+      RTC_DCHECK(enable);
       _receiver.SetNackMode(kNoNack, -1, -1);
       _receiver.SetDecodeErrorMode(kWithErrors);
       break;
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/main/source/video_sender.cc
index fd5cb1e..c59d05a 100644
--- a/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender.cc
@@ -84,7 +84,7 @@
 int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
                                        uint32_t numberOfCores,
                                        uint32_t maxPayloadSize) {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   rtc::CritScope lock(&send_crit_);
   if (sendCodec == nullptr) {
     return VCM_PARAMETER_ERROR;
@@ -133,7 +133,7 @@
 }
 
 const VideoCodec& VideoSender::GetSendCodec() const {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   return current_codec_;
 }
 
@@ -155,7 +155,7 @@
 int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
                                              uint8_t payloadType,
                                              bool internalSource /*= false*/) {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
 
   rtc::CritScope lock(&send_crit_);
 
@@ -193,7 +193,7 @@
 
 // Get encode bitrate
 int VideoSender::Bitrate(unsigned int* bitrate) const {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   // Since we're running on the thread that's the only thread known to modify
   // the value of _encoder, we don't need to grab the lock here.
 
@@ -207,7 +207,7 @@
 
 // Get encode frame rate
 int VideoSender::FrameRate(unsigned int* framerate) const {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   // Since we're running on the thread that's the only thread known to modify
   // the value of _encoder, we don't need to grab the lock here.
 
@@ -274,7 +274,7 @@
 // used in this class.
 int32_t VideoSender::RegisterProtectionCallback(
     VCMProtectionCallback* protection_callback) {
-  DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
+  RTC_DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
   protection_callback_ = protection_callback;
   return VCM_OK;
 }
@@ -334,7 +334,7 @@
     // This module only supports software encoding.
     // TODO(pbos): Offload conversion from the encoder thread.
     converted_frame = converted_frame.ConvertNativeToI420Frame();
-    CHECK(!converted_frame.IsZeroSize())
+    RTC_CHECK(!converted_frame.IsZeroSize())
         << "Frame conversion failed, won't be able to encode frame.";
   }
   int32_t ret =
@@ -376,7 +376,7 @@
 }
 
 void VideoSender::SuspendBelowMinBitrate() {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   int threshold_bps;
   if (current_codec_.numberOfSimulcastStreams == 0) {
     threshold_bps = current_codec_.minBitrate * 1000;
diff --git a/webrtc/modules/video_processing/main/source/video_decimator.cc b/webrtc/modules/video_processing/main/source/video_decimator.cc
index 449c3bd..9991c4f 100644
--- a/webrtc/modules/video_processing/main/source/video_decimator.cc
+++ b/webrtc/modules/video_processing/main/source/video_decimator.cc
@@ -38,7 +38,7 @@
 }
 
 void VPMVideoDecimator::SetTargetFramerate(int frame_rate) {
-  DCHECK(frame_rate);
+  RTC_DCHECK(frame_rate);
   target_frame_rate_ = frame_rate;
 }
 
diff --git a/webrtc/overrides/webrtc/base/logging.cc b/webrtc/overrides/webrtc/base/logging.cc
index 55d7c70..58a834d 100644
--- a/webrtc/overrides/webrtc/base/logging.cc
+++ b/webrtc/overrides/webrtc/base/logging.cc
@@ -35,9 +35,9 @@
 // ~DiagnosticLogMessage. Note that the second parameter to the LAZY_STREAM
 // macro is true since the filter check has already been done for
 // DIAGNOSTIC_LOG.
-#define LOG_LAZY_STREAM_DIRECT(file_name, line_number, sev) \
-  LAZY_STREAM(logging::LogMessage(file_name, line_number, \
-                                  sev).stream(), true)
+#define LOG_LAZY_STREAM_DIRECT(file_name, line_number, sev)              \
+  LAZY_STREAM(logging::LogMessage(file_name, line_number, sev).stream(), \
+              true)
 
 namespace rtc {
 
diff --git a/webrtc/p2p/base/dtlstransport.h b/webrtc/p2p/base/dtlstransport.h
index 8850cfc..9559c1e 100644
--- a/webrtc/p2p/base/dtlstransport.h
+++ b/webrtc/p2p/base/dtlstransport.h
@@ -24,7 +24,7 @@
 class PortAllocator;
 
 // Base should be a descendant of cricket::Transport
-// TODO(hbos): Add appropriate DCHECK thread checks to all methods.
+// TODO(hbos): Add appropriate RTC_DCHECK thread checks to all methods.
 template<class Base>
 class DtlsTransport : public Base {
  public:
@@ -44,12 +44,12 @@
   }
   void SetCertificate_w(
       const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     certificate_ = certificate;
   }
   bool GetCertificate_w(
       rtc::scoped_refptr<rtc::RTCCertificate>* certificate) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     if (!certificate_)
       return false;
 
@@ -58,14 +58,14 @@
   }
 
   bool SetSslMaxProtocolVersion_w(rtc::SSLProtocolVersion version) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     ssl_max_version_ = version;
     return true;
   }
 
   bool ApplyLocalTransportDescription_w(TransportChannelImpl* channel,
                                         std::string* error_desc) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     rtc::SSLFingerprint* local_fp =
         Base::local_description()->identity_fingerprint.get();
 
@@ -103,7 +103,7 @@
 
   bool NegotiateTransportDescription_w(ContentAction local_role,
                                        std::string* error_desc) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     if (!Base::local_description() || !Base::remote_description()) {
       const std::string msg = "Local and Remote description must be set before "
                               "transport descriptions are negotiated";
@@ -220,7 +220,7 @@
   }
 
   bool GetSslRole_w(rtc::SSLRole* ssl_role) const override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     ASSERT(ssl_role != NULL);
     *ssl_role = secure_role_;
     return true;
@@ -230,7 +230,7 @@
   bool ApplyNegotiatedTransportDescription_w(
       TransportChannelImpl* channel,
       std::string* error_desc) override {
-    DCHECK(Base::worker_thread()->IsCurrent());
+    RTC_DCHECK(Base::worker_thread()->IsCurrent());
     // Set ssl role. Role must be set before fingerprint is applied, which
     // initiates DTLS setup.
     if (!channel->SetSslRole(secure_role_)) {
diff --git a/webrtc/p2p/base/dtlstransportchannel.cc b/webrtc/p2p/base/dtlstransportchannel.cc
index bf1dc35..3474237 100644
--- a/webrtc/p2p/base/dtlstransportchannel.cc
+++ b/webrtc/p2p/base/dtlstransportchannel.cc
@@ -79,7 +79,7 @@
 bool StreamInterfaceChannel::OnPacketReceived(const char* data, size_t size) {
   // We force a read event here to ensure that we don't overflow our queue.
   bool ret = packets_.WriteBack(data, size, NULL);
-  CHECK(ret) << "Failed to write packet to queue.";
+  RTC_CHECK(ret) << "Failed to write packet to queue.";
   if (ret) {
     SignalEvent(this, rtc::SE_READ, 0);
   }
diff --git a/webrtc/p2p/stunprober/stunprober.cc b/webrtc/p2p/stunprober/stunprober.cc
index c1342dd..5bfa711 100644
--- a/webrtc/p2p/stunprober/stunprober.cc
+++ b/webrtc/p2p/stunprober/stunprober.cc
@@ -130,7 +130,7 @@
 }
 
 void StunProber::Requester::SendStunRequest() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   requests_.push_back(new Request());
   Request& request = *(requests_.back());
   cricket::StunMessage message;
@@ -164,7 +164,7 @@
   request.sent_time_ms = rtc::Time();
 
   num_request_sent_++;
-  DCHECK(static_cast<size_t>(num_request_sent_) <= server_ips_.size());
+  RTC_DCHECK(static_cast<size_t>(num_request_sent_) <= server_ips_.size());
 }
 
 void StunProber::Requester::Request::ProcessResponse(const char* buf,
@@ -202,8 +202,8 @@
     size_t size,
     const rtc::SocketAddress& addr,
     const rtc::PacketTime& time) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(socket_);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(socket_);
   Request* request = GetRequestByAddress(addr.ipaddr());
   if (!request) {
     // Something is wrong, finish the test.
@@ -217,7 +217,7 @@
 
 StunProber::Requester::Request* StunProber::Requester::GetRequestByAddress(
     const rtc::IPAddress& ipaddr) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   for (auto request : requests_) {
     if (request->server_addr == ipaddr) {
       return request;
@@ -255,7 +255,7 @@
                        int num_request_per_ip,
                        int timeout_ms,
                        const AsyncCallback callback) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   interval_ms_ = interval_ms;
   shared_socket_mode_ = shared_socket_mode;
 
@@ -290,7 +290,7 @@
 }
 
 void StunProber::OnServerResolved(rtc::AsyncResolverInterface* resolver) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 
   if (resolver->GetError() == 0) {
     rtc::SocketAddress addr(resolver->address().ipaddr(),
@@ -343,7 +343,7 @@
 }
 
 StunProber::Requester* StunProber::CreateRequester() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!sockets_.size()) {
     return nullptr;
   }
@@ -375,7 +375,7 @@
 }
 
 void StunProber::MaybeScheduleStunRequests() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   uint32 now = rtc::Time();
 
   if (Done()) {
@@ -460,7 +460,7 @@
   int num_server_ip_with_response = 0;
 
   for (const auto& kv : num_response_per_server) {
-    DCHECK_GT(kv.second, 0);
+    RTC_DCHECK_GT(kv.second, 0);
     num_server_ip_with_response++;
     num_received += kv.second;
     num_sent += num_request_per_server[kv.first];
@@ -521,7 +521,7 @@
 }
 
 void StunProber::End(StunProber::Status status) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!finished_callback_.empty()) {
     AsyncCallback callback = finished_callback_;
     finished_callback_ = AsyncCallback();
diff --git a/webrtc/system_wrappers/interface/aligned_array.h b/webrtc/system_wrappers/interface/aligned_array.h
index 3648c7c..6d6c81b 100644
--- a/webrtc/system_wrappers/interface/aligned_array.h
+++ b/webrtc/system_wrappers/interface/aligned_array.h
@@ -24,7 +24,7 @@
       : rows_(rows),
         cols_(cols),
         alignment_(alignment) {
-    CHECK_GT(alignment_, 0);
+    RTC_CHECK_GT(alignment_, 0);
     head_row_ = static_cast<T**>(AlignedMalloc(rows_ * sizeof(*head_row_),
                                                alignment_));
     for (int i = 0; i < rows_; ++i) {
@@ -49,22 +49,22 @@
   }
 
   T* Row(int row) {
-    CHECK_LE(row, rows_);
+    RTC_CHECK_LE(row, rows_);
     return head_row_[row];
   }
 
   const T* Row(int row) const {
-    CHECK_LE(row, rows_);
+    RTC_CHECK_LE(row, rows_);
     return head_row_[row];
   }
 
   T& At(int row, size_t col) {
-    CHECK_LE(col, cols_);
+    RTC_CHECK_LE(col, cols_);
     return Row(row)[col];
   }
 
   const T& At(int row, size_t col) const {
-    CHECK_LE(col, cols_);
+    RTC_CHECK_LE(col, cols_);
     return Row(row)[col];
   }
 
diff --git a/webrtc/system_wrappers/interface/scoped_vector.h b/webrtc/system_wrappers/interface/scoped_vector.h
index 1e12645..1a70a2c 100644
--- a/webrtc/system_wrappers/interface/scoped_vector.h
+++ b/webrtc/system_wrappers/interface/scoped_vector.h
@@ -84,7 +84,7 @@
   void push_back(T* elem) { v_.push_back(elem); }
 
   void pop_back() {
-    DCHECK(!empty());
+    RTC_DCHECK(!empty());
     delete v_.back();
     v_.pop_back();
   }
diff --git a/webrtc/system_wrappers/source/critical_section_posix.cc b/webrtc/system_wrappers/source/critical_section_posix.cc
index 36b9f13..41b7732 100644
--- a/webrtc/system_wrappers/source/critical_section_posix.cc
+++ b/webrtc/system_wrappers/source/critical_section_posix.cc
@@ -10,8 +10,7 @@
 
 // General note: return values for the various pthread synchronization APIs
 // are explicitly ignored here. In Chromium, the same thing is done for release.
-// However, in debugging, failure in these APIs are logged. There is currently
-// no equivalent to DCHECK_EQ in WebRTC code so this is the best we can do here.
+// However, in debugging, failures in these APIs are logged.
 // TODO(henrike): add logging when pthread synchronization APIs are failing.
 
 #include "webrtc/system_wrappers/source/critical_section_posix.h"
diff --git a/webrtc/system_wrappers/source/event_timer_posix.cc b/webrtc/system_wrappers/source/event_timer_posix.cc
index b5ed461..99eebcb 100644
--- a/webrtc/system_wrappers/source/event_timer_posix.cc
+++ b/webrtc/system_wrappers/source/event_timer_posix.cc
@@ -60,7 +60,7 @@
 
 // TODO(pbos): Make this void.
 bool EventTimerPosix::Set() {
-  CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
   event_set_ = true;
   pthread_cond_signal(&cond_);
   pthread_mutex_unlock(&mutex_);
@@ -69,7 +69,7 @@
 
 EventTypeWrapper EventTimerPosix::Wait(unsigned long timeout) {
   int ret_val = 0;
-  CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
 
   if (!event_set_) {
     if (WEBRTC_EVENT_INFINITE != timeout) {
@@ -103,7 +103,7 @@
     }
   }
 
-  DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
+  RTC_DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
 
   // Reset and signal if set, regardless of why the thread woke up.
   if (event_set_) {
@@ -117,12 +117,12 @@
 
 EventTypeWrapper EventTimerPosix::Wait(timespec* end_at) {
   int ret_val = 0;
-  CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
 
   while (ret_val == 0 && !event_set_)
     ret_val = pthread_cond_timedwait(&cond_, &mutex_, end_at);
 
-  DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
+  RTC_DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
 
   // Reset and signal if set, regardless of why the thread woke up.
   if (event_set_) {
diff --git a/webrtc/system_wrappers/source/file_impl.cc b/webrtc/system_wrappers/source/file_impl.cc
index dfb1388..89a9185 100644
--- a/webrtc/system_wrappers/source/file_impl.cc
+++ b/webrtc/system_wrappers/source/file_impl.cc
@@ -271,7 +271,7 @@
 }
 
 int FileWrapper::Rewind() {
-  DCHECK(false);
+  RTC_DCHECK(false);
   return -1;
 }
 
diff --git a/webrtc/system_wrappers/source/thread_posix.cc b/webrtc/system_wrappers/source/thread_posix.cc
index 3eb7f2a..fdfbf80 100644
--- a/webrtc/system_wrappers/source/thread_posix.cc
+++ b/webrtc/system_wrappers/source/thread_posix.cc
@@ -39,7 +39,7 @@
 
 int ConvertToSystemPriority(ThreadPriority priority, int min_prio,
                             int max_prio) {
-  DCHECK(max_prio - min_prio > 2);
+  RTC_DCHECK(max_prio - min_prio > 2);
   const int top_prio = max_prio - 1;
   const int low_prio = min_prio + 1;
 
@@ -57,7 +57,7 @@
     case kRealtimePriority:
       return top_prio;
   }
-  DCHECK(false);
+  RTC_DCHECK(false);
   return low_prio;
 }
 
@@ -74,7 +74,7 @@
       stop_event_(false, false),
       name_(thread_name ? thread_name : "webrtc"),
       thread_(0) {
-  DCHECK(name_.length() < 64);
+  RTC_DCHECK(name_.length() < 64);
 }
 
 uint32_t ThreadWrapper::GetThreadId() {
@@ -82,36 +82,36 @@
 }
 
 ThreadPosix::~ThreadPosix() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
 }
 
 // TODO(pbos): Make Start void, calling code really doesn't support failures
 // here.
 bool ThreadPosix::Start() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(!thread_) << "Thread already started?";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!thread_) << "Thread already started?";
 
   ThreadAttributes attr;
   // Set the stack stack size to 1M.
   pthread_attr_setstacksize(&attr, 1024 * 1024);
-  CHECK_EQ(0, pthread_create(&thread_, &attr, &StartThread, this));
+  RTC_CHECK_EQ(0, pthread_create(&thread_, &attr, &StartThread, this));
   return true;
 }
 
 bool ThreadPosix::Stop() {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!thread_)
     return true;
 
   stop_event_.Set();
-  CHECK_EQ(0, pthread_join(thread_, nullptr));
+  RTC_CHECK_EQ(0, pthread_join(thread_, nullptr));
   thread_ = 0;
 
   return true;
 }
 
 bool ThreadPosix::SetPriority(ThreadPriority priority) {
-  DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   if (!thread_)
     return false;
 #if defined(WEBRTC_CHROMIUM_BUILD) && defined(WEBRTC_LINUX)
diff --git a/webrtc/system_wrappers/source/thread_win.cc b/webrtc/system_wrappers/source/thread_win.cc
index 7c6bd89..2773f7e 100644
--- a/webrtc/system_wrappers/source/thread_win.cc
+++ b/webrtc/system_wrappers/source/thread_win.cc
@@ -32,12 +32,12 @@
       stop_(false),
       thread_(NULL),
       name_(thread_name ? thread_name : "webrtc") {
-  DCHECK(func);
+  RTC_DCHECK(func);
 }
 
 ThreadWindows::~ThreadWindows() {
-  DCHECK(main_thread_.CalledOnValidThread());
-  DCHECK(!thread_);
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(!thread_);
 }
 
 // static
@@ -52,8 +52,8 @@
 }
 
 bool ThreadWindows::Start() {
-  DCHECK(main_thread_.CalledOnValidThread());
-  DCHECK(!thread_);
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(!thread_);
 
   stop_ = false;
 
@@ -64,7 +64,7 @@
   thread_ = ::CreateThread(NULL, 1024 * 1024, &StartThread, this,
       STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id);
   if (!thread_ ) {
-    DCHECK(false) << "CreateThread failed";
+    RTC_DCHECK(false) << "CreateThread failed";
     return false;
   }
 
@@ -72,7 +72,7 @@
 }
 
 bool ThreadWindows::Stop() {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   if (thread_) {
     // Set stop_ to |true| on the worker thread.
     QueueUserAPC(&RaiseFlag, thread_, reinterpret_cast<ULONG_PTR>(&stop_));
@@ -85,7 +85,7 @@
 }
 
 bool ThreadWindows::SetPriority(ThreadPriority priority) {
-  DCHECK(main_thread_.CalledOnValidThread());
+  RTC_DCHECK(main_thread_.CalledOnValidThread());
   return thread_ && SetThreadPriority(thread_, priority);
 }
 
diff --git a/webrtc/system_wrappers/source/tick_util.cc b/webrtc/system_wrappers/source/tick_util.cc
index 8895b91..9602ab2 100644
--- a/webrtc/system_wrappers/source/tick_util.cc
+++ b/webrtc/system_wrappers/source/tick_util.cc
@@ -75,8 +75,8 @@
     // Recommended by Apple's QA1398.
     kern_return_t retval = mach_timebase_info(&timebase);
     if (retval != KERN_SUCCESS) {
-      // TODO(wu): Implement CHECK similar to chrome for all the platforms.
-      // Then replace this with a CHECK(retval == KERN_SUCCESS);
+      // TODO(wu): Implement RTC_CHECK for all the platforms. Then replace this
+      // with a RTC_CHECK_EQ(retval, KERN_SUCCESS);
 #ifndef WEBRTC_IOS
       asm("int3");
 #else
diff --git a/webrtc/test/frame_generator.cc b/webrtc/test/frame_generator.cc
index 782e392..db51261 100644
--- a/webrtc/test/frame_generator.cc
+++ b/webrtc/test/frame_generator.cc
@@ -146,13 +146,13 @@
         current_frame_num_(num_frames_ - 1),
         current_source_frame_(nullptr),
         file_generator_(files, source_width, source_height, 1) {
-    DCHECK(clock_ != nullptr);
-    DCHECK_GT(num_frames_, 0u);
-    DCHECK_GE(source_height, target_height);
-    DCHECK_GE(source_width, target_width);
-    DCHECK_GE(scroll_time_ms, 0);
-    DCHECK_GE(pause_time_ms, 0);
-    DCHECK_GT(scroll_time_ms + pause_time_ms, 0);
+    RTC_DCHECK(clock_ != nullptr);
+    RTC_DCHECK_GT(num_frames_, 0u);
+    RTC_DCHECK_GE(source_height, target_height);
+    RTC_DCHECK_GE(source_width, target_width);
+    RTC_DCHECK_GE(scroll_time_ms, 0);
+    RTC_DCHECK_GE(pause_time_ms, 0);
+    RTC_DCHECK_GT(scroll_time_ms + pause_time_ms, 0);
     current_frame_.CreateEmptyFrame(static_cast<int>(target_width),
                                     static_cast<int>(target_height),
                                     static_cast<int>(target_width),
@@ -187,7 +187,7 @@
       current_source_frame_ = file_generator_.NextFrame();
       current_frame_num_ = (current_frame_num_ + 1) % num_frames_;
     }
-    DCHECK(current_source_frame_ != nullptr);
+    RTC_DCHECK(current_source_frame_ != nullptr);
   }
 
   void CropSourceToScrolledImage(double scroll_factor) {
@@ -247,7 +247,7 @@
   std::vector<FILE*> files;
   for (const std::string& filename : filenames) {
     FILE* file = fopen(filename.c_str(), "rb");
-    DCHECK(file != nullptr);
+    RTC_DCHECK(file != nullptr);
     files.push_back(file);
   }
 
@@ -267,7 +267,7 @@
   std::vector<FILE*> files;
   for (const std::string& filename : filenames) {
     FILE* file = fopen(filename.c_str(), "rb");
-    DCHECK(file != nullptr);
+    RTC_DCHECK(file != nullptr);
     files.push_back(file);
   }
 
diff --git a/webrtc/test/layer_filtering_transport.cc b/webrtc/test/layer_filtering_transport.cc
index 102f63e..5ad3f8c 100644
--- a/webrtc/test/layer_filtering_transport.cc
+++ b/webrtc/test/layer_filtering_transport.cc
@@ -47,9 +47,9 @@
   if (header.payloadType == vp8_video_payload_type_ ||
       header.payloadType == vp9_video_payload_type_) {
     const uint8_t* payload = packet + header.headerLength;
-    DCHECK_GT(length, header.headerLength);
+    RTC_DCHECK_GT(length, header.headerLength);
     const size_t payload_length = length - header.headerLength;
-    DCHECK_GT(payload_length, header.paddingLength);
+    RTC_DCHECK_GT(payload_length, header.paddingLength);
     const size_t payload_data_length = payload_length - header.paddingLength;
 
     const bool is_vp8 = header.payloadType == vp8_video_payload_type_;
diff --git a/webrtc/test/rtp_file_writer.cc b/webrtc/test/rtp_file_writer.cc
index 90c46be..793e51a 100644
--- a/webrtc/test/rtp_file_writer.cc
+++ b/webrtc/test/rtp_file_writer.cc
@@ -28,7 +28,7 @@
 class RtpDumpWriter : public RtpFileWriter {
  public:
   explicit RtpDumpWriter(FILE* file) : file_(file) {
-    CHECK(file_ != NULL);
+    RTC_CHECK(file_ != NULL);
     Init();
   }
   virtual ~RtpDumpWriter() {
@@ -40,12 +40,12 @@
 
   bool WritePacket(const RtpPacket* packet) override {
     uint16_t len = static_cast<uint16_t>(packet->length + kPacketHeaderSize);
-    CHECK_GE(packet->original_length, packet->length);
+    RTC_CHECK_GE(packet->original_length, packet->length);
     uint16_t plen = static_cast<uint16_t>(packet->original_length);
     uint32_t offset = packet->time_ms;
-    CHECK(WriteUint16(len));
-    CHECK(WriteUint16(plen));
-    CHECK(WriteUint32(offset));
+    RTC_CHECK(WriteUint16(len));
+    RTC_CHECK(WriteUint16(plen));
+    RTC_CHECK(WriteUint32(offset));
     return fwrite(packet->data, sizeof(uint8_t), packet->length, file_) ==
            packet->length;
   }
@@ -54,11 +54,11 @@
   bool Init() {
     fprintf(file_, "%s", kFirstLine);
 
-    CHECK(WriteUint32(0));
-    CHECK(WriteUint32(0));
-    CHECK(WriteUint32(0));
-    CHECK(WriteUint16(0));
-    CHECK(WriteUint16(0));
+    RTC_CHECK(WriteUint32(0));
+    RTC_CHECK(WriteUint32(0));
+    RTC_CHECK(WriteUint32(0));
+    RTC_CHECK(WriteUint16(0));
+    RTC_CHECK(WriteUint16(0));
 
     return true;
   }
diff --git a/webrtc/tools/agc/agc_harness.cc b/webrtc/tools/agc/agc_harness.cc
index 92dcfdb..8a6c7d7 100644
--- a/webrtc/tools/agc/agc_harness.cc
+++ b/webrtc/tools/agc/agc_harness.cc
@@ -107,7 +107,7 @@
       webrtc::Config config;
       config.Set<ExperimentalAgc>(new ExperimentalAgc(!legacy_agc));
       AudioProcessing* audioproc = AudioProcessing::Create(config);
-      CHECK_EQ(0, base_->Init(nullptr, audioproc));
+      RTC_CHECK_EQ(0, base_->Init(nullptr, audioproc));
       // Set this stuff after Init, to override the default voice engine
       // settings.
       audioproc->gain_control()->Enable(true);
@@ -116,27 +116,28 @@
       audioproc->echo_cancellation()->Enable(FLAGS_aec);
     }
     channel_ = base_->CreateChannel();
-    CHECK_NE(-1, channel_);
+    RTC_CHECK_NE(-1, channel_);
 
     channel_transport_.reset(
         new test::VoiceChannelTransport(network, channel_));
-    CHECK_EQ(0, channel_transport_->SetSendDestination("127.0.0.1", tx_port));
-    CHECK_EQ(0, channel_transport_->SetLocalReceiver(rx_port));
+    RTC_CHECK_EQ(0,
+                 channel_transport_->SetSendDestination("127.0.0.1", tx_port));
+    RTC_CHECK_EQ(0, channel_transport_->SetLocalReceiver(rx_port));
 
-    CHECK_EQ(0, hardware_->SetRecordingDevice(capture_idx_));
-    CHECK_EQ(0, hardware_->SetPlayoutDevice(render_idx_));
+    RTC_CHECK_EQ(0, hardware_->SetRecordingDevice(capture_idx_));
+    RTC_CHECK_EQ(0, hardware_->SetPlayoutDevice(render_idx_));
 
     CodecInst codec_params = {};
     bool codec_found = false;
     for (int i = 0; i < codec_->NumOfCodecs(); i++) {
-      CHECK_EQ(0, codec_->GetCodec(i, codec_params));
+      RTC_CHECK_EQ(0, codec_->GetCodec(i, codec_params));
       if (FLAGS_pt == codec_params.pltype) {
         codec_found = true;
         break;
       }
     }
-    CHECK(codec_found);
-    CHECK_EQ(0, codec_->SetSendCodec(channel_, codec_params));
+    RTC_CHECK(codec_found);
+    RTC_CHECK_EQ(0, codec_->SetSendCodec(channel_, codec_params));
 
     audio->Release();
     network->Release();
@@ -145,28 +146,28 @@
   void TearDown() {
     Stop();
     channel_transport_.reset(nullptr);
-    CHECK_EQ(0, base_->DeleteChannel(channel_));
-    CHECK_EQ(0, base_->Terminate());
+    RTC_CHECK_EQ(0, base_->DeleteChannel(channel_));
+    RTC_CHECK_EQ(0, base_->Terminate());
     hardware_->Release();
     base_->Release();
     codec_->Release();
-    CHECK(VoiceEngine::Delete(voe_));
+    RTC_CHECK(VoiceEngine::Delete(voe_));
   }
 
   void PrintDevices() {
     int num_devices = 0;
     char device_name[128] = {0};
     char guid[128] = {0};
-    CHECK_EQ(0, hardware_->GetNumOfRecordingDevices(num_devices));
+    RTC_CHECK_EQ(0, hardware_->GetNumOfRecordingDevices(num_devices));
     printf("Capture devices:\n");
     for (int i = 0; i < num_devices; i++) {
-      CHECK_EQ(0, hardware_->GetRecordingDeviceName(i, device_name, guid));
+      RTC_CHECK_EQ(0, hardware_->GetRecordingDeviceName(i, device_name, guid));
       printf("%d: %s\n", i, device_name);
     }
-    CHECK_EQ(0, hardware_->GetNumOfPlayoutDevices(num_devices));
+    RTC_CHECK_EQ(0, hardware_->GetNumOfPlayoutDevices(num_devices));
     printf("Render devices:\n");
     for (int i = 0; i < num_devices; i++) {
-      CHECK_EQ(0, hardware_->GetPlayoutDeviceName(i, device_name, guid));
+      RTC_CHECK_EQ(0, hardware_->GetPlayoutDeviceName(i, device_name, guid));
       printf("%d: %s\n", i, device_name);
     }
   }
@@ -175,13 +176,13 @@
     CodecInst params = {0};
     printf("Codecs:\n");
     for (int i = 0; i < codec_->NumOfCodecs(); i++) {
-      CHECK_EQ(0, codec_->GetCodec(i, params));
+      RTC_CHECK_EQ(0, codec_->GetCodec(i, params));
       printf("%d %s/%d/%d\n", params.pltype, params.plname, params.plfreq,
              params.channels);
     }
   }
 
-  void StartSending() { CHECK_EQ(0, base_->StartSend(channel_)); }
+  void StartSending() { RTC_CHECK_EQ(0, base_->StartSend(channel_)); }
 
   void StartPlaying(Pan pan, const std::string& filename) {
     VoEVolumeControl* volume = VoEVolumeControl::GetInterface(voe_);
@@ -193,19 +194,19 @@
     }
     if (filename != "") {
       printf("playing file\n");
-      CHECK_EQ(
+      RTC_CHECK_EQ(
           0, file->StartPlayingFileLocally(channel_, filename.c_str(), true,
                                            kFileFormatPcm16kHzFile, 1.0, 0, 0));
     }
-    CHECK_EQ(0, base_->StartReceive(channel_));
-    CHECK_EQ(0, base_->StartPlayout(channel_));
+    RTC_CHECK_EQ(0, base_->StartReceive(channel_));
+    RTC_CHECK_EQ(0, base_->StartPlayout(channel_));
     volume->Release();
     file->Release();
   }
 
   void Stop() {
-    CHECK_EQ(0, base_->StopSend(channel_));
-    CHECK_EQ(0, base_->StopPlayout(channel_));
+    RTC_CHECK_EQ(0, base_->StopSend(channel_));
+    RTC_CHECK_EQ(0, base_->StopPlayout(channel_));
   }
 
  private:
diff --git a/webrtc/video/audio_receive_stream.cc b/webrtc/video/audio_receive_stream.cc
index 9b40002..b8da1bb 100644
--- a/webrtc/video/audio_receive_stream.cc
+++ b/webrtc/video/audio_receive_stream.cc
@@ -48,21 +48,21 @@
     : remote_bitrate_estimator_(remote_bitrate_estimator),
       config_(config),
       rtp_header_parser_(RtpHeaderParser::Create()) {
-  DCHECK(config.voe_channel_id != -1);
-  DCHECK(remote_bitrate_estimator_ != nullptr);
-  DCHECK(rtp_header_parser_ != nullptr);
+  RTC_DCHECK(config.voe_channel_id != -1);
+  RTC_DCHECK(remote_bitrate_estimator_ != nullptr);
+  RTC_DCHECK(rtp_header_parser_ != nullptr);
   for (const auto& ext : config.rtp.extensions) {
     // One-byte-extension local identifiers are in the range 1-14 inclusive.
-    DCHECK_GE(ext.id, 1);
-    DCHECK_LE(ext.id, 14);
+    RTC_DCHECK_GE(ext.id, 1);
+    RTC_DCHECK_LE(ext.id, 14);
     if (ext.name == RtpExtension::kAudioLevel) {
-      CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
+      RTC_CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
           kRtpExtensionAudioLevel, ext.id));
     } else if (ext.name == RtpExtension::kAbsSendTime) {
-      CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
+      RTC_CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
           kRtpExtensionAbsoluteSendTime, ext.id));
     } else if (ext.name == RtpExtension::kTransportSequenceNumber) {
-      CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
+      RTC_CHECK(rtp_header_parser_->RegisterRtpHeaderExtension(
           kRtpExtensionTransportSequenceNumber, ext.id));
     } else {
       RTC_NOTREACHED() << "Unsupported RTP extension.";
diff --git a/webrtc/video/bitrate_estimator_tests.cc b/webrtc/video/bitrate_estimator_tests.cc
index 059de35..f7044ae 100644
--- a/webrtc/video/bitrate_estimator_tests.cc
+++ b/webrtc/video/bitrate_estimator_tests.cc
@@ -188,7 +188,7 @@
       test_->send_config_.encoder_settings.encoder = &fake_encoder_;
       send_stream_ = test_->sender_call_->CreateVideoSendStream(
           test_->send_config_, test_->encoder_config_);
-      DCHECK_EQ(1u, test_->encoder_config_.streams.size());
+      RTC_DCHECK_EQ(1u, test_->encoder_config_.streams.size());
       frame_generator_capturer_.reset(test::FrameGeneratorCapturer::Create(
           send_stream_->Input(),
           test_->encoder_config_.streams[0].width,
@@ -201,9 +201,9 @@
       if (receive_audio) {
         AudioReceiveStream::Config receive_config;
         receive_config.rtp.remote_ssrc = test_->send_config_.rtp.ssrcs[0];
-        // Bogus non-default id to prevent hitting a DCHECK when creating the
-        // AudioReceiveStream. Every receive stream has to correspond to an
-        // underlying channel id.
+        // Bogus non-default id to prevent hitting an RTC_DCHECK when creating
+        // the AudioReceiveStream. Every receive stream has to correspond to
+        // an underlying channel id.
         receive_config.voe_channel_id = 0;
         receive_config.rtp.extensions.push_back(
             RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
diff --git a/webrtc/video/call.cc b/webrtc/video/call.cc
index 3ef113c..2b2d596 100644
--- a/webrtc/video/call.cc
+++ b/webrtc/video/call.cc
@@ -144,12 +144,12 @@
       receive_crit_(RWLockWrapper::CreateRWLock()),
       send_crit_(RWLockWrapper::CreateRWLock()),
       event_log_(nullptr) {
-  DCHECK_GE(config.bitrate_config.min_bitrate_bps, 0);
-  DCHECK_GE(config.bitrate_config.start_bitrate_bps,
-            config.bitrate_config.min_bitrate_bps);
+  RTC_DCHECK_GE(config.bitrate_config.min_bitrate_bps, 0);
+  RTC_DCHECK_GE(config.bitrate_config.start_bitrate_bps,
+                config.bitrate_config.min_bitrate_bps);
   if (config.bitrate_config.max_bitrate_bps != -1) {
-    DCHECK_GE(config.bitrate_config.max_bitrate_bps,
-              config.bitrate_config.start_bitrate_bps);
+    RTC_DCHECK_GE(config.bitrate_config.max_bitrate_bps,
+                  config.bitrate_config.start_bitrate_bps);
   }
   if (config.voice_engine) {
     VoECodec* voe_codec = VoECodec::GetInterface(config.voice_engine);
@@ -166,11 +166,11 @@
 }
 
 Call::~Call() {
-  CHECK_EQ(0u, video_send_ssrcs_.size());
-  CHECK_EQ(0u, video_send_streams_.size());
-  CHECK_EQ(0u, audio_receive_ssrcs_.size());
-  CHECK_EQ(0u, video_receive_ssrcs_.size());
-  CHECK_EQ(0u, video_receive_streams_.size());
+  RTC_CHECK_EQ(0u, video_send_ssrcs_.size());
+  RTC_CHECK_EQ(0u, video_send_streams_.size());
+  RTC_CHECK_EQ(0u, audio_receive_ssrcs_.size());
+  RTC_CHECK_EQ(0u, video_receive_ssrcs_.size());
+  RTC_CHECK_EQ(0u, video_receive_streams_.size());
 
   module_process_thread_->Stop();
   Trace::ReturnTrace();
@@ -194,8 +194,8 @@
       channel_group_->GetRemoteBitrateEstimator(), config);
   {
     WriteLockScoped write_lock(*receive_crit_);
-    DCHECK(audio_receive_ssrcs_.find(config.rtp.remote_ssrc) ==
-        audio_receive_ssrcs_.end());
+    RTC_DCHECK(audio_receive_ssrcs_.find(config.rtp.remote_ssrc) ==
+               audio_receive_ssrcs_.end());
     audio_receive_ssrcs_[config.rtp.remote_ssrc] = receive_stream;
     ConfigureSync(config.sync_group);
   }
@@ -205,14 +205,14 @@
 void Call::DestroyAudioReceiveStream(
     webrtc::AudioReceiveStream* receive_stream) {
   TRACE_EVENT0("webrtc", "Call::DestroyAudioReceiveStream");
-  DCHECK(receive_stream != nullptr);
+  RTC_DCHECK(receive_stream != nullptr);
   AudioReceiveStream* audio_receive_stream =
       static_cast<AudioReceiveStream*>(receive_stream);
   {
     WriteLockScoped write_lock(*receive_crit_);
     size_t num_deleted = audio_receive_ssrcs_.erase(
         audio_receive_stream->config().rtp.remote_ssrc);
-    DCHECK(num_deleted == 1);
+    RTC_DCHECK(num_deleted == 1);
     const std::string& sync_group = audio_receive_stream->config().sync_group;
     const auto it = sync_stream_mapping_.find(sync_group);
     if (it != sync_stream_mapping_.end() &&
@@ -229,7 +229,7 @@
     const VideoEncoderConfig& encoder_config) {
   TRACE_EVENT0("webrtc", "Call::CreateVideoSendStream");
   LOG(LS_INFO) << "CreateVideoSendStream: " << config.ToString();
-  DCHECK(!config.rtp.ssrcs.empty());
+  RTC_DCHECK(!config.rtp.ssrcs.empty());
 
   // TODO(mflodman): Base the start bitrate on a current bandwidth estimate, if
   // the call has already started.
@@ -243,7 +243,7 @@
   rtc::CritScope lock(&network_enabled_crit_);
   WriteLockScoped write_lock(*send_crit_);
   for (uint32_t ssrc : config.rtp.ssrcs) {
-    DCHECK(video_send_ssrcs_.find(ssrc) == video_send_ssrcs_.end());
+    RTC_DCHECK(video_send_ssrcs_.find(ssrc) == video_send_ssrcs_.end());
     video_send_ssrcs_[ssrc] = send_stream;
   }
   video_send_streams_.insert(send_stream);
@@ -258,7 +258,7 @@
 
 void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) {
   TRACE_EVENT0("webrtc", "Call::DestroyVideoSendStream");
-  DCHECK(send_stream != nullptr);
+  RTC_DCHECK(send_stream != nullptr);
 
   send_stream->Stop();
 
@@ -276,7 +276,7 @@
     }
     video_send_streams_.erase(send_stream_impl);
   }
-  CHECK(send_stream_impl != nullptr);
+  RTC_CHECK(send_stream_impl != nullptr);
 
   VideoSendStream::RtpStateMap rtp_state = send_stream_impl->GetRtpStates();
 
@@ -302,8 +302,8 @@
   // while changing network state.
   rtc::CritScope lock(&network_enabled_crit_);
   WriteLockScoped write_lock(*receive_crit_);
-  DCHECK(video_receive_ssrcs_.find(config.rtp.remote_ssrc) ==
-      video_receive_ssrcs_.end());
+  RTC_DCHECK(video_receive_ssrcs_.find(config.rtp.remote_ssrc) ==
+             video_receive_ssrcs_.end());
   video_receive_ssrcs_[config.rtp.remote_ssrc] = receive_stream;
   // TODO(pbos): Configure different RTX payloads per receive payload.
   VideoReceiveStream::Config::Rtp::RtxMap::const_iterator it =
@@ -326,7 +326,7 @@
 void Call::DestroyVideoReceiveStream(
     webrtc::VideoReceiveStream* receive_stream) {
   TRACE_EVENT0("webrtc", "Call::DestroyVideoReceiveStream");
-  DCHECK(receive_stream != nullptr);
+  RTC_DCHECK(receive_stream != nullptr);
   VideoReceiveStream* receive_stream_impl = nullptr;
   {
     WriteLockScoped write_lock(*receive_crit_);
@@ -336,7 +336,7 @@
     while (it != video_receive_ssrcs_.end()) {
       if (it->second == static_cast<VideoReceiveStream*>(receive_stream)) {
         if (receive_stream_impl != nullptr)
-          DCHECK(receive_stream_impl == it->second);
+          RTC_DCHECK(receive_stream_impl == it->second);
         receive_stream_impl = it->second;
         video_receive_ssrcs_.erase(it++);
       } else {
@@ -344,7 +344,7 @@
       }
     }
     video_receive_streams_.erase(receive_stream_impl);
-    CHECK(receive_stream_impl != nullptr);
+    RTC_CHECK(receive_stream_impl != nullptr);
     ConfigureSync(receive_stream_impl->config().sync_group);
   }
   delete receive_stream_impl;
@@ -376,9 +376,9 @@
 void Call::SetBitrateConfig(
     const webrtc::Call::Config::BitrateConfig& bitrate_config) {
   TRACE_EVENT0("webrtc", "Call::SetBitrateConfig");
-  DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
+  RTC_DCHECK_GE(bitrate_config.min_bitrate_bps, 0);
   if (bitrate_config.max_bitrate_bps != -1)
-    DCHECK_GT(bitrate_config.max_bitrate_bps, 0);
+    RTC_DCHECK_GT(bitrate_config.max_bitrate_bps, 0);
   if (config_.bitrate_config.min_bitrate_bps ==
           bitrate_config.min_bitrate_bps &&
       (bitrate_config.start_bitrate_bps <= 0 ||
diff --git a/webrtc/video/call_perf_tests.cc b/webrtc/video/call_perf_tests.cc
index a301452..bbf4caa 100644
--- a/webrtc/video/call_perf_tests.cc
+++ b/webrtc/video/call_perf_tests.cc
@@ -548,7 +548,7 @@
                                  const PacketTime& packet_time) override {
       VideoSendStream::Stats stats = send_stream_->GetStats();
       if (stats.substreams.size() > 0) {
-        DCHECK_EQ(1u, stats.substreams.size());
+        RTC_DCHECK_EQ(1u, stats.substreams.size());
         int bitrate_kbps =
             stats.substreams.begin()->second.total_bitrate_bps / 1000;
         if (bitrate_kbps > 0) {
@@ -595,7 +595,7 @@
       if (pad_to_min_bitrate_) {
         encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
       } else {
-        DCHECK_EQ(0, encoder_config->min_transmit_bitrate_bps);
+        RTC_DCHECK_EQ(0, encoder_config->min_transmit_bitrate_bps);
       }
     }
 
diff --git a/webrtc/video/encoded_frame_callback_adapter.cc b/webrtc/video/encoded_frame_callback_adapter.cc
index 1261ad5..6726a37 100644
--- a/webrtc/video/encoded_frame_callback_adapter.cc
+++ b/webrtc/video/encoded_frame_callback_adapter.cc
@@ -26,7 +26,7 @@
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
-  DCHECK(observer_ != nullptr);
+  RTC_DCHECK(observer_ != nullptr);
   FrameType frame_type =
         VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
   const EncodedFrame frame(encodedImage._buffer,
diff --git a/webrtc/video/end_to_end_tests.cc b/webrtc/video/end_to_end_tests.cc
index a71c2e0..7485dc9 100644
--- a/webrtc/video/end_to_end_tests.cc
+++ b/webrtc/video/end_to_end_tests.cc
@@ -1386,7 +1386,7 @@
 
    protected:
     void Wait() override {
-      DCHECK(observer_ != nullptr);
+      RTC_DCHECK(observer_ != nullptr);
       EXPECT_EQ(EventTypeWrapper::kEventSignaled, observer_->Wait());
     }
 
@@ -2234,7 +2234,7 @@
     }
 
     bool CheckSendStats() {
-      DCHECK(send_stream_ != nullptr);
+      RTC_DCHECK(send_stream_ != nullptr);
       VideoSendStream::Stats stats = send_stream_->GetStats();
 
       send_stats_filled_["NumStreams"] |=
diff --git a/webrtc/video/full_stack.cc b/webrtc/video/full_stack.cc
index 1fee087..3fb1db6 100644
--- a/webrtc/video/full_stack.cc
+++ b/webrtc/video/full_stack.cc
@@ -77,7 +77,7 @@
     // spare cores.
 
     uint32_t num_cores = CpuInfo::DetectNumberOfCores();
-    DCHECK_GE(num_cores, 1u);
+    RTC_DCHECK_GE(num_cores, 1u);
     static const uint32_t kMinCoresLeft = 4;
     static const uint32_t kMaxComparisonThreads = 8;
 
@@ -500,8 +500,8 @@
 
   void PrintSamplesToFile(void) {
     FILE* out = fopen(graph_data_output_filename_.c_str(), "w");
-    CHECK(out != nullptr)
-        << "Couldn't open file: " << graph_data_output_filename_;
+    RTC_CHECK(out != nullptr) << "Couldn't open file: "
+                              << graph_data_output_filename_;
 
     rtc::CritScope crit(&comparison_lock_);
     std::sort(samples_.begin(), samples_.end(),
diff --git a/webrtc/video/rampup_tests.cc b/webrtc/video/rampup_tests.cc
index fb533cb..d308f2d 100644
--- a/webrtc/video/rampup_tests.cc
+++ b/webrtc/video/rampup_tests.cc
@@ -92,7 +92,7 @@
 void StreamObserver::OnReceiveBitrateChanged(
     const std::vector<unsigned int>& ssrcs, unsigned int bitrate) {
   rtc::CritScope lock(&crit_);
-  DCHECK_GT(expected_bitrate_bps_, 0u);
+  RTC_DCHECK_GT(expected_bitrate_bps_, 0u);
   if (start_bitrate_bps_ != 0) {
     // For tests with an explicitly set start bitrate, verify the first
     // bitrate estimate is close to the start bitrate and lower than the
@@ -119,7 +119,7 @@
   EXPECT_TRUE(rtp_parser_->Parse(packet, length, &header));
   receive_stats_->IncomingPacket(header, length, false);
   payload_registry_->SetIncomingPayloadType(header);
-  DCHECK(remote_bitrate_estimator_ != nullptr);
+  RTC_DCHECK(remote_bitrate_estimator_ != nullptr);
   remote_bitrate_estimator_->IncomingPacket(
       clock_->TimeInMilliseconds(), length - header.headerLength, header, true);
   if (remote_bitrate_estimator_->TimeUntilNextProcess() <= 0) {
@@ -303,7 +303,7 @@
 void LowRateStreamObserver::EvolveTestState(unsigned int bitrate_bps) {
   int64_t now = clock_->TimeInMilliseconds();
   rtc::CritScope lock(&crit_);
-  DCHECK(send_stream_ != nullptr);
+  RTC_DCHECK(send_stream_ != nullptr);
   switch (test_state_) {
     case kFirstRampup: {
       EXPECT_FALSE(suspended_in_stats_);
diff --git a/webrtc/video/receive_statistics_proxy.cc b/webrtc/video/receive_statistics_proxy.cc
index eba28f5..b6063a8 100644
--- a/webrtc/video/receive_statistics_proxy.cc
+++ b/webrtc/video/receive_statistics_proxy.cc
@@ -103,7 +103,7 @@
     const webrtc::RtcpStatistics& statistics,
     uint32_t ssrc) {
   rtc::CritScope lock(&crit_);
-  // TODO(pbos): Handle both local and remote ssrcs here and DCHECK that we
+  // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
   // receive stats from one of them.
   if (stats_.ssrc != ssrc)
     return;
@@ -113,7 +113,7 @@
 
 void ReceiveStatisticsProxy::CNameChanged(const char* cname, uint32_t ssrc) {
   rtc::CritScope lock(&crit_);
-  // TODO(pbos): Handle both local and remote ssrcs here and DCHECK that we
+  // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we
   // receive stats from one of them.
   if (stats_.ssrc != ssrc)
     return;
diff --git a/webrtc/video/replay.cc b/webrtc/video/replay.cc
index 6f0703b..05d9df0 100644
--- a/webrtc/video/replay.cc
+++ b/webrtc/video/replay.cc
@@ -196,7 +196,7 @@
  public:
   explicit DecoderBitstreamFileWriter(const char* filename)
       : file_(fopen(filename, "wb")) {
-    DCHECK(file_ != nullptr);
+    RTC_DCHECK(file_ != nullptr);
   }
   ~DecoderBitstreamFileWriter() { fclose(file_); }
 
diff --git a/webrtc/video/rtc_event_log.cc b/webrtc/video/rtc_event_log.cc
index eb4340d..7086b3e 100644
--- a/webrtc/video/rtc_event_log.cc
+++ b/webrtc/video/rtc_event_log.cc
@@ -352,11 +352,11 @@
     auto debug_event = event.mutable_debug_event();
     debug_event->set_type(ConvertDebugEvent(DebugEvent::kLogEnd));
     // Store the event and close the file
-    DCHECK(file_->Open());
+    RTC_DCHECK(file_->Open());
     StoreToFile(&event);
     file_->CloseFile();
   }
-  DCHECK(!file_->Open());
+  RTC_DCHECK(!file_->Open());
   stream_.Clear();
 }
 
@@ -376,7 +376,7 @@
   if (stream_.stream_size() < 1) {
     stream_.add_stream();
   }
-  DCHECK_EQ(stream_.stream_size(), 1);
+  RTC_DCHECK_EQ(stream_.stream_size(), 1);
   stream_.mutable_stream(0)->Swap(event);
   // TODO(terelius): Doesn't this create a new EventStream per event?
   // Is this guaranteed to work e.g. in future versions of protobuf?
diff --git a/webrtc/video/rtc_event_log_unittest.cc b/webrtc/video/rtc_event_log_unittest.cc
index 647d29d..7a2bd11 100644
--- a/webrtc/video/rtc_event_log_unittest.cc
+++ b/webrtc/video/rtc_event_log_unittest.cc
@@ -290,7 +290,7 @@
                          uint32_t csrcs_count,
                          uint8_t* packet,
                          size_t packet_size) {
-  CHECK_GE(packet_size, 16 + 4 * csrcs_count + 4 * kNumExtensions);
+  RTC_CHECK_GE(packet_size, 16 + 4 * csrcs_count + 4 * kNumExtensions);
   Clock* clock = Clock::GetRealTimeClock();
 
   RTPSender rtp_sender(0,         // int32_t id
diff --git a/webrtc/video/screenshare_loopback.cc b/webrtc/video/screenshare_loopback.cc
index a221e9c..2dfadd1 100644
--- a/webrtc/video/screenshare_loopback.cc
+++ b/webrtc/video/screenshare_loopback.cc
@@ -154,14 +154,15 @@
 class ScreenshareLoopback : public test::Loopback {
  public:
   explicit ScreenshareLoopback(const Config& config) : Loopback(config) {
-    CHECK_GE(config.num_temporal_layers, 1u);
-    CHECK_LE(config.num_temporal_layers, 2u);
-    CHECK_GE(config.num_spatial_layers, 1u);
-    CHECK_LE(config.num_spatial_layers, 5u);
-    CHECK(config.num_spatial_layers == 1 || config.codec == "VP9");
-    CHECK(config.num_spatial_layers == 1 || config.num_temporal_layers == 1);
-    CHECK_LT(config.tl_discard_threshold, config.num_temporal_layers);
-    CHECK_LT(config.sl_discard_threshold, config.num_spatial_layers);
+    RTC_CHECK_GE(config.num_temporal_layers, 1u);
+    RTC_CHECK_LE(config.num_temporal_layers, 2u);
+    RTC_CHECK_GE(config.num_spatial_layers, 1u);
+    RTC_CHECK_LE(config.num_spatial_layers, 5u);
+    RTC_CHECK(config.num_spatial_layers == 1 || config.codec == "VP9");
+    RTC_CHECK(config.num_spatial_layers == 1 ||
+              config.num_temporal_layers == 1);
+    RTC_CHECK_LT(config.tl_discard_threshold, config.num_temporal_layers);
+    RTC_CHECK_LT(config.sl_discard_threshold, config.num_spatial_layers);
 
     vp8_settings_ = VideoEncoder::GetDefaultVp8Settings();
     vp8_settings_.denoisingOn = false;
@@ -216,12 +217,12 @@
     // Fixed for input resolution for prerecorded screenshare content.
     const size_t kWidth = 1850;
     const size_t kHeight = 1110;
-    CHECK_LE(flags::Width(), kWidth);
-    CHECK_LE(flags::Height(), kHeight);
-    CHECK_GT(flags::SlideChangeInterval(), 0);
+    RTC_CHECK_LE(flags::Width(), kWidth);
+    RTC_CHECK_LE(flags::Height(), kHeight);
+    RTC_CHECK_GT(flags::SlideChangeInterval(), 0);
     const int kPauseDurationMs =
         (flags::SlideChangeInterval() - flags::ScrollDuration()) * 1000;
-    CHECK_LE(flags::ScrollDuration(), flags::SlideChangeInterval());
+    RTC_CHECK_LE(flags::ScrollDuration(), flags::SlideChangeInterval());
 
     test::FrameGenerator* frame_generator =
         test::FrameGenerator::CreateScrollingInputFromYuvFiles(
diff --git a/webrtc/video/send_statistics_proxy.cc b/webrtc/video/send_statistics_proxy.cc
index e60614c..505dc07 100644
--- a/webrtc/video/send_statistics_proxy.cc
+++ b/webrtc/video/send_statistics_proxy.cc
@@ -225,8 +225,8 @@
     uint32_t ssrc) {
   rtc::CritScope lock(&crit_);
   VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc);
-  DCHECK(stats != nullptr) << "DataCountersUpdated reported for unknown ssrc: "
-                           << ssrc;
+  RTC_DCHECK(stats != nullptr)
+      << "DataCountersUpdated reported for unknown ssrc: " << ssrc;
 
   stats->rtp_stats = counters;
 }
diff --git a/webrtc/video/transport_adapter.cc b/webrtc/video/transport_adapter.cc
index 225d436..e5c9f61 100644
--- a/webrtc/video/transport_adapter.cc
+++ b/webrtc/video/transport_adapter.cc
@@ -17,7 +17,7 @@
 
 TransportAdapter::TransportAdapter(newapi::Transport* transport)
     : transport_(transport), enabled_(0) {
-  DCHECK(nullptr != transport);
+  RTC_DCHECK(nullptr != transport);
 }
 
 int TransportAdapter::SendPacket(int /*channel*/,
diff --git a/webrtc/video/video_decoder.cc b/webrtc/video/video_decoder.cc
index 0a5df7d..e8dc5f1 100644
--- a/webrtc/video/video_decoder.cc
+++ b/webrtc/video/video_decoder.cc
@@ -20,7 +20,7 @@
 VideoDecoder* VideoDecoder::Create(VideoDecoder::DecoderType codec_type) {
   switch (codec_type) {
     case kH264:
-      DCHECK(H264Decoder::IsSupported());
+      RTC_DCHECK(H264Decoder::IsSupported());
       return H264Decoder::Create();
     case kVp8:
       return VP8Decoder::Create();
@@ -64,7 +64,7 @@
 }
 
 bool VideoDecoderSoftwareFallbackWrapper::InitFallbackDecoder() {
-  CHECK(decoder_type_ != kUnsupportedCodec)
+  RTC_CHECK(decoder_type_ != kUnsupportedCodec)
       << "Decoder requesting fallback to codec not supported in software.";
   LOG(LS_WARNING) << "Decoder falling back to software decoding.";
   fallback_decoder_.reset(VideoDecoder::Create(decoder_type_));
diff --git a/webrtc/video/video_encoder.cc b/webrtc/video/video_encoder.cc
index 8847a10..305406b 100644
--- a/webrtc/video/video_encoder.cc
+++ b/webrtc/video/video_encoder.cc
@@ -20,7 +20,7 @@
 VideoEncoder* VideoEncoder::Create(VideoEncoder::EncoderType codec_type) {
   switch (codec_type) {
     case kH264:
-      DCHECK(H264Encoder::IsSupported());
+      RTC_DCHECK(H264Encoder::IsSupported());
       return H264Encoder::Create();
     case kVp8:
       return VP8Encoder::Create();
diff --git a/webrtc/video/video_receive_stream.cc b/webrtc/video/video_receive_stream.cc
index 9f0e26f..efa97c7 100644
--- a/webrtc/video/video_receive_stream.cc
+++ b/webrtc/video/video_receive_stream.cc
@@ -139,7 +139,7 @@
       clock_(Clock::GetRealTimeClock()),
       channel_group_(channel_group),
       channel_id_(channel_id) {
-  CHECK(channel_group_->CreateReceiveChannel(
+  RTC_CHECK(channel_group_->CreateReceiveChannel(
       channel_id_, 0, &transport_adapter_, num_cpu_cores));
 
   vie_channel_ = channel_group_->GetChannel(channel_id_);
@@ -150,17 +150,17 @@
   vie_channel_->SetKeyFrameRequestMethod(kKeyFrameReqPliRtcp);
   SetRtcpMode(config_.rtp.rtcp_mode);
 
-  DCHECK(config_.rtp.remote_ssrc != 0);
+  RTC_DCHECK(config_.rtp.remote_ssrc != 0);
   // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
-  DCHECK(config_.rtp.local_ssrc != 0);
-  DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);
+  RTC_DCHECK(config_.rtp.local_ssrc != 0);
+  RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);
 
   vie_channel_->SetSSRC(config_.rtp.local_ssrc, kViEStreamTypeNormal, 0);
   // TODO(pbos): Support multiple RTX, per video payload.
   Config::Rtp::RtxMap::const_iterator it = config_.rtp.rtx.begin();
   for (; it != config_.rtp.rtx.end(); ++it) {
-    DCHECK(it->second.ssrc != 0);
-    DCHECK(it->second.payload_type != 0);
+    RTC_DCHECK(it->second.ssrc != 0);
+    RTC_DCHECK(it->second.payload_type != 0);
 
     vie_channel_->SetRemoteSSRCType(kViEStreamTypeRtx, it->second.ssrc);
     vie_channel_->SetRtxReceivePayloadType(it->second.payload_type, it->first);
@@ -174,16 +174,17 @@
     const std::string& extension = config_.rtp.extensions[i].name;
     int id = config_.rtp.extensions[i].id;
     // One-byte-extension local identifiers are in the range 1-14 inclusive.
-    DCHECK_GE(id, 1);
-    DCHECK_LE(id, 14);
+    RTC_DCHECK_GE(id, 1);
+    RTC_DCHECK_LE(id, 14);
     if (extension == RtpExtension::kTOffset) {
-      CHECK_EQ(0, vie_channel_->SetReceiveTimestampOffsetStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetReceiveTimestampOffsetStatus(true, id));
     } else if (extension == RtpExtension::kAbsSendTime) {
-      CHECK_EQ(0, vie_channel_->SetReceiveAbsoluteSendTimeStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetReceiveAbsoluteSendTimeStatus(true, id));
     } else if (extension == RtpExtension::kVideoRotation) {
-      CHECK_EQ(0, vie_channel_->SetReceiveVideoRotationStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetReceiveVideoRotationStatus(true, id));
     } else if (extension == RtpExtension::kTransportSequenceNumber) {
-      CHECK_EQ(0, vie_channel_->SetReceiveTransportSequenceNumber(true, id));
+      RTC_CHECK_EQ(0,
+                   vie_channel_->SetReceiveTransportSequenceNumber(true, id));
     } else {
       RTC_NOTREACHED() << "Unsupported RTP extension.";
     }
@@ -191,13 +192,13 @@
 
   if (config_.rtp.fec.ulpfec_payload_type != -1) {
     // ULPFEC without RED doesn't make sense.
-    DCHECK(config_.rtp.fec.red_payload_type != -1);
+    RTC_DCHECK(config_.rtp.fec.red_payload_type != -1);
     VideoCodec codec;
     memset(&codec, 0, sizeof(codec));
     codec.codecType = kVideoCodecULPFEC;
     strcpy(codec.plName, "ulpfec");
     codec.plType = config_.rtp.fec.ulpfec_payload_type;
-    CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
+    RTC_CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
   }
   if (config_.rtp.fec.red_payload_type != -1) {
     VideoCodec codec;
@@ -205,7 +206,7 @@
     codec.codecType = kVideoCodecRED;
     strcpy(codec.plName, "red");
     codec.plType = config_.rtp.fec.red_payload_type;
-    CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
+    RTC_CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
     if (config_.rtp.fec.red_rtx_payload_type != -1) {
       vie_channel_->SetRtxReceivePayloadType(
           config_.rtp.fec.red_rtx_payload_type,
@@ -225,17 +226,18 @@
   vie_channel_->RegisterReceiveChannelRtpStatisticsCallback(stats_proxy_.get());
   vie_channel_->RegisterRtcpPacketTypeCounterObserver(stats_proxy_.get());
 
-  DCHECK(!config_.decoders.empty());
+  RTC_DCHECK(!config_.decoders.empty());
   for (size_t i = 0; i < config_.decoders.size(); ++i) {
     const Decoder& decoder = config_.decoders[i];
-    CHECK_EQ(0, vie_channel_->RegisterExternalDecoder(
-                    decoder.payload_type, decoder.decoder, decoder.is_renderer,
-                    decoder.is_renderer ? decoder.expected_delay_ms
-                                        : config.render_delay_ms));
+    RTC_CHECK_EQ(0,
+                 vie_channel_->RegisterExternalDecoder(
+                     decoder.payload_type, decoder.decoder, decoder.is_renderer,
+                     decoder.is_renderer ? decoder.expected_delay_ms
+                                         : config.render_delay_ms));
 
     VideoCodec codec = CreateDecoderVideoCodec(decoder);
 
-    CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
+    RTC_CHECK_EQ(0, vie_channel_->SetReceiveCodec(codec));
   }
 
   incoming_video_stream_.reset(new IncomingVideoStream(0));
diff --git a/webrtc/video/video_send_stream.cc b/webrtc/video/video_send_stream.cc
index 42ad774..2ab4eaa 100644
--- a/webrtc/video/video_send_stream.cc
+++ b/webrtc/video/video_send_stream.cc
@@ -117,9 +117,9 @@
       channel_id_(channel_id),
       use_config_bitrate_(true),
       stats_proxy_(Clock::GetRealTimeClock(), config) {
-  DCHECK(!config_.rtp.ssrcs.empty());
-  CHECK(channel_group->CreateSendChannel(channel_id_, 0, &transport_adapter_,
-                                         num_cpu_cores, config_.rtp.ssrcs));
+  RTC_DCHECK(!config_.rtp.ssrcs.empty());
+  RTC_CHECK(channel_group->CreateSendChannel(
+      channel_id_, 0, &transport_adapter_, num_cpu_cores, config_.rtp.ssrcs));
   vie_channel_ = channel_group_->GetChannel(channel_id_);
   vie_encoder_ = channel_group_->GetEncoder(channel_id_);
 
@@ -127,16 +127,16 @@
     const std::string& extension = config_.rtp.extensions[i].name;
     int id = config_.rtp.extensions[i].id;
     // One-byte-extension local identifiers are in the range 1-14 inclusive.
-    DCHECK_GE(id, 1);
-    DCHECK_LE(id, 14);
+    RTC_DCHECK_GE(id, 1);
+    RTC_DCHECK_LE(id, 14);
     if (extension == RtpExtension::kTOffset) {
-      CHECK_EQ(0, vie_channel_->SetSendTimestampOffsetStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetSendTimestampOffsetStatus(true, id));
     } else if (extension == RtpExtension::kAbsSendTime) {
-      CHECK_EQ(0, vie_channel_->SetSendAbsoluteSendTimeStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetSendAbsoluteSendTimeStatus(true, id));
     } else if (extension == RtpExtension::kVideoRotation) {
-      CHECK_EQ(0, vie_channel_->SetSendVideoRotationStatus(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetSendVideoRotationStatus(true, id));
     } else if (extension == RtpExtension::kTransportSequenceNumber) {
-      CHECK_EQ(0, vie_channel_->SetSendTransportSequenceNumber(true, id));
+      RTC_CHECK_EQ(0, vie_channel_->SetSendTransportSequenceNumber(true, id));
     } else {
       RTC_NOTREACHED() << "Registering unsupported RTP extension.";
     }
@@ -164,18 +164,18 @@
       &stats_proxy_, this));
 
   // 28 to match packet overhead in ModuleRtpRtcpImpl.
-  DCHECK_LE(config_.rtp.max_packet_size, static_cast<size_t>(0xFFFF - 28));
+  RTC_DCHECK_LE(config_.rtp.max_packet_size, static_cast<size_t>(0xFFFF - 28));
   vie_channel_->SetMTU(static_cast<uint16_t>(config_.rtp.max_packet_size + 28));
 
-  DCHECK(config.encoder_settings.encoder != nullptr);
-  DCHECK_GE(config.encoder_settings.payload_type, 0);
-  DCHECK_LE(config.encoder_settings.payload_type, 127);
-  CHECK_EQ(0, vie_encoder_->RegisterExternalEncoder(
-                  config.encoder_settings.encoder,
-                  config.encoder_settings.payload_type,
-                  config.encoder_settings.internal_source));
+  RTC_DCHECK(config.encoder_settings.encoder != nullptr);
+  RTC_DCHECK_GE(config.encoder_settings.payload_type, 0);
+  RTC_DCHECK_LE(config.encoder_settings.payload_type, 127);
+  RTC_CHECK_EQ(0, vie_encoder_->RegisterExternalEncoder(
+                      config.encoder_settings.encoder,
+                      config.encoder_settings.payload_type,
+                      config.encoder_settings.internal_source));
 
-  CHECK(ReconfigureVideoEncoder(encoder_config));
+  RTC_CHECK(ReconfigureVideoEncoder(encoder_config));
 
   vie_channel_->RegisterSendSideDelayObserver(&stats_proxy_);
   vie_encoder_->RegisterSendStatisticsProxy(&stats_proxy_);
@@ -251,8 +251,8 @@
   TRACE_EVENT0("webrtc", "VideoSendStream::(Re)configureVideoEncoder");
   LOG(LS_INFO) << "(Re)configureVideoEncoder: " << config.ToString();
   const std::vector<VideoStream>& streams = config.streams;
-  DCHECK(!streams.empty());
-  DCHECK_GE(config_.rtp.ssrcs.size(), streams.size());
+  RTC_DCHECK(!streams.empty());
+  RTC_DCHECK_GE(config_.rtp.ssrcs.size(), streams.size());
 
   VideoCodec video_codec;
   memset(&video_codec, 0, sizeof(video_codec));
@@ -311,7 +311,7 @@
     }
   } else {
     // TODO(pbos): Support encoder_settings codec-agnostically.
-    DCHECK(config.encoder_specific_settings == nullptr)
+    RTC_DCHECK(config.encoder_specific_settings == nullptr)
         << "Encoder-specific settings for codec type not wired up.";
   }
 
@@ -323,18 +323,18 @@
   video_codec.numberOfSimulcastStreams =
       static_cast<unsigned char>(streams.size());
   video_codec.minBitrate = streams[0].min_bitrate_bps / 1000;
-  DCHECK_LE(streams.size(), static_cast<size_t>(kMaxSimulcastStreams));
+  RTC_DCHECK_LE(streams.size(), static_cast<size_t>(kMaxSimulcastStreams));
   for (size_t i = 0; i < streams.size(); ++i) {
     SimulcastStream* sim_stream = &video_codec.simulcastStream[i];
-    DCHECK_GT(streams[i].width, 0u);
-    DCHECK_GT(streams[i].height, 0u);
-    DCHECK_GT(streams[i].max_framerate, 0);
+    RTC_DCHECK_GT(streams[i].width, 0u);
+    RTC_DCHECK_GT(streams[i].height, 0u);
+    RTC_DCHECK_GT(streams[i].max_framerate, 0);
     // Different framerates not supported per stream at the moment.
-    DCHECK_EQ(streams[i].max_framerate, streams[0].max_framerate);
-    DCHECK_GE(streams[i].min_bitrate_bps, 0);
-    DCHECK_GE(streams[i].target_bitrate_bps, streams[i].min_bitrate_bps);
-    DCHECK_GE(streams[i].max_bitrate_bps, streams[i].target_bitrate_bps);
-    DCHECK_GE(streams[i].max_qp, 0);
+    RTC_DCHECK_EQ(streams[i].max_framerate, streams[0].max_framerate);
+    RTC_DCHECK_GE(streams[i].min_bitrate_bps, 0);
+    RTC_DCHECK_GE(streams[i].target_bitrate_bps, streams[i].min_bitrate_bps);
+    RTC_DCHECK_GE(streams[i].max_bitrate_bps, streams[i].target_bitrate_bps);
+    RTC_DCHECK_GE(streams[i].max_qp, 0);
 
     sim_stream->width = static_cast<unsigned short>(streams[i].width);
     sim_stream->height = static_cast<unsigned short>(streams[i].height);
@@ -361,7 +361,7 @@
   // the bitrate controller is already set from Call.
   video_codec.startBitrate = 0;
 
-  DCHECK_GT(streams[0].max_framerate, 0);
+  RTC_DCHECK_GT(streams[0].max_framerate, 0);
   video_codec.maxFramerate = streams[0].max_framerate;
 
   if (!SetSendCodec(video_codec))
@@ -373,7 +373,7 @@
     stats_proxy_.OnInactiveSsrc(config_.rtp.ssrcs[i]);
   }
 
-  DCHECK_GE(config.min_transmit_bitrate_bps, 0);
+  RTC_DCHECK_GE(config.min_transmit_bitrate_bps, 0);
   vie_encoder_->SetMinTransmitBitrate(config.min_transmit_bitrate_bps / 1000);
 
   encoder_config_ = config;
@@ -415,7 +415,7 @@
   }
 
   // Set up RTX.
-  DCHECK_EQ(config_.rtp.rtx.ssrcs.size(), config_.rtp.ssrcs.size());
+  RTC_DCHECK_EQ(config_.rtp.rtx.ssrcs.size(), config_.rtp.ssrcs.size());
   for (size_t i = 0; i < config_.rtp.rtx.ssrcs.size(); ++i) {
     uint32_t ssrc = config_.rtp.rtx.ssrcs[i];
     vie_channel_->SetSSRC(config_.rtp.rtx.ssrcs[i], kViEStreamTypeRtx,
@@ -425,7 +425,7 @@
       vie_channel_->SetRtpStateForSsrc(ssrc, it->second);
   }
 
-  DCHECK_GE(config_.rtp.rtx.payload_type, 0);
+  RTC_DCHECK_GE(config_.rtp.rtx.payload_type, 0);
   vie_channel_->SetRtxSendPayloadType(config_.rtp.rtx.payload_type,
                                       config_.encoder_settings.payload_type);
 }
diff --git a/webrtc/video/video_send_stream_tests.cc b/webrtc/video/video_send_stream_tests.cc
index c558099..a70490a 100644
--- a/webrtc/video/video_send_stream_tests.cc
+++ b/webrtc/video/video_send_stream_tests.cc
@@ -511,7 +511,7 @@
           current_size_frame_(static_cast<int32_t>(start_size)) {
       // Fragmentation required, this test doesn't make sense without it.
       encoder_.SetFrameSize(start_size);
-      DCHECK_GT(stop_size, max_packet_size);
+      RTC_DCHECK_GT(stop_size, max_packet_size);
       transport_adapter_.Enable();
     }
 
@@ -969,7 +969,7 @@
       RTPHeader header;
       if (!parser_->Parse(packet, length, &header))
         return DELIVERY_PACKET_ERROR;
-      DCHECK(stream_ != nullptr);
+      RTC_DCHECK(stream_ != nullptr);
       VideoSendStream::Stats stats = stream_->GetStats();
       if (!stats.substreams.empty()) {
         EXPECT_EQ(1u, stats.substreams.size());
@@ -1754,7 +1754,7 @@
         encoded._frameType = (*frame_types)[i];
         encoded._encodedWidth = kEncodedResolution[i].width;
         encoded._encodedHeight = kEncodedResolution[i].height;
-        DCHECK(callback_ != nullptr);
+        RTC_DCHECK(callback_ != nullptr);
         if (callback_->Encoded(encoded, &specifics, nullptr) != 0)
           return -1;
       }
diff --git a/webrtc/video_engine/encoder_state_feedback.cc b/webrtc/video_engine/encoder_state_feedback.cc
index 55a0c43..4d744ac 100644
--- a/webrtc/video_engine/encoder_state_feedback.cc
+++ b/webrtc/video_engine/encoder_state_feedback.cc
@@ -56,10 +56,10 @@
 
 void EncoderStateFeedback::AddEncoder(const std::vector<uint32_t>& ssrcs,
                                       ViEEncoder* encoder) {
-  DCHECK(!ssrcs.empty());
+  RTC_DCHECK(!ssrcs.empty());
   CriticalSectionScoped lock(crit_.get());
   for (uint32_t ssrc : ssrcs) {
-    DCHECK(encoders_.find(ssrc) == encoders_.end());
+    RTC_DCHECK(encoders_.find(ssrc) == encoders_.end());
     encoders_[ssrc] = encoder;
   }
 }
diff --git a/webrtc/video_engine/overuse_frame_detector.cc b/webrtc/video_engine/overuse_frame_detector.cc
index 4724865..441b106 100644
--- a/webrtc/video_engine/overuse_frame_detector.cc
+++ b/webrtc/video_engine/overuse_frame_detector.cc
@@ -214,7 +214,7 @@
       usage_(new SendProcessingUsage(options)),
       frame_queue_(new FrameQueue()),
       last_sample_time_ms_(0) {
-  DCHECK(metrics_observer != nullptr);
+  RTC_DCHECK(metrics_observer != nullptr);
   // Make sure stats are initially up-to-date. This simplifies unit testing
   // since we don't have to trigger an update using one of the methods which
   // would also alter the overuse state.
@@ -243,7 +243,7 @@
 }
 
 int64_t OveruseFrameDetector::TimeUntilNextProcess() {
-  DCHECK(processing_thread_.CalledOnValidThread());
+  RTC_DCHECK(processing_thread_.CalledOnValidThread());
   return next_process_time_ - clock_->TimeInMilliseconds();
 }
 
@@ -328,7 +328,7 @@
 }
 
 int32_t OveruseFrameDetector::Process() {
-  DCHECK(processing_thread_.CalledOnValidThread());
+  RTC_DCHECK(processing_thread_.CalledOnValidThread());
 
   int64_t now = clock_->TimeInMilliseconds();
 
diff --git a/webrtc/video_engine/vie_channel.cc b/webrtc/video_engine/vie_channel.cc
index 70c4476..e941326 100644
--- a/webrtc/video_engine/vie_channel.cc
+++ b/webrtc/video_engine/vie_channel.cc
@@ -157,7 +157,7 @@
   if (sender_) {
     std::list<RtpRtcp*> send_rtp_modules(1, rtp_rtcp_modules_[0]);
     send_payload_router_->SetSendingRtpModules(send_rtp_modules);
-    DCHECK(!send_payload_router_->active());
+    RTC_DCHECK(!send_payload_router_->active());
   }
   if (vcm_->RegisterReceiveCallback(this) != 0) {
     return -1;
@@ -331,7 +331,7 @@
 
 int32_t ViEChannel::SetSendCodec(const VideoCodec& video_codec,
                                  bool new_stream) {
-  DCHECK(sender_);
+  RTC_DCHECK(sender_);
   if (video_codec.codecType == kVideoCodecRED ||
       video_codec.codecType == kVideoCodecULPFEC) {
     LOG_F(LS_ERROR) << "Not a valid send codec " << video_codec.codecType;
@@ -415,7 +415,7 @@
 }
 
 int32_t ViEChannel::SetReceiveCodec(const VideoCodec& video_codec) {
-  DCHECK(!sender_);
+  RTC_DCHECK(!sender_);
   if (!vie_receiver_.SetReceiveCodec(video_codec)) {
     return -1;
   }
@@ -436,7 +436,7 @@
                                             VideoDecoder* decoder,
                                             bool buffered_rendering,
                                             int32_t render_delay) {
-  DCHECK(!sender_);
+  RTC_DCHECK(!sender_);
   int32_t result;
   result = vcm_->RegisterExternalDecoder(decoder, pl_type, buffered_rendering);
   if (result != VCM_OK) {
@@ -446,7 +446,7 @@
 }
 
 int32_t ViEChannel::DeRegisterExternalDecoder(const uint8_t pl_type) {
-  DCHECK(!sender_);
+  RTC_DCHECK(!sender_);
   VideoCodec current_receive_codec;
   int32_t result = 0;
   result = vcm_->ReceiveCodec(&current_receive_codec);
@@ -488,13 +488,13 @@
                                    int payload_type_fec) {
   // Validate payload types.
   if (enable_fec) {
-    DCHECK_GE(payload_type_red, 0);
-    DCHECK_GE(payload_type_fec, 0);
-    DCHECK_LE(payload_type_red, 127);
-    DCHECK_LE(payload_type_fec, 127);
+    RTC_DCHECK_GE(payload_type_red, 0);
+    RTC_DCHECK_GE(payload_type_fec, 0);
+    RTC_DCHECK_LE(payload_type_red, 127);
+    RTC_DCHECK_LE(payload_type_fec, 127);
   } else {
-    DCHECK_EQ(payload_type_red, -1);
-    DCHECK_EQ(payload_type_fec, -1);
+    RTC_DCHECK_EQ(payload_type_red, -1);
+    RTC_DCHECK_EQ(payload_type_fec, -1);
     // Set to valid uint8_ts to be castable later without signed overflows.
     payload_type_red = 0;
     payload_type_fec = 0;
@@ -707,7 +707,7 @@
 }
 
 void ViEChannel::SetTransmissionSmoothingStatus(bool enable) {
-  DCHECK(paced_sender_ && "No paced sender registered.");
+  RTC_DCHECK(paced_sender_ && "No paced sender registered.");
   paced_sender_->SetStatus(enable);
 }
 
@@ -734,7 +734,7 @@
 }
 
 int32_t ViEChannel::GetLocalSSRC(uint8_t idx, unsigned int* ssrc) {
-  DCHECK_LE(idx, rtp_rtcp_modules_.size());
+  RTC_DCHECK_LE(idx, rtp_rtcp_modules_.size());
   *ssrc = rtp_rtcp_modules_[idx]->SSRC();
   return 0;
 }
@@ -765,7 +765,7 @@
 }
 
 void ViEChannel::SetRtpStateForSsrc(uint32_t ssrc, const RtpState& rtp_state) {
-  DCHECK(!rtp_rtcp_modules_[0]->Sending());
+  RTC_DCHECK(!rtp_rtcp_modules_[0]->Sending());
   for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
     if (rtp_rtcp->SetRtpStateForSsrc(ssrc, rtp_state))
       return;
@@ -773,7 +773,7 @@
 }
 
 RtpState ViEChannel::GetRtpStateForSsrc(uint32_t ssrc) {
-  DCHECK(!rtp_rtcp_modules_[0]->Sending());
+  RTC_DCHECK(!rtp_rtcp_modules_[0]->Sending());
   RtpState rtp_state;
   for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
     if (rtp_rtcp->GetRtpStateForSsrc(ssrc, &rtp_state))
@@ -785,7 +785,7 @@
 
 // TODO(pbos): Set CNAME on all modules.
 int32_t ViEChannel::SetRTCPCName(const char* rtcp_cname) {
-  DCHECK(!rtp_rtcp_modules_[0]->Sending());
+  RTC_DCHECK(!rtp_rtcp_modules_[0]->Sending());
   return rtp_rtcp_modules_[0]->SetCNAME(rtcp_cname);
 }
 
@@ -1150,7 +1150,7 @@
     FrameCountObserver* send_frame_count_observer,
     SendSideDelayObserver* send_side_delay_observer,
     size_t num_modules) {
-  DCHECK_GT(num_modules, 0u);
+  RTC_DCHECK_GT(num_modules, 0u);
   RtpRtcp::Configuration configuration;
   ReceiveStatistics* null_receive_statistics = configuration.receive_statistics;
   configuration.id = id;
@@ -1186,7 +1186,7 @@
 }
 
 void ViEChannel::StartDecodeThread() {
-  DCHECK(!sender_);
+  RTC_DCHECK(!sender_);
   // Start the decode thread
   if (decode_thread_)
     return;
@@ -1245,14 +1245,14 @@
 }
 
 void ViEChannel::OnIncomingSSRCChanged(const int32_t id, const uint32_t ssrc) {
-  DCHECK_EQ(channel_id_, ChannelId(id));
+  RTC_DCHECK_EQ(channel_id_, ChannelId(id));
   rtp_rtcp_modules_[0]->SetRemoteSSRC(ssrc);
 }
 
 void ViEChannel::OnIncomingCSRCChanged(const int32_t id,
                                        const uint32_t CSRC,
                                        const bool added) {
-  DCHECK_EQ(channel_id_, ChannelId(id));
+  RTC_DCHECK_EQ(channel_id_, ChannelId(id));
   CriticalSectionScoped cs(crit_.get());
 }
 
diff --git a/webrtc/video_engine/vie_channel_group.cc b/webrtc/video_engine/vie_channel_group.cc
index 60db171..5c55aaa 100644
--- a/webrtc/video_engine/vie_channel_group.cc
+++ b/webrtc/video_engine/vie_channel_group.cc
@@ -180,9 +180,9 @@
   process_thread_->DeRegisterModule(call_stats_.get());
   process_thread_->DeRegisterModule(remote_bitrate_estimator_.get());
   call_stats_->DeregisterStatsObserver(remote_bitrate_estimator_.get());
-  DCHECK(channel_map_.empty());
-  DCHECK(!remb_->InUse());
-  DCHECK(vie_encoder_map_.empty());
+  RTC_DCHECK(channel_map_.empty());
+  RTC_DCHECK(!remb_->InUse());
+  RTC_DCHECK(vie_encoder_map_.empty());
 }
 
 bool ChannelGroup::CreateSendChannel(int channel_id,
@@ -190,7 +190,7 @@
                                      Transport* transport,
                                      int number_of_cores,
                                      const std::vector<uint32_t>& ssrcs) {
-  DCHECK(!ssrcs.empty());
+  RTC_DCHECK(!ssrcs.empty());
   rtc::scoped_ptr<ViEEncoder> vie_encoder(
       new ViEEncoder(channel_id, number_of_cores, *process_thread_,
                      pacer_.get(), bitrate_allocator_.get()));
@@ -303,7 +303,7 @@
 
 ViEChannel* ChannelGroup::PopChannel(int channel_id) {
   ChannelMap::iterator c_it = channel_map_.find(channel_id);
-  DCHECK(c_it != channel_map_.end());
+  RTC_DCHECK(c_it != channel_map_.end());
   ViEChannel* channel = c_it->second;
   channel_map_.erase(c_it);
 
diff --git a/webrtc/video_engine/vie_encoder.cc b/webrtc/video_engine/vie_encoder.cc
index 4dbb0f0..81ab8dc 100644
--- a/webrtc/video_engine/vie_encoder.cc
+++ b/webrtc/video_engine/vie_encoder.cc
@@ -160,7 +160,7 @@
 void ViEEncoder::StartThreadsAndSetSharedMembers(
     rtc::scoped_refptr<PayloadRouter> send_payload_router,
     VCMProtectionCallback* vcm_protection_callback) {
-  DCHECK(send_payload_router_ == NULL);
+  RTC_DCHECK(send_payload_router_ == NULL);
 
   send_payload_router_ = send_payload_router;
   vcm_->RegisterProtectionCallback(vcm_protection_callback);
@@ -254,7 +254,7 @@
 }
 
 int32_t ViEEncoder::SetEncoder(const webrtc::VideoCodec& video_codec) {
-  DCHECK(send_payload_router_ != NULL);
+  RTC_DCHECK(send_payload_router_ != NULL);
   // Setting target width and height for VPM.
   if (vpm_->SetTargetResolution(video_codec.width, video_codec.height,
                                 video_codec.maxFramerate) != VPM_OK) {
@@ -414,7 +414,7 @@
 }
 
 void ViEEncoder::DeliverFrame(VideoFrame video_frame) {
-  DCHECK(send_payload_router_ != NULL);
+  RTC_DCHECK(send_payload_router_ != NULL);
   if (!send_payload_router_->active()) {
     // We've paused or we have no channels attached, don't waste resources on
     // encoding.
@@ -519,7 +519,7 @@
 }
 
 int32_t ViEEncoder::UpdateProtectionMethod(bool nack, bool fec) {
-  DCHECK(send_payload_router_ != NULL);
+  RTC_DCHECK(send_payload_router_ != NULL);
 
   if (fec_enabled_ == fec && nack_enabled_ == nack) {
     // No change needed, we're already in correct state.
@@ -587,7 +587,7 @@
     const EncodedImage& encoded_image,
     const webrtc::RTPFragmentationHeader& fragmentation_header,
     const RTPVideoHeader* rtp_video_hdr) {
-  DCHECK(send_payload_router_ != NULL);
+  RTC_DCHECK(send_payload_router_ != NULL);
 
   {
     CriticalSectionScoped cs(data_cs_.get());
@@ -723,7 +723,7 @@
   LOG(LS_VERBOSE) << "OnNetworkChanged, bitrate" << bitrate_bps
                   << " packet loss " << static_cast<int>(fraction_lost)
                   << " rtt " << round_trip_time_ms;
-  DCHECK(send_payload_router_ != NULL);
+  RTC_DCHECK(send_payload_router_ != NULL);
   vcm_->SetChannelParameters(bitrate_bps, fraction_lost, round_trip_time_ms);
   bool video_is_suspended = vcm_->VideoSuspended();
 
diff --git a/webrtc/video_frame.h b/webrtc/video_frame.h
index d70a746..b71e0aa 100644
--- a/webrtc/video_frame.h
+++ b/webrtc/video_frame.h
@@ -27,7 +27,7 @@
              VideoRotation rotation);
 
   // TODO(pbos): Make all create/copy functions void, they should not be able to
-  // fail (which should be DCHECK/CHECKed instead).
+  // fail (which should be RTC_DCHECK/RTC_CHECKed instead).
 
   // CreateEmptyFrame: Sets frame dimensions and allocates buffers based
   // on set dimensions - height and plane stride.
diff --git a/webrtc/voice_engine/test/auto_test/fakes/loudest_filter.cc b/webrtc/voice_engine/test/auto_test/fakes/loudest_filter.cc
index 29dda63..9d7239e 100644
--- a/webrtc/voice_engine/test/auto_test/fakes/loudest_filter.cc
+++ b/webrtc/voice_engine/test/auto_test/fakes/loudest_filter.cc
@@ -68,7 +68,7 @@
   }
 
   unsigned int quietest_ssrc = FindQuietestStream();
-  CHECK_NE(0u, quietest_ssrc);
+  RTC_CHECK_NE(0u, quietest_ssrc);
   // A smaller value if audio level corresponds to a louder sound.
   if (audio_level < stream_levels_[quietest_ssrc].audio_level) {
     stream_levels_.erase(quietest_ssrc);
diff --git a/webrtc/voice_engine/voe_network_impl.cc b/webrtc/voice_engine/voe_network_impl.cc
index 17e0664..2ff6b6a 100644
--- a/webrtc/voice_engine/voe_network_impl.cc
+++ b/webrtc/voice_engine/voe_network_impl.cc
@@ -37,7 +37,7 @@
 
 int VoENetworkImpl::RegisterExternalTransport(int channel,
                                               Transport& transport) {
-  DCHECK(_shared->statistics().Initialized());
+  RTC_DCHECK(_shared->statistics().Initialized());
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channelPtr = ch.channel();
   if (!channelPtr) {
@@ -48,7 +48,7 @@
 }
 
 int VoENetworkImpl::DeRegisterExternalTransport(int channel) {
-  CHECK(_shared->statistics().Initialized());
+  RTC_CHECK(_shared->statistics().Initialized());
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
   voe::Channel* channelPtr = ch.channel();
   if (!channelPtr) {
@@ -68,8 +68,8 @@
                                       const void* data,
                                       size_t length,
                                       const PacketTime& packet_time) {
-  CHECK(_shared->statistics().Initialized());
-  CHECK(data);
+  RTC_CHECK(_shared->statistics().Initialized());
+  RTC_CHECK(data);
   // L16 at 32 kHz, stereo, 10 ms frames (+12 byte RTP header) -> 1292 bytes
   if ((length < 12) || (length > 1292)) {
     LOG_F(LS_ERROR) << "Invalid packet length: " << length;
@@ -92,8 +92,8 @@
 int VoENetworkImpl::ReceivedRTCPPacket(int channel,
                                        const void* data,
                                        size_t length) {
-  CHECK(_shared->statistics().Initialized());
-  CHECK(data);
+  RTC_CHECK(_shared->statistics().Initialized());
+  RTC_CHECK(data);
   if (length < 4) {
     LOG_F(LS_ERROR) << "Invalid packet length: " << length;
     return -1;