Adding a receive side API for buffering mode.
At the same time, renaming the send side API.

Review URL: https://webrtc-codereview.appspot.com/1104004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@3525 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/modules/video_coding/main/interface/video_coding.h b/modules/video_coding/main/interface/video_coding.h
index 77bd9ce..1593fb8 100644
--- a/modules/video_coding/main/interface/video_coding.h
+++ b/modules/video_coding/main/interface/video_coding.h
@@ -553,6 +553,10 @@
     virtual void SetNackSettings(size_t max_nack_list_size,
                                  int max_packet_age_to_nack) = 0;
 
+    // Sets a desired delay for the VCM receiver. Video rendering will be
+    // delayed by at least desired_delay_ms.
+    virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
+
     // Enables recording of debugging information.
     virtual int StartDebugRecording(const char* file_name_utf8) = 0;
 
diff --git a/modules/video_coding/main/source/jitter_buffer.cc b/modules/video_coding/main/source/jitter_buffer.cc
index d8aefe8..8257a53 100644
--- a/modules/video_coding/main/source/jitter_buffer.cc
+++ b/modules/video_coding/main/source/jitter_buffer.cc
@@ -772,10 +772,9 @@
   return ret;
 }
 
-void VCMJitterBuffer::EnableMaxJitterEstimate(bool enable,
-                                              uint32_t initial_delay_ms) {
+void VCMJitterBuffer::SetMaxJitterEstimate(uint32_t initial_delay_ms) {
   CriticalSectionScoped cs(crit_sect_);
-  jitter_estimate_.EnableMaxJitterEstimate(enable, initial_delay_ms);
+  jitter_estimate_.SetMaxJitterEstimate(initial_delay_ms);
 }
 
 uint32_t VCMJitterBuffer::EstimatedJitterMs() {
diff --git a/modules/video_coding/main/source/jitter_buffer.h b/modules/video_coding/main/source/jitter_buffer.h
index e0b7c47..82f490f 100644
--- a/modules/video_coding/main/source/jitter_buffer.h
+++ b/modules/video_coding/main/source/jitter_buffer.h
@@ -127,10 +127,10 @@
   VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame,
                                   const VCMPacket& packet);
 
-  // Enable a max filter on the jitter estimate, and setting of the initial
-  // delay (only when in max mode). When disabled (default), the last jitter
+  // Enable a max filter on the jitter estimate by setting an initial
+  // non-zero delay. When set to zero (default), the last jitter
   // estimate will be used.
-  void EnableMaxJitterEstimate(bool enable, uint32_t initial_delay_ms);
+  void SetMaxJitterEstimate(uint32_t initial_delay_ms);
 
   // Returns the estimated jitter in milliseconds.
   uint32_t EstimatedJitterMs();
diff --git a/modules/video_coding/main/source/jitter_buffer_common.h b/modules/video_coding/main/source/jitter_buffer_common.h
index 2bfbd60..c981e0e 100644
--- a/modules/video_coding/main/source/jitter_buffer_common.h
+++ b/modules/video_coding/main/source/jitter_buffer_common.h
@@ -15,7 +15,7 @@
 
 namespace webrtc {
 
-enum { kMaxNumberOfFrames     = 100 };
+enum { kMaxNumberOfFrames     = 300 };
 enum { kStartNumberOfFrames   = 6 };
 enum { kMaxVideoDelayMs       = 2000 };
 
diff --git a/modules/video_coding/main/source/jitter_buffer_unittest.cc b/modules/video_coding/main/source/jitter_buffer_unittest.cc
index 3d2ad0c..5aea512 100644
--- a/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/modules/video_coding/main/source/jitter_buffer_unittest.cc
@@ -277,25 +277,15 @@
   InsertFrame(kVideoFrameDelta);
   EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
   // Set kMaxEstimate with a 2 seconds initial delay.
-  jitter_buffer_->EnableMaxJitterEstimate(true, 2000u);
+  jitter_buffer_->SetMaxJitterEstimate(2000u);
   EXPECT_EQ(2000u, jitter_buffer_->EstimatedJitterMs());
   InsertFrame(kVideoFrameDelta);
   EXPECT_EQ(2000u, jitter_buffer_->EstimatedJitterMs());
-  // Set kMaxEstimate with a 0S initial delay.
-  jitter_buffer_->EnableMaxJitterEstimate(true, 0u);
-  EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
   // Jitter cannot decrease.
   InsertFrames(2, kVideoFrameDelta);
   uint32_t je1 = jitter_buffer_->EstimatedJitterMs();
   InsertFrames(2, kVideoFrameDelta);
   EXPECT_GE(je1, jitter_buffer_->EstimatedJitterMs());
-
-  // Set kLastEstimate mode (initial delay is arbitrary in this case and will
-  // be ignored).
-  jitter_buffer_->EnableMaxJitterEstimate(false, 2000u);
-  EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
-  InsertFrames(10, kVideoFrameDelta);
-  EXPECT_GT(20u, jitter_buffer_->EstimatedJitterMs());
 }
 
 TEST_F(TestJitterBufferNack, TestEmptyPackets) {
diff --git a/modules/video_coding/main/source/jitter_estimator.cc b/modules/video_coding/main/source/jitter_estimator.cc
index 68a60da..3c82575 100644
--- a/modules/video_coding/main/source/jitter_estimator.cc
+++ b/modules/video_coding/main/source/jitter_estimator.cc
@@ -409,10 +409,9 @@
     }
 }
 
-void VCMJitterEstimator::EnableMaxJitterEstimate(bool enable,
-                                              uint32_t initial_delay_ms)
+void VCMJitterEstimator::SetMaxJitterEstimate(uint32_t initial_delay_ms)
 {
-    if (enable) {
+    if (initial_delay_ms > 0) {
         _maxJitterEstimateMs = initial_delay_ms;
         _jitterEstimateMode = kMaxEstimate;
     } else {
diff --git a/modules/video_coding/main/source/jitter_estimator.h b/modules/video_coding/main/source/jitter_estimator.h
index 44a3455..77d6b6d 100644
--- a/modules/video_coding/main/source/jitter_estimator.h
+++ b/modules/video_coding/main/source/jitter_estimator.h
@@ -64,10 +64,10 @@
 
     void UpdateMaxFrameSize(WebRtc_UWord32 frameSizeBytes);
 
-    // Enable a max filter on the jitter estimate, and setting of the initial
-    // delay (only when in max mode). When disabled (default), the last jitter
+    // Set a max filter on the jitter estimate by setting an initial
+    // non-zero delay. When set to zero (default), the last jitter
     // estimate will be used.
-    void EnableMaxJitterEstimate(bool enable, uint32_t initial_delay_ms);
+    void SetMaxJitterEstimate(uint32_t initial_delay_ms);
 
     // A constant describing the delay from the jitter buffer
     // to the delay on the receiving side which is not accounted
diff --git a/modules/video_coding/main/source/receiver.cc b/modules/video_coding/main/source/receiver.cc
index fc5357f..7835366 100644
--- a/modules/video_coding/main/source/receiver.cc
+++ b/modules/video_coding/main/source/receiver.cc
@@ -21,6 +21,8 @@
 
 namespace webrtc {
 
+enum { kMaxReceiverDelayMs = 10000 };
+
 VCMReceiver::VCMReceiver(VCMTiming* timing,
                          Clock* clock,
                          int32_t vcm_id,
@@ -34,7 +36,8 @@
       jitter_buffer_(clock_, vcm_id, receiver_id, master),
       timing_(timing),
       render_wait_event_(),
-      state_(kPassive) {}
+      state_(kPassive),
+      max_video_delay_ms_(kMaxVideoDelayMs) {}
 
 VCMReceiver::~VCMReceiver() {
   render_wait_event_.Set();
@@ -108,20 +111,21 @@
       jitter_buffer_.Flush();
       timing_->Reset(clock_->TimeInMilliseconds());
       return VCM_FLUSH_INDICATOR;
-    } else if (render_time_ms < now_ms - kMaxVideoDelayMs) {
+    } else if (render_time_ms < now_ms - max_video_delay_ms_) {
       WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                    VCMId(vcm_id_, receiver_id_),
                    "This frame should have been rendered more than %u ms ago."
                    "Flushing jitter buffer and resetting timing.",
-                   kMaxVideoDelayMs);
+                   max_video_delay_ms_);
       jitter_buffer_.Flush();
       timing_->Reset(clock_->TimeInMilliseconds());
       return VCM_FLUSH_INDICATOR;
-    } else if (timing_->TargetVideoDelay() > kMaxVideoDelayMs) {
+    } else if (static_cast<int>(timing_->TargetVideoDelay()) >
+               max_video_delay_ms_) {
       WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                    VCMId(vcm_id_, receiver_id_),
                    "More than %u ms target delay. Flushing jitter buffer and"
-                   "resetting timing.", kMaxVideoDelayMs);
+                   "resetting timing.", max_video_delay_ms_);
       jitter_buffer_.Flush();
       timing_->Reset(clock_->TimeInMilliseconds());
       return VCM_FLUSH_INDICATOR;
@@ -402,6 +406,17 @@
   return state_;
 }
 
+int VCMReceiver::SetMinReceiverDelay(int desired_delay_ms) {
+  CriticalSectionScoped cs(crit_sect_);
+  if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
+    return -1;
+  }
+  jitter_buffer_.SetMaxJitterEstimate(desired_delay_ms);
+  max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
+  timing_->SetMaxVideoDelay(max_video_delay_ms_);
+  return 0;
+}
+
 void VCMReceiver::UpdateState(VCMReceiverState new_state) {
   CriticalSectionScoped cs(crit_sect_);
   assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));
diff --git a/modules/video_coding/main/source/receiver.h b/modules/video_coding/main/source/receiver.h
index 492d616..f790fd2 100644
--- a/modules/video_coding/main/source/receiver.h
+++ b/modules/video_coding/main/source/receiver.h
@@ -69,6 +69,9 @@
                            VCMReceiver& dual_receiver) const;
   VCMReceiverState State() const;
 
+  // Receiver video delay.
+  int SetMinReceiverDelay(int desired_delay_ms);
+
  private:
   VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
                                     int64_t nextrender_time_ms,
@@ -90,6 +93,7 @@
   VCMTiming* timing_;
   VCMEvent render_wait_event_;
   VCMReceiverState state_;
+  int max_video_delay_ms_;
 
   static int32_t receiver_id_counter_;
 };
diff --git a/modules/video_coding/main/source/timing.cc b/modules/video_coding/main/source/timing.cc
index 36131b1..26bda7e 100644
--- a/modules/video_coding/main/source/timing.cc
+++ b/modules/video_coding/main/source/timing.cc
@@ -34,7 +34,8 @@
 _minTotalDelayMs(0),
 _requiredDelayMs(0),
 _currentDelayMs(0),
-_prevFrameTimestamp(0)
+_prevFrameTimestamp(0),
+_maxVideoDelayMs(kMaxVideoDelayMs)
 {
     if (masterTiming == NULL)
     {
@@ -131,7 +132,7 @@
         WebRtc_Word64 delayDiffMs = static_cast<WebRtc_Word64>(targetDelayMs) -
                                     _currentDelayMs;
         // Never change the delay with more than 100 ms every second. If we're changing the
-        // delay in too large steps we will get noticable freezes. By limiting the change we
+        // delay in too large steps we will get noticeable freezes. By limiting the change we
         // can increase the delay in smaller steps, which will be experienced as the video is
         // played in slow motion. When lowering the delay the video will be played at a faster
         // pace.
@@ -249,7 +250,7 @@
 {
     WebRtc_Word64 estimatedCompleteTimeMs =
             _tsExtrapolator->ExtrapolateLocalTime(frameTimestamp);
-    if (estimatedCompleteTimeMs - nowMs > kMaxVideoDelayMs)
+    if (estimatedCompleteTimeMs - nowMs > _maxVideoDelayMs)
     {
         if (_master)
         {
@@ -323,6 +324,12 @@
     return static_cast<WebRtc_Word32>(availableProcessingTimeMs) - maxDecodeTimeMs > 0;
 }
 
+void VCMTiming::SetMaxVideoDelay(int maxVideoDelayMs)
+{
+    CriticalSectionScoped cs(_critSect);
+    _maxVideoDelayMs = maxVideoDelayMs;
+}
+
 WebRtc_UWord32
 VCMTiming::TargetVideoDelay() const
 {
diff --git a/modules/video_coding/main/source/timing.h b/modules/video_coding/main/source/timing.h
index ac650ec..d1d9cac 100644
--- a/modules/video_coding/main/source/timing.h
+++ b/modules/video_coding/main/source/timing.h
@@ -82,6 +82,9 @@
     // certain amount of processing time.
     bool EnoughTimeToDecode(WebRtc_UWord32 availableProcessingTimeMs) const;
 
+    // Set the max allowed video delay.
+    void SetMaxVideoDelay(int maxVideoDelayMs);
+
     enum { kDefaultRenderDelayMs = 10 };
     enum { kDelayMaxChangeMsPerS = 100 };
 
@@ -104,6 +107,7 @@
     WebRtc_UWord32                _requiredDelayMs;
     WebRtc_UWord32                _currentDelayMs;
     WebRtc_UWord32                _prevFrameTimestamp;
+    int                           _maxVideoDelayMs;
 };
 
 } // namespace webrtc
diff --git a/modules/video_coding/main/source/video_coding_impl.cc b/modules/video_coding/main/source/video_coding_impl.cc
index 25e6c5f..0fb82bb 100644
--- a/modules/video_coding/main/source/video_coding_impl.cc
+++ b/modules/video_coding/main/source/video_coding_impl.cc
@@ -1389,6 +1389,10 @@
                                 max_packet_age_to_nack);
 }
 
+int VideoCodingModuleImpl::SetMinReceiverDelay(int desired_delay_ms) {
+  return _receiver.SetMinReceiverDelay(desired_delay_ms);
+}
+
 int VideoCodingModuleImpl::StartDebugRecording(const char* file_name_utf8) {
   CriticalSectionScoped cs(_sendCritSect);
   _encoderInputFile = fopen(file_name_utf8, "wb");
diff --git a/modules/video_coding/main/source/video_coding_impl.h b/modules/video_coding/main/source/video_coding_impl.h
index 24a1f83..e27a922 100644
--- a/modules/video_coding/main/source/video_coding_impl.h
+++ b/modules/video_coding/main/source/video_coding_impl.h
@@ -262,6 +262,9 @@
     virtual void SetNackSettings(size_t max_nack_list_size,
                                  int max_packet_age_to_nack);
 
+    // Set the video delay for the receiver (default = 0).
+    virtual int SetMinReceiverDelay(int desired_delay_ms);
+
     // Enables recording of debugging information.
     virtual int StartDebugRecording(const char* file_name_utf8);
 
diff --git a/modules/video_coding/main/source/video_coding_impl_unittest.cc b/modules/video_coding/main/source/video_coding_impl_unittest.cc
index 14878e5..576ff17 100644
--- a/modules/video_coding/main/source/video_coding_impl_unittest.cc
+++ b/modules/video_coding/main/source/video_coding_impl_unittest.cc
@@ -287,4 +287,11 @@
   }
 }
 
+TEST_F(TestVideoCodingModule, ReceiverDelay) {
+  EXPECT_EQ(0, vcm_->SetMinReceiverDelay(0));
+  EXPECT_EQ(0, vcm_->SetMinReceiverDelay(5000));
+  EXPECT_EQ(-1, vcm_->SetMinReceiverDelay(-100));
+  EXPECT_EQ(-1, vcm_->SetMinReceiverDelay(10010));
+}
+
 }  // namespace webrtc
diff --git a/video_engine/include/vie_rtp_rtcp.h b/video_engine/include/vie_rtp_rtcp.h
index 88c04e0..a178ea1 100644
--- a/video_engine/include/vie_rtp_rtcp.h
+++ b/video_engine/include/vie_rtp_rtcp.h
@@ -199,11 +199,15 @@
                                      const unsigned char payload_typeRED,
                                      const unsigned char payload_typeFEC) = 0;
 
-  // Enables send side support for delayed video streaming (actual delay will
+  // Sets send side support for delayed video buffering (actual delay will
   // be exhibited on the receiver side).
   // Target delay should be set to zero for real-time mode.
-  virtual int EnableSenderStreamingMode(int video_channel,
-                                        int target_delay_ms) = 0;
+  virtual int SetSenderBufferingMode(int video_channel,
+                                     int target_delay_ms) = 0;
+  // Sets receive side support for delayed video buffering. Target delay should
+  // be set to zero for real-time mode.
+  virtual int SetReceiverBufferingMode(int video_channel,
+                                       int target_delay_ms) = 0;
 
   // This function enables RTCP key frame requests.
   virtual int SetKeyFrameRequestMethod(
diff --git a/video_engine/stream_synchronization.cc b/video_engine/stream_synchronization.cc
index 11caf3d..a7e3b25 100644
--- a/video_engine/stream_synchronization.cc
+++ b/video_engine/stream_synchronization.cc
@@ -20,7 +20,7 @@
 
 const int kMaxVideoDiffMs = 80;
 const int kMaxAudioDiffMs = 80;
-const int kMaxDelay = 1500;
+const int kMaxDeltaDelayMs = 1500;
 
 struct ViESyncDelay {
   ViESyncDelay() {
@@ -42,7 +42,8 @@
                                              int video_channel_id)
     : channel_delay_(new ViESyncDelay),
       audio_channel_id_(audio_channel_id),
-      video_channel_id_(video_channel_id) {}
+      video_channel_id_(video_channel_id),
+      base_target_delay_ms_(0) {}
 
 StreamSynchronization::~StreamSynchronization() {
   delete channel_delay_;
@@ -76,7 +77,8 @@
   *relative_delay_ms = video_measurement.latest_receive_time_ms -
       audio_measurement.latest_receive_time_ms -
       (video_last_capture_time_ms - audio_last_capture_time_ms);
-  if (*relative_delay_ms > 1000 || *relative_delay_ms < -1000) {
+  if (*relative_delay_ms > kMaxDeltaDelayMs ||
+      *relative_delay_ms < -kMaxDeltaDelayMs) {
     return false;
   }
   return true;
@@ -98,11 +100,10 @@
   WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideo, video_channel_id_,
                "Current diff is: %d for audio channel: %d",
                relative_delay_ms, audio_channel_id_);
-
   int current_diff_ms = *total_video_delay_target_ms - current_audio_delay_ms +
       relative_delay_ms;
 
-  int video_delay_ms = 0;
+  int video_delay_ms = base_target_delay_ms_;
   if (current_diff_ms > 0) {
     // The minimum video delay is longer than the current audio delay.
     // We need to decrease extra video delay, if we have added extra delay
@@ -126,7 +127,7 @@
       }
       channel_delay_->last_video_delay_ms = video_delay_ms;
       channel_delay_->last_sync_delay = -1;
-      channel_delay_->extra_audio_delay_ms = 0;
+      channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
     } else {  // channel_delay_->extra_video_delay_ms > 0
       // We have no extra video delay to remove, increase the audio delay.
       if (channel_delay_->last_sync_delay >= 0) {
@@ -137,12 +138,14 @@
           // due to NetEQ maximum changes.
           audio_diff_ms = kMaxAudioDiffMs;
         }
-        // Increase the audio delay
+        // Increase the audio delay.
         channel_delay_->extra_audio_delay_ms += audio_diff_ms;
 
         // Don't set a too high delay.
-        if (channel_delay_->extra_audio_delay_ms > kMaxDelay) {
-          channel_delay_->extra_audio_delay_ms = kMaxDelay;
+        if (channel_delay_->extra_audio_delay_ms >
+            base_target_delay_ms_ + kMaxDeltaDelayMs) {
+          channel_delay_->extra_audio_delay_ms =
+              base_target_delay_ms_ + kMaxDeltaDelayMs;
         }
 
         // Don't add any extra video delay.
@@ -153,7 +156,7 @@
       } else {  // channel_delay_->last_sync_delay >= 0
         // First time after a delay change, don't add any extra delay.
         // This is to not toggle back and forth too much.
-        channel_delay_->extra_audio_delay_ms = 0;
+        channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
         // Set minimum video delay
         video_delay_ms = *total_video_delay_target_ms;
         channel_delay_->extra_video_delay_ms = 0;
@@ -161,14 +164,13 @@
         channel_delay_->last_sync_delay = 0;
       }
     }
-  } else {  // if (current_diffMS > 0)
+  } else {  // if (current_diff_ms > 0)
     // The minimum video delay is lower than the current audio delay.
     // We need to decrease possible extra audio delay, or
     // add extra video delay.
-
-    if (channel_delay_->extra_audio_delay_ms > 0) {
-      // We have extra delay in VoiceEngine
-      // Start with decreasing the voice delay
+    if (channel_delay_->extra_audio_delay_ms > base_target_delay_ms_) {
+      // We have extra delay in VoiceEngine.
+      // Start with decreasing the voice delay.
       int audio_diff_ms = current_diff_ms / 2;
       if (audio_diff_ms < -1 * kMaxAudioDiffMs) {
         // Don't change the delay too much at once.
@@ -179,7 +181,7 @@
 
       if (channel_delay_->extra_audio_delay_ms < 0) {
         // Negative values not allowed.
-        channel_delay_->extra_audio_delay_ms = 0;
+        channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
         channel_delay_->last_sync_delay = 0;
       } else {
         // There is more audio delay to use for the next round.
@@ -192,7 +194,7 @@
       channel_delay_->last_video_delay_ms = video_delay_ms;
     } else {  // channel_delay_->extra_audio_delay_ms > 0
       // We have no extra delay in VoiceEngine, increase the video delay.
-      channel_delay_->extra_audio_delay_ms = 0;
+      channel_delay_->extra_audio_delay_ms = base_target_delay_ms_;
 
       // Make the difference positive.
       int video_diff_ms = -1 * current_diff_ms;
@@ -202,27 +204,27 @@
       if (video_delay_ms > channel_delay_->last_video_delay_ms) {
         if (video_delay_ms >
             channel_delay_->last_video_delay_ms + kMaxVideoDiffMs) {
-          // Don't increase the delay too much at once
+          // Don't increase the delay too much at once.
           video_delay_ms =
               channel_delay_->last_video_delay_ms + kMaxVideoDiffMs;
         }
-        // Verify we don't go above the maximum allowed delay
-        if (video_delay_ms > kMaxDelay) {
-          video_delay_ms = kMaxDelay;
+        // Verify we don't go above the maximum allowed delay.
+        if (video_delay_ms > base_target_delay_ms_ + kMaxDeltaDelayMs) {
+          video_delay_ms = base_target_delay_ms_ + kMaxDeltaDelayMs;
         }
       } else {
         if (video_delay_ms <
             channel_delay_->last_video_delay_ms - kMaxVideoDiffMs) {
-          // Don't decrease the delay too much at once
+          // Don't decrease the delay too much at once.
           video_delay_ms =
               channel_delay_->last_video_delay_ms - kMaxVideoDiffMs;
         }
-        // Verify we don't go below the minimum delay
+        // Verify we don't go below the minimum delay.
         if (video_delay_ms < *total_video_delay_target_ms) {
           video_delay_ms = *total_video_delay_target_ms;
         }
       }
-      // Store the values
+      // Store the values.
       channel_delay_->extra_video_delay_ms =
           video_delay_ms - *total_video_delay_target_ms;
       channel_delay_->last_video_delay_ms = video_delay_ms;
@@ -245,4 +247,15 @@
       *total_video_delay_target_ms : video_delay_ms;
   return true;
 }
+
+void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) {
+  // Video is already delayed by the desired amount.
+  base_target_delay_ms_ = target_delay_ms;
+  // Setting initial extra delay for audio.
+  channel_delay_->extra_audio_delay_ms += target_delay_ms;
+  // The video delay is compared to the last value (and how much we can update
+  // is limited by that as well).
+  channel_delay_->last_video_delay_ms += target_delay_ms;
+}
+
 }  // namespace webrtc
diff --git a/video_engine/stream_synchronization.h b/video_engine/stream_synchronization.h
index 25a370c..9b7780c 100644
--- a/video_engine/stream_synchronization.h
+++ b/video_engine/stream_synchronization.h
@@ -43,11 +43,15 @@
   static bool ComputeRelativeDelay(const Measurements& audio_measurement,
                                    const Measurements& video_measurement,
                                    int* relative_delay_ms);
+  // Set target buffering delay - all audio and video will be delayed by at
+  // least target_delay_ms.
+  void SetTargetBufferingDelay(int target_delay_ms);
 
  private:
   ViESyncDelay* channel_delay_;
   int audio_channel_id_;
   int video_channel_id_;
+  int base_target_delay_ms_;
 };
 }  // namespace webrtc
 
diff --git a/video_engine/stream_synchronization_unittest.cc b/video_engine/stream_synchronization_unittest.cc
index f693b75..49629f5 100644
--- a/video_engine/stream_synchronization_unittest.cc
+++ b/video_engine/stream_synchronization_unittest.cc
@@ -120,9 +120,9 @@
 
     // Capture an audio and a video frame at the same time.
     audio.latest_timestamp = send_time_->NowRtp(audio_frequency,
-                                                      audio_offset);
+                                                audio_offset);
     video.latest_timestamp = send_time_->NowRtp(video_frequency,
-                                                      video_offset);
+                                                video_offset);
 
     if (audio_delay_ms > video_delay_ms) {
       // Audio later than video.
@@ -154,56 +154,57 @@
   // TODO(holmer): This is currently wrong! We should simply change
   // audio_delay_ms or video_delay_ms since those now include VCM and NetEQ
   // delays.
-  void BothDelayedAudioLaterTest() {
-    int current_audio_delay_ms = 0;
-    int audio_delay_ms = 300;
-    int video_delay_ms = 100;
+  void BothDelayedAudioLaterTest(int base_target_delay) {
+    int current_audio_delay_ms = base_target_delay;
+    int audio_delay_ms = base_target_delay + 300;
+    int video_delay_ms = base_target_delay + 100;
     int extra_audio_delay_ms = 0;
-    int total_video_delay_ms = 0;
+    int total_video_delay_ms = base_target_delay;
 
     EXPECT_TRUE(DelayedStreams(audio_delay_ms,
                                video_delay_ms,
                                current_audio_delay_ms,
                                &extra_audio_delay_ms,
                                &total_video_delay_ms));
-    EXPECT_EQ(kMaxVideoDiffMs, total_video_delay_ms);
-    EXPECT_EQ(0, extra_audio_delay_ms);
+    EXPECT_EQ(base_target_delay + kMaxVideoDiffMs, total_video_delay_ms);
+    EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
     current_audio_delay_ms = extra_audio_delay_ms;
 
     send_time_->IncreaseTimeMs(1000);
     receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
                                                   video_delay_ms));
-    // Simulate 0 minimum delay in the VCM.
-    total_video_delay_ms = 0;
+    // Simulate base_target_delay minimum delay in the VCM.
+    total_video_delay_ms = base_target_delay;
     EXPECT_TRUE(DelayedStreams(audio_delay_ms,
                                video_delay_ms,
                                current_audio_delay_ms,
                                &extra_audio_delay_ms,
                                &total_video_delay_ms));
-    EXPECT_EQ(2 * kMaxVideoDiffMs, total_video_delay_ms);
-    EXPECT_EQ(0, extra_audio_delay_ms);
+    EXPECT_EQ(base_target_delay + 2 * kMaxVideoDiffMs, total_video_delay_ms);
+    EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
     current_audio_delay_ms = extra_audio_delay_ms;
 
     send_time_->IncreaseTimeMs(1000);
     receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
                                                   video_delay_ms));
-    // Simulate 0 minimum delay in the VCM.
-    total_video_delay_ms = 0;
+    // Simulate base_target_delay minimum delay in the VCM.
+    total_video_delay_ms = base_target_delay;
     EXPECT_TRUE(DelayedStreams(audio_delay_ms,
                                video_delay_ms,
                                current_audio_delay_ms,
                                &extra_audio_delay_ms,
                                &total_video_delay_ms));
-    EXPECT_EQ(audio_delay_ms - video_delay_ms, total_video_delay_ms);
-    EXPECT_EQ(0, extra_audio_delay_ms);
+    EXPECT_EQ(base_target_delay + audio_delay_ms - video_delay_ms,
+              total_video_delay_ms);
+    EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
 
     // Simulate that NetEQ introduces some audio delay.
-    current_audio_delay_ms = 50;
+    current_audio_delay_ms = base_target_delay + 50;
     send_time_->IncreaseTimeMs(1000);
     receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
                                                   video_delay_ms));
-    // Simulate 0 minimum delay in the VCM.
-    total_video_delay_ms = 0;
+    // Simulate base_target_delay minimum delay in the VCM.
+    total_video_delay_ms = base_target_delay;
     EXPECT_TRUE(DelayedStreams(audio_delay_ms,
                                video_delay_ms,
                                current_audio_delay_ms,
@@ -211,15 +212,15 @@
                                &total_video_delay_ms));
     EXPECT_EQ(audio_delay_ms - video_delay_ms + current_audio_delay_ms,
               total_video_delay_ms);
-    EXPECT_EQ(0, extra_audio_delay_ms);
+    EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
 
     // Simulate that NetEQ reduces its delay.
-    current_audio_delay_ms = 10;
+    current_audio_delay_ms = base_target_delay + 10;
     send_time_->IncreaseTimeMs(1000);
     receive_time_->IncreaseTimeMs(1000 - std::max(audio_delay_ms,
                                                   video_delay_ms));
-    // Simulate 0 minimum delay in the VCM.
-    total_video_delay_ms = 0;
+    // Simulate base_target_delay minimum delay in the VCM.
+    total_video_delay_ms = base_target_delay;
     EXPECT_TRUE(DelayedStreams(audio_delay_ms,
                                video_delay_ms,
                                current_audio_delay_ms,
@@ -227,12 +228,100 @@
                                &total_video_delay_ms));
     EXPECT_EQ(audio_delay_ms - video_delay_ms + current_audio_delay_ms,
               total_video_delay_ms);
-    EXPECT_EQ(0, extra_audio_delay_ms);
+    EXPECT_EQ(base_target_delay, extra_audio_delay_ms);
+  }
+
+  void BothDelayedVideoLaterTest(int base_target_delay) {
+    int current_audio_delay_ms = base_target_delay;
+    int audio_delay_ms = base_target_delay + 100;
+    int video_delay_ms = base_target_delay + 300;
+    int extra_audio_delay_ms = 0;
+    int total_video_delay_ms = base_target_delay;
+
+    EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+                               video_delay_ms,
+                               current_audio_delay_ms,
+                               &extra_audio_delay_ms,
+                               &total_video_delay_ms));
+    EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change more than this in 1 second.
+    EXPECT_EQ(base_target_delay + kMaxAudioDiffMs, extra_audio_delay_ms);
+    current_audio_delay_ms = extra_audio_delay_ms;
+    int current_extra_delay_ms = extra_audio_delay_ms;
+
+    send_time_->IncreaseTimeMs(1000);
+    receive_time_->IncreaseTimeMs(800);
+    EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+                               video_delay_ms,
+                               current_audio_delay_ms,
+                               &extra_audio_delay_ms,
+                               &total_video_delay_ms));
+    EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change more than the half of the
+    // required change in delay.
+    EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+        current_audio_delay_ms,
+        base_target_delay + video_delay_ms - audio_delay_ms),
+        extra_audio_delay_ms);
+    current_audio_delay_ms = extra_audio_delay_ms;
+    current_extra_delay_ms = extra_audio_delay_ms;
+
+    send_time_->IncreaseTimeMs(1000);
+    receive_time_->IncreaseTimeMs(800);
+    EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+                               video_delay_ms,
+                               current_audio_delay_ms,
+                               &extra_audio_delay_ms,
+                               &total_video_delay_ms));
+    EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change more than the half of the
+    // required change in delay.
+    EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+        current_audio_delay_ms,
+        base_target_delay + video_delay_ms - audio_delay_ms),
+        extra_audio_delay_ms);
+    current_extra_delay_ms = extra_audio_delay_ms;
+
+    // Simulate that NetEQ for some reason reduced the delay.
+    current_audio_delay_ms = base_target_delay + 170;
+    send_time_->IncreaseTimeMs(1000);
+    receive_time_->IncreaseTimeMs(800);
+    EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+                               video_delay_ms,
+                               current_audio_delay_ms,
+                               &extra_audio_delay_ms,
+                               &total_video_delay_ms));
+    EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // Since we only can ask NetEQ for a certain amount of extra delay, and
+    // we only measure the total NetEQ delay, we will ask for additional delay
+    // here to try to stay in sync.
+    EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+        current_audio_delay_ms,
+        base_target_delay + video_delay_ms - audio_delay_ms),
+        extra_audio_delay_ms);
+    current_extra_delay_ms = extra_audio_delay_ms;
+
+    // Simulate that NetEQ for some reason significantly increased the delay.
+    current_audio_delay_ms = base_target_delay + 250;
+    send_time_->IncreaseTimeMs(1000);
+    receive_time_->IncreaseTimeMs(800);
+    EXPECT_TRUE(DelayedStreams(audio_delay_ms,
+                               video_delay_ms,
+                               current_audio_delay_ms,
+                               &extra_audio_delay_ms,
+                               &total_video_delay_ms));
+    EXPECT_EQ(base_target_delay, total_video_delay_ms);
+    // The audio delay is not allowed to change more than the half of the
+    // required change in delay.
+    EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
+        current_audio_delay_ms,
+        base_target_delay + video_delay_ms - audio_delay_ms),
+        extra_audio_delay_ms);
   }
 
   int MaxAudioDelayIncrease(int current_audio_delay_ms, int delay_ms) {
     return std::min((delay_ms - current_audio_delay_ms) / 2,
-                    static_cast<int>(kMaxAudioDiffMs));
+                     static_cast<int>(kMaxAudioDiffMs));
   }
 
   int MaxAudioDelayDecrease(int current_audio_delay_ms, int delay_ms) {
@@ -363,100 +452,86 @@
 }
 
 TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) {
-  int current_audio_delay_ms = 0;
-  int audio_delay_ms = 100;
-  int video_delay_ms = 300;
-  int extra_audio_delay_ms = 0;
-  int total_video_delay_ms = 0;
+  BothDelayedVideoLaterTest(0);
+}
 
-  EXPECT_TRUE(DelayedStreams(audio_delay_ms,
-                             video_delay_ms,
-                             current_audio_delay_ms,
-                             &extra_audio_delay_ms,
-                             &total_video_delay_ms));
-  EXPECT_EQ(0, total_video_delay_ms);
-  // The audio delay is not allowed to change more than this in 1 second.
-  EXPECT_EQ(kMaxAudioDiffMs, extra_audio_delay_ms);
-  current_audio_delay_ms = extra_audio_delay_ms;
-  int current_extra_delay_ms = extra_audio_delay_ms;
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterAudioClockDrift) {
+  audio_clock_drift_ = 1.05;
+  BothDelayedVideoLaterTest(0);
+}
 
-  send_time_->IncreaseTimeMs(1000);
-  receive_time_->IncreaseTimeMs(800);
-  EXPECT_TRUE(DelayedStreams(audio_delay_ms,
-                             video_delay_ms,
-                             current_audio_delay_ms,
-                             &extra_audio_delay_ms,
-                             &total_video_delay_ms));
-  EXPECT_EQ(0, total_video_delay_ms);
-  // The audio delay is not allowed to change more than the half of the required
-  // change in delay.
-  EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
-      current_audio_delay_ms, video_delay_ms - audio_delay_ms),
-      extra_audio_delay_ms);
-  current_audio_delay_ms = extra_audio_delay_ms;
-  current_extra_delay_ms = extra_audio_delay_ms;
-
-  send_time_->IncreaseTimeMs(1000);
-  receive_time_->IncreaseTimeMs(800);
-  EXPECT_TRUE(DelayedStreams(audio_delay_ms,
-                             video_delay_ms,
-                             current_audio_delay_ms,
-                             &extra_audio_delay_ms,
-                             &total_video_delay_ms));
-  EXPECT_EQ(0, total_video_delay_ms);
-  // The audio delay is not allowed to change more than the half of the required
-  // change in delay.
-  EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
-      current_audio_delay_ms, video_delay_ms - audio_delay_ms),
-      extra_audio_delay_ms);
-  current_extra_delay_ms = extra_audio_delay_ms;
-
-  // Simulate that NetEQ for some reason reduced the delay.
-  current_audio_delay_ms = 170;
-  send_time_->IncreaseTimeMs(1000);
-  receive_time_->IncreaseTimeMs(800);
-  EXPECT_TRUE(DelayedStreams(audio_delay_ms,
-                             video_delay_ms,
-                             current_audio_delay_ms,
-                             &extra_audio_delay_ms,
-                             &total_video_delay_ms));
-  EXPECT_EQ(0, total_video_delay_ms);
-  // Since we only can ask NetEQ for a certain amount of extra delay, and
-  // we only measure the total NetEQ delay, we will ask for additional delay
-  // here to try to stay in sync.
-  EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
-      current_audio_delay_ms, video_delay_ms - audio_delay_ms),
-      extra_audio_delay_ms);
-  current_extra_delay_ms = extra_audio_delay_ms;
-
-  // Simulate that NetEQ for some reason significantly increased the delay.
-  current_audio_delay_ms = 250;
-  send_time_->IncreaseTimeMs(1000);
-  receive_time_->IncreaseTimeMs(800);
-  EXPECT_TRUE(DelayedStreams(audio_delay_ms,
-                             video_delay_ms,
-                             current_audio_delay_ms,
-                             &extra_audio_delay_ms,
-                             &total_video_delay_ms));
-  EXPECT_EQ(0, total_video_delay_ms);
-  // The audio delay is not allowed to change more than the half of the required
-  // change in delay.
-  EXPECT_EQ(current_extra_delay_ms + MaxAudioDelayIncrease(
-      current_audio_delay_ms, video_delay_ms - audio_delay_ms),
-      extra_audio_delay_ms);
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterVideoClockDrift) {
+  video_clock_drift_ = 1.05;
+  BothDelayedVideoLaterTest(0);
 }
 
 TEST_F(StreamSynchronizationTest, BothDelayedAudioLater) {
-  BothDelayedAudioLaterTest();
+  BothDelayedAudioLaterTest(0);
 }
 
 TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDrift) {
   audio_clock_drift_ = 1.05;
-  BothDelayedAudioLaterTest();
+  BothDelayedAudioLaterTest(0);
 }
 
 TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDrift) {
   video_clock_drift_ = 1.05;
-  BothDelayedAudioLaterTest();
+  BothDelayedAudioLaterTest(0);
 }
+
+TEST_F(StreamSynchronizationTest, BaseDelay) {
+  int base_target_delay_ms = 2000;
+  int current_audio_delay_ms = 2000;
+  int extra_audio_delay_ms = 0;
+  int total_video_delay_ms = base_target_delay_ms;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  EXPECT_TRUE(DelayedStreams(base_target_delay_ms, base_target_delay_ms,
+                             current_audio_delay_ms,
+                             &extra_audio_delay_ms, &total_video_delay_ms));
+  EXPECT_EQ(base_target_delay_ms, extra_audio_delay_ms);
+  EXPECT_EQ(base_target_delay_ms, total_video_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioLaterWithBaseDelay) {
+  int base_target_delay_ms = 3000;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedAudioClockDriftWithBaseDelay) {
+  int base_target_delay_ms = 3000;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  audio_clock_drift_ = 1.05;
+  BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoClockDriftWithBaseDelay) {
+  int base_target_delay_ms = 3000;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  video_clock_drift_ = 1.05;
+  BothDelayedAudioLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest, BothDelayedVideoLaterWithBaseDelay) {
+  int base_target_delay_ms = 2000;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest,
+       BothDelayedVideoLaterAudioClockDriftWithBaseDelay) {
+  int base_target_delay_ms = 2000;
+  audio_clock_drift_ = 1.05;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
+TEST_F(StreamSynchronizationTest,
+       BothDelayedVideoLaterVideoClockDriftWithBaseDelay) {
+  int base_target_delay_ms = 2000;
+  video_clock_drift_ = 1.05;
+  sync_->SetTargetBufferingDelay(base_target_delay_ms);
+  BothDelayedVideoLaterTest(base_target_delay_ms);
+}
+
 }  // namespace webrtc
diff --git a/video_engine/test/auto_test/source/vie_autotest_custom_call.cc b/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
index a171845..b862b64 100644
--- a/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
@@ -39,6 +39,7 @@
 #define DEFAULT_VIDEO_CODEC_MAX_FRAMERATE               "30"
 #define DEFAULT_VIDEO_PROTECTION_METHOD                 "None"
 #define DEFAULT_TEMPORAL_LAYER                          "0"
+#define DEFAULT_BUFFERING_DELAY_MS                      "0"
 
 DEFINE_string(render_custom_call_remote_to, "", "Specify to render the remote "
     "stream of a custom call to the provided filename instead of showing it in "
@@ -153,6 +154,7 @@
                         int video_channel,
                         VideoProtectionMethod protection_method);
 bool GetBitrateSignaling();
+int GetBufferingDelay();
 
 // The following are audio helper functions.
 bool GetAudioDevices(webrtc::VoEBase* voe_base,
@@ -265,6 +267,7 @@
   webrtc::CodecInst audio_codec;
   int audio_channel = -1;
   VideoProtectionMethod protection_method = kProtectionMethodNone;
+  int buffer_delay_ms = 0;
   bool is_image_scale_enabled = false;
   bool remb = true;
 
@@ -297,6 +300,9 @@
     // Get the video protection method for the call.
     protection_method = GetVideoProtection();
 
+    // Get the call mode (Real-Time/Buffered).
+    buffer_delay_ms = GetBufferingDelay();
+
     // Get the audio device for the call.
     memset(audio_capture_device_name, 0, KMaxUniqueIdLength);
     memset(audio_playbackDeviceName, 0, KMaxUniqueIdLength);
@@ -486,6 +492,16 @@
     number_of_errors += ViETest::TestError(error == 0,
                                            "ERROR: %s at line %d",
                                            __FUNCTION__, __LINE__);
+
+    // Set the call mode (conferencing/buffering)
+    error = vie_rtp_rtcp->SetSenderBufferingMode(video_channel,
+                                                    buffer_delay_ms);
+    number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
+                                           __FUNCTION__, __LINE__);
+    error = vie_rtp_rtcp->SetReceiverBufferingMode(video_channel,
+                                                      buffer_delay_ms);
+    number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
+                                           __FUNCTION__, __LINE__);
     // Set the Video Protection before start send and receive.
     SetVideoProtection(vie_codec, vie_rtp_rtcp,
                        video_channel, protection_method);
@@ -1555,6 +1571,15 @@
   return choice == 1;
 }
 
+int GetBufferingDelay() {
+  std::string input = TypedInput("Choose buffering delay (ms).")
+      .WithDefault(DEFAULT_BUFFERING_DELAY_MS)
+      .WithInputValidator(new webrtc::IntegerWithinRangeValidator(0, 10000))
+      .AskForInput();
+  std::string delay_ms = input;
+  return atoi(delay_ms.c_str());
+}
+
 void PrintRTCCPStatistics(webrtc::ViERTP_RTCP* vie_rtp_rtcp,
                           int video_channel,
                           StatisticsType stat_type) {
diff --git a/video_engine/test/auto_test/source/vie_autotest_rtp_rtcp.cc b/video_engine/test/auto_test/source/vie_autotest_rtp_rtcp.cc
index f017b07..7271003 100644
--- a/video_engine/test/auto_test/source/vie_autotest_rtp_rtcp.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_rtp_rtcp.cc
@@ -685,19 +685,32 @@
     EXPECT_EQ(0, ViE.rtp_rtcp->SetTransmissionSmoothingStatus(
         tbChannel.videoChannel, false));
 
-    // Streaming Mode.
-    EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
+    // Buffering mode - sender side.
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
         invalid_channel_id, 0));
     int invalid_delay = -1;
-    EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
         tbChannel.videoChannel, invalid_delay));
     invalid_delay = 15000;
-    EXPECT_EQ(-1, ViE.rtp_rtcp->EnableSenderStreamingMode(
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetSenderBufferingMode(
         tbChannel.videoChannel, invalid_delay));
-    EXPECT_EQ(0, ViE.rtp_rtcp->EnableSenderStreamingMode(
+    EXPECT_EQ(0, ViE.rtp_rtcp->SetSenderBufferingMode(
         tbChannel.videoChannel, 5000));
-    // Real-time mode.
-    EXPECT_EQ(0, ViE.rtp_rtcp->EnableSenderStreamingMode(
+    // Buffering mode - receiver side.
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
+        invalid_channel_id, 0));
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
+        tbChannel.videoChannel, invalid_delay));
+    invalid_delay = -1;
+    EXPECT_EQ(-1, ViE.rtp_rtcp->SetReceiverBufferingMode(
+        tbChannel.videoChannel, invalid_delay));
+    EXPECT_EQ(0, ViE.rtp_rtcp->SetReceiverBufferingMode(
+        tbChannel.videoChannel, 5000));
+    // Real-time mode - sender side.
+    EXPECT_EQ(0, ViE.rtp_rtcp->SetSenderBufferingMode(
+        tbChannel.videoChannel, 0));
+    // Real-time mode - receiver side.
+    EXPECT_EQ(0, ViE.rtp_rtcp->SetReceiverBufferingMode(
         tbChannel.videoChannel, 0));
 
     //***************************************************************
diff --git a/video_engine/vie_channel.cc b/video_engine/vie_channel.cc
index 5e7bbdc..bcad08f 100644
--- a/video_engine/vie_channel.cc
+++ b/video_engine/vie_channel.cc
@@ -104,7 +104,8 @@
       file_recorder_(channel_id),
       mtu_(0),
       sender_(sender),
-      nack_history_size_sender_(kSendSidePacketHistorySize) {
+      nack_history_size_sender_(kSendSidePacketHistorySize),
+      max_nack_reordering_threshold_(kMaxPacketAgeToNack) {
   WEBRTC_TRACE(kTraceMemory, kTraceVideo, ViEId(engine_id, channel_id),
                "ViEChannel::ViEChannel(channel_id: %d, engine_id: %d)",
                channel_id, engine_id);
@@ -125,7 +126,7 @@
 
   rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(configuration));
   vie_receiver_.SetRtpRtcpModule(rtp_rtcp_.get());
-  vcm_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack);
+  vcm_.SetNackSettings(kMaxNackListSize, max_nack_reordering_threshold_);
 }
 
 WebRtc_Word32 ViEChannel::Init() {
@@ -298,7 +299,7 @@
       }
       if (nack_method != kNackOff) {
         rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
-        rtp_rtcp->SetNACKStatus(nack_method, kMaxPacketAgeToNack);
+        rtp_rtcp->SetNACKStatus(nack_method, max_nack_reordering_threshold_);
       } else if (paced_sender_) {
         rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
       }
@@ -622,7 +623,8 @@
                    "%s: Could not enable NACK, RTPC not on ", __FUNCTION__);
       return -1;
     }
-    if (rtp_rtcp_->SetNACKStatus(nackMethod, kMaxPacketAgeToNack) != 0) {
+    if (rtp_rtcp_->SetNACKStatus(nackMethod,
+                                 max_nack_reordering_threshold_) != 0) {
       WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
                    "%s: Could not set NACK method %d", __FUNCTION__,
                    nackMethod);
@@ -640,7 +642,7 @@
          it != simulcast_rtp_rtcp_.end();
          it++) {
       RtpRtcp* rtp_rtcp = *it;
-      rtp_rtcp->SetNACKStatus(nackMethod, kMaxPacketAgeToNack);
+      rtp_rtcp->SetNACKStatus(nackMethod, max_nack_reordering_threshold_);
       rtp_rtcp->SetStorePacketsStatus(true, nack_history_size_sender_);
     }
   } else {
@@ -652,13 +654,14 @@
       if (paced_sender_ == NULL) {
         rtp_rtcp->SetStorePacketsStatus(false, 0);
       }
-      rtp_rtcp->SetNACKStatus(kNackOff, kMaxPacketAgeToNack);
+      rtp_rtcp->SetNACKStatus(kNackOff, max_nack_reordering_threshold_);
     }
     vcm_.RegisterPacketRequestCallback(NULL);
     if (paced_sender_ == NULL) {
       rtp_rtcp_->SetStorePacketsStatus(false, 0);
     }
-    if (rtp_rtcp_->SetNACKStatus(kNackOff, kMaxPacketAgeToNack) != 0) {
+    if (rtp_rtcp_->SetNACKStatus(kNackOff,
+                                 max_nack_reordering_threshold_) != 0) {
       WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
                    "%s: Could not turn off NACK", __FUNCTION__);
       return -1;
@@ -723,21 +726,18 @@
   return ProcessFECRequest(enable, payload_typeRED, payload_typeFEC);
 }
 
-int ViEChannel::EnableSenderStreamingMode(int target_delay_ms) {
+int ViEChannel::SetSenderBufferingMode(int target_delay_ms) {
   if ((target_delay_ms < 0) || (target_delay_ms > kMaxTargetDelayMs)) {
     WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
-                 "%s: Target streaming delay out of bounds: %d", __FUNCTION__,
-                 target_delay_ms);
+                 "%s: Target sender buffering delay out of bounds: %d",
+                 __FUNCTION__, target_delay_ms);
     return -1;
   }
   if (target_delay_ms == 0) {
     // Real-time mode.
     nack_history_size_sender_ = kSendSidePacketHistorySize;
   } else {
-    // The max size of the nack list should be large enough to accommodate the
-    // the number of packets(frames) resulting from the increased delay.
-    // Roughly estimating for ~20 packets per frame @ 30fps.
-    nack_history_size_sender_ = target_delay_ms * 20 * 30 / 1000;
+    nack_history_size_sender_ = GetRequiredNackListSize(target_delay_ms);
     // Don't allow a number lower than the default value.
     if (nack_history_size_sender_ < kSendSidePacketHistorySize) {
       nack_history_size_sender_ = kSendSidePacketHistorySize;
@@ -758,6 +758,35 @@
   return 0;
 }
 
+int ViEChannel::SetReceiverBufferingMode(int target_delay_ms) {
+  if ((target_delay_ms < 0) || (target_delay_ms > kMaxTargetDelayMs)) {
+    WEBRTC_TRACE(kTraceError, kTraceVideo, ViEId(engine_id_, channel_id_),
+                 "%s: Target receiver buffering delay out of bounds: %d",
+                 __FUNCTION__, target_delay_ms);
+    return -1;
+  }
+  int max_nack_list_size;
+  if (target_delay_ms == 0) {
+    // Real-time mode - restore default settings.
+    max_nack_reordering_threshold_ = kMaxPacketAgeToNack;
+    max_nack_list_size = kMaxNackListSize;
+  } else {
+    max_nack_list_size = 3 * GetRequiredNackListSize(target_delay_ms) / 4;
+    max_nack_reordering_threshold_ = max_nack_list_size;
+  }
+  vcm_.SetNackSettings(max_nack_list_size, max_nack_reordering_threshold_);
+  vcm_.SetMinReceiverDelay(target_delay_ms);
+  vie_sync_.SetTargetBufferingDelay(target_delay_ms);
+  return 0;
+}
+
+int ViEChannel::GetRequiredNackListSize(int target_delay_ms) {
+  // The max size of the nack list should be large enough to accommodate
+  // the number of packets (frames) resulting from the increased delay.
+  // Roughly estimating for ~20 packets per frame @ 30fps.
+  return target_delay_ms * 20 * 30 / 1000;
+}
+
 WebRtc_Word32 ViEChannel::SetKeyFrameRequestMethod(
     const KeyFrameRequestMethod method) {
   WEBRTC_TRACE(kTraceInfo, kTraceVideo, ViEId(engine_id_, channel_id_),
diff --git a/video_engine/vie_channel.h b/video_engine/vie_channel.h
index 9db2b5d..40d11be 100644
--- a/video_engine/vie_channel.h
+++ b/video_engine/vie_channel.h
@@ -116,7 +116,8 @@
   WebRtc_Word32 SetHybridNACKFECStatus(const bool enable,
                                        const unsigned char payload_typeRED,
                                        const unsigned char payload_typeFEC);
-  int EnableSenderStreamingMode(int target_delay_ms);
+  int SetSenderBufferingMode(int target_delay_ms);
+  int SetReceiverBufferingMode(int target_delay_ms);
   WebRtc_Word32 SetKeyFrameRequestMethod(const KeyFrameRequestMethod method);
   bool EnableRemb(bool enable);
   int SetSendTimestampOffsetStatus(bool enable, int id);
@@ -365,6 +366,8 @@
   WebRtc_Word32 ProcessFECRequest(const bool enable,
                                   const unsigned char payload_typeRED,
                                   const unsigned char payload_typeFEC);
+  // Compute NACK list parameters for the buffering mode.
+  int GetRequiredNackListSize(int target_delay_ms);
 
   WebRtc_Word32 channel_id_;
   WebRtc_Word32 engine_id_;
@@ -425,6 +428,7 @@
   const bool sender_;
 
   int nack_history_size_sender_;
+  int max_nack_reordering_threshold_;
 };
 
 }  // namespace webrtc
diff --git a/video_engine/vie_encoder.cc b/video_engine/vie_encoder.cc
index c0f257f..5c5aa23 100644
--- a/video_engine/vie_encoder.cc
+++ b/video_engine/vie_encoder.cc
@@ -702,13 +702,13 @@
   return 0;
 }
 
-void ViEEncoder::EnableSenderStreamingMode(int target_delay_ms) {
+void ViEEncoder::SetSenderBufferingMode(int target_delay_ms) {
   if (target_delay_ms > 0) {
-    // Disable external frame-droppers.
+     // Disable external frame-droppers.
      vcm_.EnableFrameDropper(false);
      vpm_.EnableTemporalDecimation(false);
   } else {
-    // Real-time mode - enabling frame droppers.
+    // Real-time mode - enable frame droppers.
     vpm_.EnableTemporalDecimation(true);
     vcm_.EnableFrameDropper(true);
   }
diff --git a/video_engine/vie_encoder.h b/video_engine/vie_encoder.h
index 6c4aaff..08295a7 100644
--- a/video_engine/vie_encoder.h
+++ b/video_engine/vie_encoder.h
@@ -113,8 +113,8 @@
   // Loss protection.
   WebRtc_Word32 UpdateProtectionMethod();
 
-  // Streaming mode.
-  void EnableSenderStreamingMode(int target_delay_ms);
+  // Buffering mode.
+  void SetSenderBufferingMode(int target_delay_ms);
 
   // Implements VCMPacketizationCallback.
   virtual WebRtc_Word32 SendData(
diff --git a/video_engine/vie_rtp_rtcp_impl.cc b/video_engine/vie_rtp_rtcp_impl.cc
index c57d361..1a2ced0 100644
--- a/video_engine/vie_rtp_rtcp_impl.cc
+++ b/video_engine/vie_rtp_rtcp_impl.cc
@@ -553,11 +553,11 @@
   return 0;
 }
 
-int ViERTP_RTCPImpl::EnableSenderStreamingMode(int video_channel,
+int ViERTP_RTCPImpl::SetSenderBufferingMode(int video_channel,
                                                int target_delay_ms) {
   WEBRTC_TRACE(kTraceApiCall, kTraceVideo,
                ViEId(shared_data_->instance_id(), video_channel),
-               "%s(channel: %d, target_delay: %d)",
+               "%s(channel: %d, sender target_delay: %d)",
                __FUNCTION__, video_channel, target_delay_ms);
   ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
   ViEChannel* vie_channel = cs.Channel(video_channel);
@@ -578,8 +578,8 @@
     return -1;
   }
 
-  // Update the channel's streaming mode settings.
-  if (vie_channel->EnableSenderStreamingMode(target_delay_ms) != 0) {
+  // Update the channel with buffering mode settings.
+  if (vie_channel->SetSenderBufferingMode(target_delay_ms) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceVideo,
                  ViEId(shared_data_->instance_id(), video_channel),
                  "%s: failed for channel %d", __FUNCTION__, video_channel);
@@ -587,8 +587,35 @@
     return -1;
   }
 
-  // Update the encoder's streaming mode settings.
-  vie_encoder->EnableSenderStreamingMode(target_delay_ms);
+  // Update the encoder's buffering mode settings.
+  vie_encoder->SetSenderBufferingMode(target_delay_ms);
+  return 0;
+}
+
+int ViERTP_RTCPImpl::SetReceiverBufferingMode(int video_channel,
+                                                 int target_delay_ms) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVideo,
+               ViEId(shared_data_->instance_id(), video_channel),
+               "%s(channel: %d, receiver target_delay: %d)",
+               __FUNCTION__, video_channel, target_delay_ms);
+  ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+  ViEChannel* vie_channel = cs.Channel(video_channel);
+  if (!vie_channel) {
+    WEBRTC_TRACE(kTraceError, kTraceVideo,
+                 ViEId(shared_data_->instance_id(), video_channel),
+                 "%s: Channel %d doesn't exist", __FUNCTION__, video_channel);
+    shared_data_->SetLastError(kViERtpRtcpInvalidChannelId);
+    return -1;
+  }
+
+  // Update the channel with buffering mode settings.
+  if (vie_channel->SetReceiverBufferingMode(target_delay_ms) != 0) {
+    WEBRTC_TRACE(kTraceError, kTraceVideo,
+                 ViEId(shared_data_->instance_id(), video_channel),
+                 "%s: failed for channel %d", __FUNCTION__, video_channel);
+    shared_data_->SetLastError(kViERtpRtcpUnknownError);
+    return -1;
+  }
   return 0;
 }
 
diff --git a/video_engine/vie_rtp_rtcp_impl.h b/video_engine/vie_rtp_rtcp_impl.h
index 210afcf..1c1971a 100644
--- a/video_engine/vie_rtp_rtcp_impl.h
+++ b/video_engine/vie_rtp_rtcp_impl.h
@@ -64,8 +64,10 @@
   virtual int SetHybridNACKFECStatus(const int video_channel, const bool enable,
                                      const unsigned char payload_typeRED,
                                      const unsigned char payload_typeFEC);
-  virtual int EnableSenderStreamingMode(int video_channel,
-                                        int target_delay_ms);
+  virtual int SetSenderBufferingMode(int video_channel,
+                                     int target_delay_ms);
+  virtual int SetReceiverBufferingMode(int video_channel,
+                                       int target_delay_ms);
   virtual int SetKeyFrameRequestMethod(const int video_channel,
                                        const ViEKeyFrameRequestMethod method);
   virtual int SetTMMBRStatus(const int video_channel, const bool enable);
diff --git a/video_engine/vie_sync_module.cc b/video_engine/vie_sync_module.cc
index fb5612d..e01b0e2 100644
--- a/video_engine/vie_sync_module.cc
+++ b/video_engine/vie_sync_module.cc
@@ -172,4 +172,14 @@
   return 0;
 }
 
+void ViESyncModule::SetTargetBufferingDelay(int target_delay_ms) {
+  CriticalSectionScoped cs(data_cs_.get());
+  sync_->SetTargetBufferingDelay(target_delay_ms);
+  // Setting initial playout delay to voice engine (video engine is updated via
+  // the VCM interface).
+  assert(voe_sync_interface_ != NULL);
+  voe_sync_interface_->SetInitialPlayoutDelay(voe_channel_id_,
+                                              target_delay_ms);
+}
+
 }  // namespace webrtc
diff --git a/video_engine/vie_sync_module.h b/video_engine/vie_sync_module.h
index fcb8f8d..8f10c61 100644
--- a/video_engine/vie_sync_module.h
+++ b/video_engine/vie_sync_module.h
@@ -40,6 +40,9 @@
 
   int VoiceChannel();
 
+  // Set target delay for buffering mode (0 = real-time mode).
+  void SetTargetBufferingDelay(int target_delay_ms);
+
   // Implements Module.
   virtual WebRtc_Word32 TimeUntilNextProcess();
   virtual WebRtc_Word32 Process();