Add SetAudioPlayout and SetAudioRecording methods to the PeerConnection API (II)

Second attempt to land https://webrtc-review.googlesource.com/c/src/+/16180

Now removes voice_engine dependency from peerconnection and fixes a minor
const issue in NullAudioPoller.

TBR=solenberg

Bug: webrtc:7313
Change-Id: Ibfddbdc76118581e4a4dc64575203f84c1659e5c
Reviewed-on: https://webrtc-review.googlesource.com/17784
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Commit-Queue: Henrik Andreassson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20526}
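
Example usage from application code (a hypothetical sketch, assuming an
existing rtc::scoped_refptr<webrtc::PeerConnectionInterface> named |pc|;
not part of this change):

  // Keep received audio silent until the app is ready for it, e.g. while a
  // local ringtone is playing. Audio processing and stats keep running.
  pc->SetAudioPlayout(false);
  // ... later, when remote audio should become audible ...
  pc->SetAudioPlayout(true);

  // Similarly, hold off sending local audio until some event occurs:
  pc->SetAudioRecording(false);
  // ... later ...
  pc->SetAudioRecording(true);
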
diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
index 5b9df68..e82fa75 100644
--- a/api/peerconnectioninterface.h
+++ b/api/peerconnectioninterface.h
@@ -788,6 +788,21 @@
       std::unique_ptr<rtc::BitrateAllocationStrategy>
           bitrate_allocation_strategy) {}
 
+  // Enable/disable playout of received audio streams. Enabled by default. Note
+  // that even if playout is enabled, streams will only be played out if the
+  // appropriate SDP is also applied. Setting |playout| to false will stop
+  // playout of the underlying audio device but start a task which polls for
+  // audio data every 10 ms to ensure that audio processing happens and the
+  // audio statistics are updated.
+  // TODO(henrika): deprecate and remove this.
+  virtual void SetAudioPlayout(bool playout) {}
+
+  // Enable/disable recording of transmitted audio streams. Enabled by default.
+  // Note that even if recording is enabled, streams will only be recorded if
+  // the appropriate SDP is also applied.
+  // TODO(henrika): deprecate and remove this.
+  virtual void SetAudioRecording(bool recording) {}
+
   // Returns the current SignalingState.
   virtual SignalingState signaling_state() = 0;
 
diff --git a/api/peerconnectionproxy.h b/api/peerconnectionproxy.h
index a8ea3fa..78fe402 100644
--- a/api/peerconnectionproxy.h
+++ b/api/peerconnectionproxy.h
@@ -100,6 +100,8 @@
   PROXY_METHOD1(bool,
                 RemoveIceCandidates,
                 const std::vector<cricket::Candidate>&);
+  PROXY_METHOD1(void, SetAudioPlayout, bool)
+  PROXY_METHOD1(void, SetAudioRecording, bool)
   PROXY_METHOD1(void, RegisterUMAObserver, UMAObserver*)
   PROXY_METHOD1(RTCError, SetBitrate, const BitrateParameters&);
   PROXY_METHOD1(void,
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index a9ca0d5..80545ca 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -23,6 +23,8 @@
     "audio_transport_proxy.cc",
     "audio_transport_proxy.h",
     "conversion.h",
+    "null_audio_poller.cc",
+    "null_audio_poller.h",
     "scoped_voe_interface.h",
     "time_interval.cc",
     "time_interval.h",
@@ -52,6 +54,7 @@
     "../modules/pacing:pacing",
     "../modules/remote_bitrate_estimator:remote_bitrate_estimator",
     "../modules/rtp_rtcp:rtp_rtcp",
+    "../rtc_base:rtc_base",
     "../rtc_base:rtc_base_approved",
     "../rtc_base:rtc_task_queue",
     "../system_wrappers",
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 2a84f5c..9b5f74f 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -12,8 +12,11 @@
 
 #include "modules/audio_device/include/audio_device.h"
 #include "rtc_base/atomicops.h"
+#include "rtc_base/bind.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/thread.h"
 #include "voice_engine/transmit_mixer.h"
 
 namespace webrtc {
@@ -59,6 +62,40 @@
   return transmit_mixer->typing_noise_detected();
 }
 
+void AudioState::SetPlayout(bool enabled) {
+  LOG(INFO) << "SetPlayout(" << enabled << ")";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  const bool currently_enabled = (null_audio_poller_ == nullptr);
+  if (enabled == currently_enabled) {
+    return;
+  }
+  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
+  RTC_DCHECK(voe);
+  if (enabled) {
+    null_audio_poller_.reset();
+  }
+  // Will stop/start playout of the underlying device, if necessary, and
+  // remember the setting for when it receives subsequent calls of
+  // StartPlayout.
+  voe->SetPlayout(enabled);
+  if (!enabled) {
+    null_audio_poller_ =
+        rtc::MakeUnique<NullAudioPoller>(&audio_transport_proxy_);
+  }
+}
+
+void AudioState::SetRecording(bool enabled) {
+  LOG(INFO) << "SetRecording(" << enabled << ")";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // TODO(henrika): keep track of state as in SetPlayout().
+  VoEBase* const voe = VoEBase::GetInterface(voice_engine());
+  RTC_DCHECK(voe);
+  // Will stop/start recording of the underlying device, if necessary, and
+  // remember the setting for when it receives subsequent calls of
+  // StartSend.
+  voe->SetRecording(enabled);
+}
+
 // Reference count; implementation copied from rtc::RefCountedObject.
 void AudioState::AddRef() const {
   rtc::AtomicOps::Increment(&ref_count_);
diff --git a/audio/audio_state.h b/audio/audio_state.h
index 86d60b6..023c7b1 100644
--- a/audio/audio_state.h
+++ b/audio/audio_state.h
@@ -11,7 +11,10 @@
 #ifndef AUDIO_AUDIO_STATE_H_
 #define AUDIO_AUDIO_STATE_H_
 
+#include <memory>
+
 #include "audio/audio_transport_proxy.h"
+#include "audio/null_audio_poller.h"
 #include "audio/scoped_voe_interface.h"
 #include "call/audio_state.h"
 #include "rtc_base/constructormagic.h"
@@ -33,6 +36,9 @@
     return config_.audio_processing.get();
   }
 
+  void SetPlayout(bool enabled) override;
+  void SetRecording(bool enabled) override;
+
   VoiceEngine* voice_engine();
   rtc::scoped_refptr<AudioMixer> mixer();
   bool typing_noise_detected() const;
@@ -57,6 +63,11 @@
   // recorded audio to the VoE AudioTransport.
   AudioTransportProxy audio_transport_proxy_;
 
+  // Null audio poller is used to continue polling the audio streams if audio
+  // playout is disabled so that audio processing still happens and the audio
+  // stats are still updated.
+  std::unique_ptr<NullAudioPoller> null_audio_poller_;
+
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState);
 };
 }  // namespace internal
diff --git a/audio/null_audio_poller.cc b/audio/null_audio_poller.cc
new file mode 100644
index 0000000..c22b3d8
--- /dev/null
+++ b/audio/null_audio_poller.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/null_audio_poller.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+namespace internal {
+
+namespace {
+
+constexpr int64_t kPollDelayMs = 10;  // WebRTC uses 10ms by default
+
+constexpr size_t kNumChannels = 1;
+constexpr uint32_t kSamplesPerSecond = 48000;            // 48kHz
+constexpr size_t kNumSamples = kSamplesPerSecond / 100;  // 10ms of samples
+
+}  // namespace
+
+NullAudioPoller::NullAudioPoller(AudioTransport* audio_transport)
+    : audio_transport_(audio_transport),
+      reschedule_at_(rtc::TimeMillis() + kPollDelayMs) {
+  RTC_DCHECK(audio_transport);
+  OnMessage(nullptr);  // Start the poll loop.
+}
+
+NullAudioPoller::~NullAudioPoller() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  rtc::Thread::Current()->Clear(this);
+}
+
+void NullAudioPoller::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Buffer to hold the audio samples.
+  int16_t buffer[kNumSamples * kNumChannels];
+  // Output variables from |NeedMorePlayData|.
+  size_t n_samples;
+  int64_t elapsed_time_ms;
+  int64_t ntp_time_ms;
+  audio_transport_->NeedMorePlayData(kNumSamples, sizeof(int16_t), kNumChannels,
+                                     kSamplesPerSecond, buffer, n_samples,
+                                     &elapsed_time_ms, &ntp_time_ms);
+
+  // Reschedule the next poll iteration. If, for some reason, the given
+  // reschedule time has already passed, reschedule as soon as possible.
+  int64_t now = rtc::TimeMillis();
+  if (reschedule_at_ < now) {
+    reschedule_at_ = now;
+  }
+  rtc::Thread::Current()->PostAt(RTC_FROM_HERE, reschedule_at_, this, 0);
+
+  // Loop after next will be kPollDelayMs later.
+  reschedule_at_ += kPollDelayMs;
+}
+
+}  // namespace internal
+}  // namespace webrtc
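
Note on the rescheduling above: the next poll is posted at a running deadline
(|reschedule_at_|) rather than at "now + 10 ms", so a single late wakeup does
not push back every subsequent poll. A standalone sketch of the same idea in
plain C++ (illustration only, not part of this change; uses std::chrono
instead of rtc::Thread):

  #include <chrono>
  #include <cstdio>
  #include <thread>

  int main() {
    using Clock = std::chrono::steady_clock;
    constexpr auto kPollDelay = std::chrono::milliseconds(10);
    auto reschedule_at = Clock::now() + kPollDelay;
    for (int i = 0; i < 5; ++i) {
      // In the real poller, this is where NeedMorePlayData() pulls 10 ms of audio.
      std::printf("poll %d\n", i);
      const auto now = Clock::now();
      if (reschedule_at < now) {
        reschedule_at = now;  // Fell behind; run the next poll as soon as possible.
      }
      std::this_thread::sleep_until(reschedule_at);
      reschedule_at += kPollDelay;  // Keep the nominal 10 ms cadence.
    }
    return 0;
  }
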
diff --git a/audio/null_audio_poller.h b/audio/null_audio_poller.h
new file mode 100644
index 0000000..b6ddf17
--- /dev/null
+++ b/audio/null_audio_poller.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_NULL_AUDIO_POLLER_H_
+#define AUDIO_NULL_AUDIO_POLLER_H_
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+namespace internal {
+
+class NullAudioPoller final : public rtc::MessageHandler {
+ public:
+  explicit NullAudioPoller(AudioTransport* audio_transport);
+  ~NullAudioPoller();
+
+ protected:
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  AudioTransport* const audio_transport_;
+  int64_t reschedule_at_;
+};
+
+}  // namespace internal
+}  // namespace webrtc
+
+#endif  // AUDIO_NULL_AUDIO_POLLER_H_
diff --git a/call/audio_state.h b/call/audio_state.h
index 7719388..ad411d1 100644
--- a/call/audio_state.h
+++ b/call/audio_state.h
@@ -44,6 +44,17 @@
 
   virtual AudioProcessing* audio_processing() = 0;
 
+  // Enable/disable playout of the audio channels. Enabled by default.
+  // Disabling playout stops playout of the underlying audio device but starts
+  // a task which polls for audio data every 10 ms to ensure that audio
+  // processing happens and the audio stats are updated.
+  virtual void SetPlayout(bool enabled) = 0;
+
+  // Enable/disable recording of the audio channels. Enabled by default.
+  // Disabling recording stops recording of the underlying audio device, and
+  // no audio packets will be encoded or transmitted.
+  virtual void SetRecording(bool enabled) = 0;
+
   // TODO(solenberg): Replace scoped_refptr with shared_ptr once we can use it.
   static rtc::scoped_refptr<AudioState> Create(
       const AudioState::Config& config);
diff --git a/media/engine/fakewebrtcvoiceengine.h b/media/engine/fakewebrtcvoiceengine.h
index 7e8e5c2..55d3100 100644
--- a/media/engine/fakewebrtcvoiceengine.h
+++ b/media/engine/fakewebrtcvoiceengine.h
@@ -99,6 +99,8 @@
   WEBRTC_STUB(StartSend, (int channel));
   WEBRTC_STUB(StopPlayout, (int channel));
   WEBRTC_STUB(StopSend, (int channel));
+  WEBRTC_STUB(SetPlayout, (bool enable));
+  WEBRTC_STUB(SetRecording, (bool enable));
 
   size_t GetNetEqCapacity() const {
     auto ch = channels_.find(last_channel_);
diff --git a/pc/peerconnection.cc b/pc/peerconnection.cc
index 03d34d0..89452c0 100644
--- a/pc/peerconnection.cc
+++ b/pc/peerconnection.cc
@@ -1323,6 +1323,30 @@
   call_->SetBitrateAllocationStrategy(std::move(bitrate_allocation_strategy));
 }
 
+void PeerConnection::SetAudioPlayout(bool playout) {
+  if (!worker_thread()->IsCurrent()) {
+    worker_thread()->Invoke<void>(
+        RTC_FROM_HERE,
+        rtc::Bind(&PeerConnection::SetAudioPlayout, this, playout));
+    return;
+  }
+  auto audio_state =
+      factory_->channel_manager()->media_engine()->GetAudioState();
+  audio_state->SetPlayout(playout);
+}
+
+void PeerConnection::SetAudioRecording(bool recording) {
+  if (!worker_thread()->IsCurrent()) {
+    worker_thread()->Invoke<void>(
+        RTC_FROM_HERE,
+        rtc::Bind(&PeerConnection::SetAudioRecording, this, recording));
+    return;
+  }
+  auto audio_state =
+      factory_->channel_manager()->media_engine()->GetAudioState();
+  audio_state->SetRecording(recording);
+}
+
 std::unique_ptr<rtc::SSLCertificate>
 PeerConnection::GetRemoteAudioSSLCertificate() {
   if (!session_) {
diff --git a/pc/peerconnection.h b/pc/peerconnection.h
index 9163c36..97068b9 100644
--- a/pc/peerconnection.h
+++ b/pc/peerconnection.h
@@ -143,6 +143,9 @@
       std::unique_ptr<rtc::BitrateAllocationStrategy>
           bitrate_allocation_strategy) override;
 
+  void SetAudioPlayout(bool playout) override;
+  void SetAudioRecording(bool recording) override;
+
   RTC_DEPRECATED bool StartRtcEventLog(rtc::PlatformFile file,
                                        int64_t max_size_bytes) override;
   bool StartRtcEventLog(std::unique_ptr<RtcEventLogOutput> output) override;
diff --git a/pc/peerconnection_integrationtest.cc b/pc/peerconnection_integrationtest.cc
index 3289ccb..305ed94 100644
--- a/pc/peerconnection_integrationtest.cc
+++ b/pc/peerconnection_integrationtest.cc
@@ -3564,6 +3564,76 @@
       kMaxWaitForFramesMs);
 }
 
+// Test that SetAudioPlayout can be used to disable audio playout from the
+// start, then later enable it. This may be useful, for example, if the caller
+// needs to play a local ringtone until some event occurs, after which it
+// switches to playing the received audio.
+TEST_F(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) {
+  ASSERT_TRUE(CreatePeerConnectionWrappers());
+  ConnectFakeSignaling();
+
+  // Set up audio-only call where audio playout is disabled on caller's side.
+  caller()->pc()->SetAudioPlayout(false);
+  caller()->AddAudioOnlyMediaStream();
+  callee()->AddAudioOnlyMediaStream();
+  caller()->CreateAndSetAndSignalOffer();
+  ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
+
+  // Pump messages for a second.
+  WAIT(false, 1000);
+  // Since audio playout is disabled, the caller shouldn't have received
+  // anything (at the playout level, at least).
+  EXPECT_EQ(0, caller()->audio_frames_received());
+  // As a sanity check, make sure the callee (for which playout isn't disabled)
+  // still received audio frames at the playout level.
+  ASSERT_GT(callee()->audio_frames_received(), 0);
+
+  // Enable playout again, and ensure audio starts flowing.
+  caller()->pc()->SetAudioPlayout(true);
+  ExpectNewFramesReceivedWithWait(kDefaultExpectedAudioFrameCount, 0,
+                                  kDefaultExpectedAudioFrameCount, 0,
+                                  kMaxWaitForFramesMs);
+}
+
+double GetAudioEnergyStat(PeerConnectionWrapper* pc) {
+  auto report = pc->NewGetStats();
+  auto track_stats_list =
+      report->GetStatsOfType<webrtc::RTCMediaStreamTrackStats>();
+  const webrtc::RTCMediaStreamTrackStats* remote_track_stats = nullptr;
+  for (const auto* track_stats : track_stats_list) {
+    if (track_stats->remote_source.is_defined() &&
+        *track_stats->remote_source) {
+      remote_track_stats = track_stats;
+      break;
+    }
+  }
+  if (!remote_track_stats ||
+      !remote_track_stats->total_audio_energy.is_defined()) {
+    return 0.0;
+  }
+  return *remote_track_stats->total_audio_energy;
+}
+
+// Test that if audio playout is disabled via the SetAudioPlayout() method, then
+// incoming audio is still processed and statistics are generated.
+TEST_F(PeerConnectionIntegrationTest,
+       DisableAudioPlayoutStillGeneratesAudioStats) {
+  ASSERT_TRUE(CreatePeerConnectionWrappers());
+  ConnectFakeSignaling();
+
+  // Set up audio-only call where playout is disabled but audio-processing is
+  // still active.
+  caller()->AddAudioOnlyMediaStream();
+  callee()->AddAudioOnlyMediaStream();
+  caller()->pc()->SetAudioPlayout(false);
+
+  caller()->CreateAndSetAndSignalOffer();
+  ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout);
+
+  // Wait for the caller to generate audio stats for the received track.
+  EXPECT_TRUE_WAIT(GetAudioEnergyStat(caller()) > 0, kMaxWaitForFramesMs);
+}
+
 }  // namespace
 
 #endif  // if !defined(THREAD_SANITIZER)
diff --git a/sdk/android/api/org/webrtc/PeerConnection.java b/sdk/android/api/org/webrtc/PeerConnection.java
index 66e8075..5dd8832 100644
--- a/sdk/android/api/org/webrtc/PeerConnection.java
+++ b/sdk/android/api/org/webrtc/PeerConnection.java
@@ -363,6 +363,18 @@
 
   public native void setRemoteDescription(SdpObserver observer, SessionDescription sdp);
 
+  // True if remote audio should be played out. Defaults to true.
+  // Note that even if playout is enabled, streams will only be played out if
+  // the appropriate SDP is also applied. The main purpose of this API is to
+  // be able to control the exact time when audio playout starts.
+  public native void setAudioPlayout(boolean playout);
+
+  // True if local audio shall be recorded. Defaults to true.
+  // Note that even if recording is enabled, streams will only be recorded if
+  // the appropriate SDP is also applied. The main purpose of this API is to
+  // be able to control the exact time when audio recording starts.
+  public native void setAudioRecording(boolean recording);
+
   public boolean setConfiguration(RTCConfiguration config) {
     return nativeSetConfiguration(config, nativeObserver);
   }
diff --git a/sdk/android/src/jni/pc/peerconnection_jni.cc b/sdk/android/src/jni/pc/peerconnection_jni.cc
index a542c28..3c6bf76 100644
--- a/sdk/android/src/jni/pc/peerconnection_jni.cc
+++ b/sdk/android/src/jni/pc/peerconnection_jni.cc
@@ -166,6 +166,22 @@
       observer, JavaToNativeSessionDescription(jni, j_sdp));
 }
 
+JNI_FUNCTION_DECLARATION(void,
+                         PeerConnection_setAudioPlayout,
+                         JNIEnv* jni,
+                         jobject j_pc,
+                         jboolean playout) {
+  ExtractNativePC(jni, j_pc)->SetAudioPlayout(playout);
+}
+
+JNI_FUNCTION_DECLARATION(void,
+                         PeerConnection_setAudioRecording,
+                         JNIEnv* jni,
+                         jobject j_pc,
+                         jboolean recording) {
+  ExtractNativePC(jni, j_pc)->SetAudioRecording(recording);
+}
+
 JNI_FUNCTION_DECLARATION(jboolean,
                          PeerConnection_nativeSetConfiguration,
                          JNIEnv* jni,
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
index 94ac6ac..a62a2b4 100644
--- a/voice_engine/include/voe_base.h
+++ b/voice_engine/include/voe_base.h
@@ -139,6 +139,21 @@
   // Stops sending packets from a specified |channel|.
   virtual int StopSend(int channel) = 0;
 
+  // Enable or disable playout to the underlying device. Takes precedence over
+  // StartPlayout; calls to StartPlayout are remembered, so if SetPlayout(true)
+  // is called after StartPlayout, playout will be started.
+  //
+  // By default, playout is enabled.
+  virtual int SetPlayout(bool enabled) = 0;
+
+  // Enable or disable recording (which drives sending of encoded audio packets)
+  // from the underlying device. Takes precedence over StartSend; calls to
+  // StartSend are remembered, so if SetRecording(true) is called after
+  // StartSend, recording will be started.
+  //
+  // By default, recording is enabled.
+  virtual int SetRecording(bool enabled) = 0;
+
   // TODO(xians): Make the interface pure virtual after libjingle
   // implements the interface in its FakeWebRtcVoiceEngine.
   virtual AudioTransport* audio_transport() { return NULL; }
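
Illustrative call sequence for the precedence rules documented above (a
hypothetical sketch, assuming an existing VoEBase* |base| and a valid
|channel| id; not part of this change):

  base->StartPlayout(channel);  // Device playout starts (playout enabled by default).
  base->SetPlayout(false);      // Device playout stops; the channel still counts as playing.
  base->SetPlayout(true);       // Device playout resumes, since StartPlayout was remembered.

  base->StartSend(channel);     // Device recording starts (recording enabled by default).
  base->SetRecording(false);    // Device recording stops, so nothing is encoded or sent.
  base->SetRecording(true);     // Device recording resumes, since StartSend was remembered.
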
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index b14bf95..9e7a5f4 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -407,7 +407,7 @@
       LOG_F(LS_ERROR) << "Failed to initialize playout";
       return -1;
     }
-    if (shared_->audio_device()->StartPlayout() != 0) {
+    if (playout_enabled_ && shared_->audio_device()->StartPlayout() != 0) {
       LOG_F(LS_ERROR) << "Failed to start playout";
       return -1;
     }
@@ -416,7 +416,10 @@
 }
 
 int32_t VoEBaseImpl::StopPlayout() {
-  // Stop audio-device playing if no channel is playing out
+  if (!playout_enabled_) {
+    return 0;
+  }
+  // Stop audio-device playing if no channel is playing out.
   if (shared_->NumOfPlayingChannels() == 0) {
     if (shared_->audio_device()->StopPlayout() != 0) {
       LOG(LS_ERROR) << "StopPlayout() failed to stop playout";
@@ -427,15 +430,12 @@
 }
 
 int32_t VoEBaseImpl::StartSend() {
-  if (!shared_->audio_device()->RecordingIsInitialized() &&
-      !shared_->audio_device()->Recording()) {
+  if (!shared_->audio_device()->Recording()) {
     if (shared_->audio_device()->InitRecording() != 0) {
       LOG_F(LS_ERROR) << "Failed to initialize recording";
       return -1;
     }
-  }
-  if (!shared_->audio_device()->Recording()) {
-    if (shared_->audio_device()->StartRecording() != 0) {
+    if (recording_enabled_ && shared_->audio_device()->StartRecording() != 0) {
       LOG_F(LS_ERROR) << "Failed to start recording";
       return -1;
     }
@@ -444,8 +444,11 @@
 }
 
 int32_t VoEBaseImpl::StopSend() {
+  if (!recording_enabled_) {
+    return 0;
+  }
+  // Stop audio-device recording if no channel is recording.
   if (shared_->NumOfSendingChannels() == 0) {
-    // Stop audio-device recording if no channel is recording
     if (shared_->audio_device()->StopRecording() != 0) {
       LOG(LS_ERROR) << "StopSend() failed to stop recording";
       return -1;
@@ -456,6 +459,58 @@
   return 0;
 }
 
+int32_t VoEBaseImpl::SetPlayout(bool enabled) {
+  LOG(INFO) << "SetPlayout(" << enabled << ")";
+  if (playout_enabled_ == enabled) {
+    return 0;
+  }
+  playout_enabled_ = enabled;
+  if (shared_->NumOfPlayingChannels() == 0) {
+    // If there are no channels attempting to play out yet, there's nothing to
+    // be done; we should be in a "not playing out" state either way.
+    return 0;
+  }
+  int32_t ret;
+  if (enabled) {
+    ret = shared_->audio_device()->StartPlayout();
+    if (ret != 0) {
+      LOG(LS_ERROR) << "SetPlayout(true) failed to start playout";
+    }
+  } else {
+    ret = shared_->audio_device()->StopPlayout();
+    if (ret != 0) {
+      LOG(LS_ERROR) << "SetPlayout(false) failed to stop playout";
+    }
+  }
+  return ret;
+}
+
+int32_t VoEBaseImpl::SetRecording(bool enabled) {
+  LOG(INFO) << "SetRecording(" << enabled << ")";
+  if (recording_enabled_ == enabled) {
+    return 0;
+  }
+  recording_enabled_ = enabled;
+  if (shared_->NumOfSendingChannels() == 0) {
+    // If there are no channels attempting to record yet, there's nothing to
+    // be done; we should be in a "not recording" state either way.
+    return 0;
+  }
+  int32_t ret;
+  if (enabled) {
+    ret = shared_->audio_device()->StartRecording();
+    if (ret != 0) {
+      LOG(LS_ERROR) << "SetRecording(true) failed to start recording";
+    }
+  } else {
+    ret = shared_->audio_device()->StopRecording();
+    if (ret != 0) {
+      LOG(LS_ERROR) << "SetRecording(false) failed to stop recording";
+    }
+  }
+  return ret;
+}
+
 int32_t VoEBaseImpl::TerminateInternal() {
   // Delete any remaining channel objects
   shared_->channel_manager().DestroyAllChannels();
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
index a3c4c1f..e647124 100644
--- a/voice_engine/voe_base_impl.h
+++ b/voice_engine/voe_base_impl.h
@@ -45,6 +45,9 @@
   int StopPlayout(int channel) override;
   int StopSend(int channel) override;
 
+  int SetPlayout(bool enabled) override;
+  int SetRecording(bool enabled) override;
+
   AudioTransport* audio_transport() override { return this; }
 
   // AudioTransport
@@ -103,6 +106,8 @@
 
   AudioFrame audioFrame_;
   voe::SharedData* shared_;
+  bool playout_enabled_ = true;
+  bool recording_enabled_ = true;
 };
 
 }  // namespace webrtc