Reland of "Choose between APM-AGC-Limiter and Apm-AGC2-fixed-gain_controller."

The webrtc::AudioMixer uses a limiter component. This CL allows
changing the APM-AGC limiter to the APM-AGC2 limiter through a Chrome
field trial.

The AGC2 limiter has a float interface. We plan to eventually switch
to the AGC2 limiter. Therefore, we will now mix in de-interleaved
floats. Float mixing will happen both when using the old limiter and
when using the new one.

After this CL the mixer will support two limiters. The limiters have
different interfaces and need different processing steps. Because of
that, we make (rather big) changes to the control flow in
FrameCombiner. For a short while, we will mix in deinterleaved floats
when using any limiter.

Originally landed in https://webrtc-review.googlesource.com/c/src/+/56141/

Reverted in https://webrtc-review.googlesource.com/c/src/+/57940
because it both broke compilation and contained a severe error. The
error is fixed and a test is added. The compilation issue is fixed.

Bug: webrtc:8925
Change-Id: Ieba138dee9652c826459fe637ae2dccbbc06bcf0
Reviewed-on: https://webrtc-review.googlesource.com/58085
Reviewed-by: Gustaf Ullberg <gustaf@webrtc.org>
Commit-Queue: Alex Loiko <aleloi@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22207}
diff --git a/modules/audio_mixer/BUILD.gn b/modules/audio_mixer/BUILD.gn
index 2758dc7..9b74f5d 100644
--- a/modules/audio_mixer/BUILD.gn
+++ b/modules/audio_mixer/BUILD.gn
@@ -32,6 +32,8 @@
     "frame_combiner.h",
   ]
 
+  configs += [ "../audio_processing:apm_debug_dump" ]
+
   deps = [
     ":audio_frame_manipulator",
     "..:module_api",
@@ -40,10 +42,15 @@
     "../../api:array_view",
     "../../api/audio:audio_mixer_api",
     "../../audio/utility:audio_frame_operations",
+    "../../common_audio",
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
     "../../system_wrappers",
+    "../../system_wrappers:field_trial_api",
     "../audio_processing",
+    "../audio_processing:apm_logging",
+    "../audio_processing:audio_frame_view",
+    "../audio_processing/agc2:agc2",
   ]
 }
 
@@ -79,6 +86,7 @@
       "sine_wave_generator.cc",
       "sine_wave_generator.h",
     ]
+
     deps = [
       ":audio_frame_manipulator",
       ":audio_mixer_impl",
diff --git a/modules/audio_mixer/audio_mixer_impl.cc b/modules/audio_mixer/audio_mixer_impl.cc
index 0940c59..de0f1c3 100644
--- a/modules/audio_mixer/audio_mixer_impl.cc
+++ b/modules/audio_mixer/audio_mixer_impl.cc
@@ -19,6 +19,7 @@
 #include "modules/audio_mixer/default_output_rate_calculator.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/refcountedobject.h"
+#include "system_wrappers/include/field_trial.h"
 
 namespace webrtc {
 namespace {
@@ -88,6 +89,17 @@
         return p->audio_source == audio_source;
       });
 }
+
+FrameCombiner::LimiterType ChooseLimiterType(bool use_limiter) {
+  using LimiterType = FrameCombiner::LimiterType;
+  if (!use_limiter) {
+    return LimiterType::kNoLimiter;
+  } else if (field_trial::IsEnabled("WebRTC-ApmGainController2Limiter")) {
+    return LimiterType::kApmAgc2Limiter;
+  } else {
+    return LimiterType::kApmAgcLimiter;
+  }
+}
 }  // namespace
 
 AudioMixerImpl::AudioMixerImpl(
@@ -97,7 +109,7 @@
       output_frequency_(0),
       sample_size_(0),
       audio_source_list_(),
-      frame_combiner_(use_limiter) {}
+      frame_combiner_(ChooseLimiterType(use_limiter)) {}
 
 AudioMixerImpl::~AudioMixerImpl() {}
 
diff --git a/modules/audio_mixer/frame_combiner.cc b/modules/audio_mixer/frame_combiner.cc
index 7c671ec..6d26fdd 100644
--- a/modules/audio_mixer/frame_combiner.cc
+++ b/modules/audio_mixer/frame_combiner.cc
@@ -13,12 +13,13 @@
 #include <algorithm>
 #include <array>
 #include <functional>
-#include <memory>
 
 #include "api/array_view.h"
 #include "audio/utility/audio_frame_operations.h"
+#include "common_audio/include/audio_util.h"
 #include "modules/audio_mixer/audio_frame_manipulator.h"
 #include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 
@@ -26,113 +27,10 @@
 namespace {
 
 // Stereo, 48 kHz, 10 ms.
-constexpr int kMaximalFrameSize = 2 * 48 * 10;
+constexpr int kMaximumAmountOfChannels = 2;
+constexpr int kMaximumChannelSize = 48 * AudioMixerImpl::kFrameDurationInMs;
 
-void CombineZeroFrames(bool use_limiter,
-                       AudioProcessing* limiter,
-                       AudioFrame* audio_frame_for_mixing) {
-  audio_frame_for_mixing->elapsed_time_ms_ = -1;
-  AudioFrameOperations::Mute(audio_frame_for_mixing);
-  // The limiter should still process a zero frame to avoid jumps in
-  // its gain curve.
-  if (use_limiter) {
-    RTC_DCHECK(limiter);
-    // The limiter smoothly increases frames with half gain to full
-    // volume.  Here there's no need to apply half gain, since the frame
-    // is zero anyway.
-    limiter->ProcessStream(audio_frame_for_mixing);
-  }
-}
-
-void CombineOneFrame(const AudioFrame* input_frame,
-                     bool use_limiter,
-                     AudioProcessing* limiter,
-                     AudioFrame* audio_frame_for_mixing) {
-  audio_frame_for_mixing->timestamp_ = input_frame->timestamp_;
-  audio_frame_for_mixing->elapsed_time_ms_ = input_frame->elapsed_time_ms_;
-  // TODO(yujo): can we optimize muted frames?
-  std::copy(input_frame->data(),
-            input_frame->data() +
-                input_frame->num_channels_ * input_frame->samples_per_channel_,
-            audio_frame_for_mixing->mutable_data());
-  if (use_limiter) {
-    AudioFrameOperations::ApplyHalfGain(audio_frame_for_mixing);
-    RTC_DCHECK(limiter);
-    limiter->ProcessStream(audio_frame_for_mixing);
-    AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
-  }
-}
-
-// Lower-level helper function called from Combine(...) when there
-// are several input frames.
-//
-// TODO(aleloi): change interface to ArrayView<int16_t> output_frame
-// once we have gotten rid of the APM limiter.
-//
-// Only the 'data' field of output_frame should be modified. The
-// rest are used for potentially sending the output to the APM
-// limiter.
-void CombineMultipleFrames(
-    const std::vector<rtc::ArrayView<const int16_t>>& input_frames,
-    bool use_limiter,
-    AudioProcessing* limiter,
-    AudioFrame* audio_frame_for_mixing) {
-  RTC_DCHECK(!input_frames.empty());
-  RTC_DCHECK(audio_frame_for_mixing);
-
-  const size_t frame_length = input_frames.front().size();
-  for (const auto& frame : input_frames) {
-    RTC_DCHECK_EQ(frame_length, frame.size());
-  }
-
-  // Algorithm: int16 frames are added to a sufficiently large
-  // statically allocated int32 buffer. For > 2 participants this is
-  // more efficient than addition in place in the int16 audio
-  // frame. The audio quality loss due to halving the samples is
-  // smaller than 16-bit addition in place.
-  RTC_DCHECK_GE(kMaximalFrameSize, frame_length);
-  std::array<int32_t, kMaximalFrameSize> add_buffer;
-
-  add_buffer.fill(0);
-
-  for (const auto& frame : input_frames) {
-    // TODO(yujo): skip this for muted frames.
-    std::transform(frame.begin(), frame.end(), add_buffer.begin(),
-                   add_buffer.begin(), std::plus<int32_t>());
-  }
-
-  if (use_limiter) {
-    // Halve all samples to avoid saturation before limiting.
-    std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->mutable_data(), [](int32_t a) {
-                     return rtc::saturated_cast<int16_t>(a / 2);
-                   });
-
-    // Smoothly limit the audio.
-    RTC_DCHECK(limiter);
-    const int error = limiter->ProcessStream(audio_frame_for_mixing);
-    if (error != limiter->kNoError) {
-      RTC_LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
-      RTC_NOTREACHED();
-    }
-
-    // And now we can safely restore the level. This procedure results in
-    // some loss of resolution, deemed acceptable.
-    //
-    // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
-    // and compression gain of 6 dB). However, in the transition frame when this
-    // is enabled (moving from one to two audio sources) it has the potential to
-    // create discontinuities in the mixed frame.
-    //
-    // Instead we double the frame (with addition since left-shifting a
-    // negative value is undefined).
-    AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
-  } else {
-    std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->mutable_data(),
-                   [](int32_t a) { return rtc::saturated_cast<int16_t>(a); });
-  }
-}
+using OneChannelBuffer = std::array<float, kMaximumChannelSize>;
 
 std::unique_ptr<AudioProcessing> CreateLimiter() {
   Config config;
@@ -141,7 +39,6 @@
   std::unique_ptr<AudioProcessing> limiter(
       AudioProcessingBuilder().Create(config));
   RTC_DCHECK(limiter);
-
   webrtc::AudioProcessing::Config apm_config;
   apm_config.residual_echo_detector.enabled = false;
   limiter->ApplyConfig(apm_config);
@@ -160,13 +57,143 @@
   check_no_error(gain_control->set_compression_gain_db(0));
   check_no_error(gain_control->enable_limiter(true));
   check_no_error(gain_control->Enable(true));
+
   return limiter;
 }
+
+void SetAudioFrameFields(const std::vector<AudioFrame*>& mix_list,
+                         size_t number_of_channels,
+                         int sample_rate,
+                         size_t number_of_streams,
+                         AudioFrame* audio_frame_for_mixing) {
+  const size_t samples_per_channel = static_cast<size_t>(
+      (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
+
+  // TODO(minyue): Issue bugs.webrtc.org/3390.
+  // Audio frame timestamp. The 'timestamp_' field is set to dummy
+  // value '0', because it is only supported in the one channel case and
+  // is then updated in the helper functions.
+  audio_frame_for_mixing->UpdateFrame(
+      0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
+      AudioFrame::kVadUnknown, number_of_channels);
+
+  if (mix_list.empty()) {
+    audio_frame_for_mixing->elapsed_time_ms_ = -1;
+  } else if (mix_list.size() == 1) {
+    audio_frame_for_mixing->timestamp_ = mix_list[0]->timestamp_;
+    audio_frame_for_mixing->elapsed_time_ms_ = mix_list[0]->elapsed_time_ms_;
+  }
+}
+
+void MixFewFramesWithNoLimiter(const std::vector<AudioFrame*>& mix_list,
+                               AudioFrame* audio_frame_for_mixing) {
+  if (mix_list.empty()) {
+    audio_frame_for_mixing->Mute();
+    return;
+  }
+  RTC_DCHECK_LE(mix_list.size(), 1);
+  std::copy(mix_list[0]->data(),
+            mix_list[0]->data() +
+                mix_list[0]->num_channels_ * mix_list[0]->samples_per_channel_,
+            audio_frame_for_mixing->mutable_data());
+}
+
+std::array<OneChannelBuffer, kMaximumAmountOfChannels> MixToFloatFrame(
+    const std::vector<AudioFrame*>& mix_list,
+    size_t samples_per_channel,
+    size_t number_of_channels) {
+  // Convert to FloatS16 and mix.
+  using OneChannelBuffer = std::array<float, kMaximumChannelSize>;
+  std::array<OneChannelBuffer, kMaximumAmountOfChannels> mixing_buffer{};
+
+  for (size_t i = 0; i < mix_list.size(); ++i) {
+    const AudioFrame* const frame = mix_list[i];
+    for (size_t j = 0; j < number_of_channels; ++j) {
+      for (size_t k = 0; k < samples_per_channel; ++k) {
+        mixing_buffer[j][k] += frame->data()[number_of_channels * k + j];
+      }
+    }
+  }
+  return mixing_buffer;
+}
+
+void RunApmAgcLimiter(AudioFrameView<float> mixing_buffer_view,
+                      AudioProcessing* apm_agc_limiter) {
+  // Halve all samples to avoid saturation before limiting. The input
+  // format of APM is Float. Convert the samples from FloatS16 to
+  // Float.
+  for (size_t i = 0; i < mixing_buffer_view.num_channels(); ++i) {
+    std::transform(mixing_buffer_view.channel(i).begin(),
+                   mixing_buffer_view.channel(i).end(),
+                   mixing_buffer_view.channel(i).begin(),
+                   [](float a) { return FloatS16ToFloat(a / 2); });
+  }
+
+  const int sample_rate =
+      static_cast<int>(mixing_buffer_view.samples_per_channel()) * 1000 /
+      AudioMixerImpl::kFrameDurationInMs;
+  StreamConfig processing_config(sample_rate,
+                                 mixing_buffer_view.num_channels());
+
+  // Smoothly limit the audio.
+  apm_agc_limiter->ProcessStream(mixing_buffer_view.data(), processing_config,
+                                 processing_config, mixing_buffer_view.data());
+
+  // And now we can safely restore the level. This procedure results in
+  // some loss of resolution, deemed acceptable.
+  //
+  // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
+  // and compression gain of 6 dB). However, in the transition frame when this
+  // is enabled (moving from one to two audio sources) it has the potential to
+  // create discontinuities in the mixed frame.
+  //
+  // Instead we double the samples in the frame..
+  // Also convert the samples back to FloatS16.
+  for (size_t i = 0; i < mixing_buffer_view.num_channels(); ++i) {
+    std::transform(mixing_buffer_view.channel(i).begin(),
+                   mixing_buffer_view.channel(i).end(),
+                   mixing_buffer_view.channel(i).begin(),
+                   [](float a) { return FloatToFloatS16(a * 2); });
+  }
+}
+
+void RunApmAgc2Limiter(AudioFrameView<float> mixing_buffer_view,
+                       FixedGainController* apm_agc2_limiter) {
+  const size_t sample_rate = mixing_buffer_view.samples_per_channel() * 1000 /
+                             AudioMixerImpl::kFrameDurationInMs;
+  apm_agc2_limiter->SetSampleRate(sample_rate);
+  apm_agc2_limiter->Process(mixing_buffer_view);
+}
+
+// Both interleaves and rounds.
+void InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,
+                            AudioFrame* audio_frame_for_mixing) {
+  const size_t number_of_channels = mixing_buffer_view.num_channels();
+  const size_t samples_per_channel = mixing_buffer_view.samples_per_channel();
+  // Put data in the result frame.
+  for (size_t i = 0; i < number_of_channels; ++i) {
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      audio_frame_for_mixing->mutable_data()[number_of_channels * j + i] =
+          FloatS16ToS16(mixing_buffer_view.channel(i)[j]);
+    }
+  }
+}
 }  // namespace
 
-FrameCombiner::FrameCombiner(bool use_apm_limiter)
-    : use_apm_limiter_(use_apm_limiter),
-      limiter_(use_apm_limiter ? CreateLimiter() : nullptr) {}
+FrameCombiner::FrameCombiner(LimiterType limiter_type)
+    : limiter_type_(limiter_type),
+      apm_agc_limiter_(limiter_type_ == LimiterType::kApmAgcLimiter
+                           ? CreateLimiter()
+                           : nullptr),
+      data_dumper_(new ApmDataDumper(0)),
+      apm_agc2_limiter_(data_dumper_.get()) {
+  apm_agc2_limiter_.SetGain(0.f);
+  apm_agc2_limiter_.EnableLimiter(true);
+}
+
+FrameCombiner::FrameCombiner(bool use_limiter)
+    : FrameCombiner(use_limiter ? LimiterType::kApmAgcLimiter
+                                : LimiterType::kNoLimiter) {}
 
 FrameCombiner::~FrameCombiner() = default;
 
@@ -174,8 +201,11 @@
                             size_t number_of_channels,
                             int sample_rate,
                             size_t number_of_streams,
-                            AudioFrame* audio_frame_for_mixing) const {
+                            AudioFrame* audio_frame_for_mixing) {
   RTC_DCHECK(audio_frame_for_mixing);
+  SetAudioFrameFields(mix_list, number_of_channels, sample_rate,
+                      number_of_streams, audio_frame_for_mixing);
+
   const size_t samples_per_channel = static_cast<size_t>(
       (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
 
@@ -184,36 +214,35 @@
     RTC_DCHECK_EQ(sample_rate, frame->sample_rate_hz_);
   }
 
-  // Frames could be both stereo and mono.
+  // The 'num_channels_' field of frames in 'mix_list' could be
+  // different from 'number_of_channels'.
   for (auto* frame : mix_list) {
     RemixFrame(number_of_channels, frame);
   }
 
-  // TODO(aleloi): Issue bugs.webrtc.org/3390.
-  // Audio frame timestamp. The 'timestamp_' field is set to dummy
-  // value '0', because it is only supported in the one channel case and
-  // is then updated in the helper functions.
-  audio_frame_for_mixing->UpdateFrame(
-      0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
-      AudioFrame::kVadUnknown, number_of_channels);
-
-  const bool use_limiter_this_round = use_apm_limiter_ && number_of_streams > 1;
-
-  if (mix_list.empty()) {
-    CombineZeroFrames(use_limiter_this_round, limiter_.get(),
-                      audio_frame_for_mixing);
-  } else if (mix_list.size() == 1) {
-    CombineOneFrame(mix_list.front(), use_limiter_this_round, limiter_.get(),
-                    audio_frame_for_mixing);
-  } else {
-    std::vector<rtc::ArrayView<const int16_t>> input_frames;
-    for (size_t i = 0; i < mix_list.size(); ++i) {
-      input_frames.push_back(rtc::ArrayView<const int16_t>(
-          mix_list[i]->data(), samples_per_channel * number_of_channels));
-    }
-    CombineMultipleFrames(input_frames, use_limiter_this_round, limiter_.get(),
-                          audio_frame_for_mixing);
+  if (number_of_streams <= 1) {
+    MixFewFramesWithNoLimiter(mix_list, audio_frame_for_mixing);
+    return;
   }
+
+  std::array<OneChannelBuffer, kMaximumAmountOfChannels> mixing_buffer =
+      MixToFloatFrame(mix_list, samples_per_channel, number_of_channels);
+
+  // Put float data in an AudioFrameView.
+  std::array<float*, kMaximumAmountOfChannels> channel_pointers{};
+  for (size_t i = 0; i < number_of_channels; ++i) {
+    channel_pointers[i] = &mixing_buffer[i][0];
+  }
+  AudioFrameView<float> mixing_buffer_view(
+      &channel_pointers[0], number_of_channels, samples_per_channel);
+
+  if (limiter_type_ == LimiterType::kApmAgcLimiter) {
+    RunApmAgcLimiter(mixing_buffer_view, apm_agc_limiter_.get());
+  } else if (limiter_type_ == LimiterType::kApmAgc2Limiter) {
+    RunApmAgc2Limiter(mixing_buffer_view, &apm_agc2_limiter_);
+  }
+
+  InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_mixer/frame_combiner.h b/modules/audio_mixer/frame_combiner.h
index 88ab0d7..3a60322 100644
--- a/modules/audio_mixer/frame_combiner.h
+++ b/modules/audio_mixer/frame_combiner.h
@@ -14,14 +14,19 @@
 #include <memory>
 #include <vector>
 
+#include "modules/audio_processing/agc2/fixed_gain_controller.h"
 #include "modules/audio_processing/include/audio_processing.h"
 #include "modules/include/module_common_types.h"
 
 namespace webrtc {
+class ApmDataDumper;
+class FixedGainController;
 
 class FrameCombiner {
  public:
-  explicit FrameCombiner(bool use_apm_limiter);
+  enum class LimiterType { kNoLimiter, kApmAgcLimiter, kApmAgc2Limiter };
+  explicit FrameCombiner(LimiterType limiter_type);
+  explicit FrameCombiner(bool use_limiter);
   ~FrameCombiner();
 
   // Combine several frames into one. Assumes sample_rate,
@@ -34,11 +39,13 @@
                size_t number_of_channels,
                int sample_rate,
                size_t number_of_streams,
-               AudioFrame* audio_frame_for_mixing) const;
+               AudioFrame* audio_frame_for_mixing);
 
  private:
-  const bool use_apm_limiter_;
-  std::unique_ptr<AudioProcessing> limiter_;
+  const LimiterType limiter_type_;
+  std::unique_ptr<AudioProcessing> apm_agc_limiter_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  FixedGainController apm_agc2_limiter_;
 };
 }  // namespace webrtc
 
diff --git a/modules/audio_mixer/frame_combiner_unittest.cc b/modules/audio_mixer/frame_combiner_unittest.cc
index 490e99e..b78e981 100644
--- a/modules/audio_mixer/frame_combiner_unittest.cc
+++ b/modules/audio_mixer/frame_combiner_unittest.cc
@@ -23,6 +23,15 @@
 namespace webrtc {
 
 namespace {
+using LimiterType = FrameCombiner::LimiterType;
+using NativeRate = AudioProcessing::NativeRate;
+struct FrameCombinerConfig {
+  LimiterType limiter_type;
+  NativeRate sample_rate_hz;
+  int number_of_channels;
+  float wave_frequency;
+};
+
 std::string ProduceDebugText(int sample_rate_hz,
                              int number_of_channels,
                              int number_of_sources) {
@@ -33,17 +42,18 @@
   return ss.str();
 }
 
-std::string ProduceDebugText(int sample_rate_hz,
-                             int number_of_channels,
-                             int number_of_sources,
-                             bool limiter_active,
-                             float wave_frequency) {
+std::string ProduceDebugText(const FrameCombinerConfig& config) {
   std::ostringstream ss;
-  ss << "Sample rate: " << sample_rate_hz << " ,";
-  ss << "number of channels: " << number_of_channels << " ,";
-  ss << "number of sources: " << number_of_sources << " ,";
-  ss << "limiter active: " << (limiter_active ? "true" : "false") << " ,";
-  ss << "wave frequency: " << wave_frequency << " ,";
+  ss << "Sample rate: " << config.sample_rate_hz << " ,";
+  ss << "number of channels: " << config.number_of_channels << " ,";
+  ss << "limiter active: "
+     << (config.limiter_type == LimiterType::kNoLimiter
+             ? "off"
+
+             : (config.limiter_type == LimiterType::kApmAgcLimiter ? "agc1"
+                                                                   : "agc2"))
+     << " ,";
+  ss << "wave frequency: " << config.wave_frequency << " ,";
   return ss.str();
 }
 
@@ -61,7 +71,7 @@
 }  // namespace
 
 TEST(FrameCombiner, BasicApiCallsLimiter) {
-  FrameCombiner combiner(true);
+  FrameCombiner combiner(LimiterType::kApmAgcLimiter);
   for (const int rate : {8000, 16000, 32000, 48000}) {
     for (const int number_of_channels : {1, 2}) {
       const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
@@ -83,7 +93,7 @@
 // on rate. The rate has to be divisible by 100 since we use
 // 10 ms frames, though.
 TEST(FrameCombiner, BasicApiCallsNoLimiter) {
-  FrameCombiner combiner(false);
+  FrameCombiner combiner(LimiterType::kNoLimiter);
   for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
     for (const int number_of_channels : {1, 2}) {
       const std::vector<AudioFrame*> all_frames = {&frame1, &frame2};
@@ -102,7 +112,7 @@
 }
 
 TEST(FrameCombiner, CombiningZeroFramesShouldProduceSilence) {
-  FrameCombiner combiner(false);
+  FrameCombiner combiner(LimiterType::kNoLimiter);
   for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
     for (const int number_of_channels : {1, 2}) {
       SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 0));
@@ -124,7 +134,7 @@
 }
 
 TEST(FrameCombiner, CombiningOneFrameShouldNotChangeFrame) {
-  FrameCombiner combiner(false);
+  FrameCombiner combiner(LimiterType::kNoLimiter);
   for (const int rate : {8000, 10000, 11000, 32000, 44100}) {
     for (const int number_of_channels : {1, 2}) {
       SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1));
@@ -150,56 +160,65 @@
 }
 
 // Send a sine wave through the FrameCombiner, and check that the
-// difference between input and output varies smoothly. This is to
-// catch issues like chromium:695993.
+// difference between input and output varies smoothly. Also check
+// that it is inside reasonable bounds. This is to catch issues like
+// chromium:695993 and chromium:816875.
 TEST(FrameCombiner, GainCurveIsSmoothForAlternatingNumberOfStreams) {
-  // Test doesn't work with rates requiring a band split, because it
-  // introduces a small delay measured in single samples, and this
-  // test cannot handle it.
-  //
-  // TODO(aleloi): Add more rates when APM limiter doesn't use band
-  // split.
-  for (const bool use_limiter : {true, false}) {
-    for (const int rate : {8000, 16000}) {
-      constexpr int number_of_channels = 2;
-      for (const float wave_frequency : {50, 400, 3200}) {
-        SCOPED_TRACE(ProduceDebugText(rate, number_of_channels, 1, use_limiter,
-                                      wave_frequency));
+  std::vector<FrameCombinerConfig> configs = {
+      {LimiterType::kNoLimiter, NativeRate::kSampleRate32kHz, 2, 50.f},
+      {LimiterType::kNoLimiter, NativeRate::kSampleRate16kHz, 1, 3200.f},
+      {LimiterType::kApmAgcLimiter, NativeRate::kSampleRate8kHz, 1, 3200.f},
+      {LimiterType::kApmAgcLimiter, NativeRate::kSampleRate16kHz, 1, 50.f},
+      {LimiterType::kApmAgcLimiter, NativeRate::kSampleRate16kHz, 2, 3200.f},
+      {LimiterType::kApmAgcLimiter, NativeRate::kSampleRate8kHz, 2, 50.f},
+      {LimiterType::kApmAgc2Limiter, NativeRate::kSampleRate8kHz, 1, 3200.f},
+      {LimiterType::kApmAgc2Limiter, NativeRate::kSampleRate32kHz, 1, 50.f},
+      {LimiterType::kApmAgc2Limiter, NativeRate::kSampleRate48kHz, 2, 3200.f},
+  };
 
-        FrameCombiner combiner(use_limiter);
+  for (const auto& config : configs) {
+    SCOPED_TRACE(ProduceDebugText(config));
 
-        constexpr int16_t wave_amplitude = 30000;
-        SineWaveGenerator wave_generator(wave_frequency, wave_amplitude);
+    FrameCombiner combiner(config.limiter_type);
 
-        GainChangeCalculator change_calculator;
-        float cumulative_change = 0.f;
+    constexpr int16_t wave_amplitude = 30000;
+    SineWaveGenerator wave_generator(config.wave_frequency, wave_amplitude);
 
-        constexpr size_t iterations = 100;
+    GainChangeCalculator change_calculator;
+    float cumulative_change = 0.f;
 
-        for (size_t i = 0; i < iterations; ++i) {
-          SetUpFrames(rate, number_of_channels);
-          wave_generator.GenerateNextFrame(&frame1);
-          AudioFrameOperations::Mute(&frame2);
+    constexpr size_t iterations = 100;
 
-          std::vector<AudioFrame*> frames_to_combine = {&frame1};
-          if (i % 2 == 0) {
-            frames_to_combine.push_back(&frame2);
-          }
-          const size_t number_of_samples =
-              frame1.samples_per_channel_ * number_of_channels;
+    for (size_t i = 0; i < iterations; ++i) {
+      SetUpFrames(config.sample_rate_hz, config.number_of_channels);
+      wave_generator.GenerateNextFrame(&frame1);
+      AudioFrameOperations::Mute(&frame2);
 
-          // Ensures limiter is on if 'use_limiter'.
-          constexpr size_t number_of_streams = 2;
-          combiner.Combine(frames_to_combine, number_of_channels, rate,
-                           number_of_streams, &audio_frame_for_mixing);
-          cumulative_change += change_calculator.CalculateGainChange(
-              rtc::ArrayView<const int16_t>(frame1.data(), number_of_samples),
-              rtc::ArrayView<const int16_t>(audio_frame_for_mixing.data(),
-                                            number_of_samples));
-        }
-        RTC_DCHECK_LT(cumulative_change, 10);
+      std::vector<AudioFrame*> frames_to_combine = {&frame1};
+      if (i % 2 == 0) {
+        frames_to_combine.push_back(&frame2);
       }
+      const size_t number_of_samples =
+          frame1.samples_per_channel_ * config.number_of_channels;
+
+      // Ensures limiter is on if 'use_limiter'.
+      constexpr size_t number_of_streams = 2;
+      combiner.Combine(frames_to_combine, config.number_of_channels,
+                       config.sample_rate_hz, number_of_streams,
+                       &audio_frame_for_mixing);
+      cumulative_change += change_calculator.CalculateGainChange(
+          rtc::ArrayView<const int16_t>(frame1.data(), number_of_samples),
+          rtc::ArrayView<const int16_t>(audio_frame_for_mixing.data(),
+                                        number_of_samples));
     }
+
+    // Check that the gain doesn't vary too much.
+    EXPECT_LT(cumulative_change, 10);
+
+    // Check that the latest gain is within reasonable bounds. It
+    // should be slightly less that 1.
+    EXPECT_LT(0.9f, change_calculator.LatestGain());
+    EXPECT_LT(change_calculator.LatestGain(), 1.01f);
   }
 }
 }  // namespace webrtc
diff --git a/modules/audio_mixer/gain_change_calculator.cc b/modules/audio_mixer/gain_change_calculator.cc
index ae7c6d5..6472793 100644
--- a/modules/audio_mixer/gain_change_calculator.cc
+++ b/modules/audio_mixer/gain_change_calculator.cc
@@ -29,6 +29,10 @@
   return CalculateDifferences(gain);
 }
 
+float GainChangeCalculator::LatestGain() const {
+  return last_reliable_gain_;
+}
+
 void GainChangeCalculator::CalculateGain(rtc::ArrayView<const int16_t> in,
                                          rtc::ArrayView<const int16_t> out,
                                          rtc::ArrayView<float> gain) {
diff --git a/modules/audio_mixer/gain_change_calculator.h b/modules/audio_mixer/gain_change_calculator.h
index fe7957c..1d7f151 100644
--- a/modules/audio_mixer/gain_change_calculator.h
+++ b/modules/audio_mixer/gain_change_calculator.h
@@ -23,6 +23,8 @@
   float CalculateGainChange(rtc::ArrayView<const int16_t> in,
                             rtc::ArrayView<const int16_t> out);
 
+  float LatestGain() const;
+
  private:
   void CalculateGain(rtc::ArrayView<const int16_t> in,
                      rtc::ArrayView<const int16_t> out,
diff --git a/modules/audio_processing/agc2/gain_curve_applier.cc b/modules/audio_processing/agc2/gain_curve_applier.cc
index 1610c4a..122839a 100644
--- a/modules/audio_processing/agc2/gain_curve_applier.cc
+++ b/modules/audio_processing/agc2/gain_curve_applier.cc
@@ -15,6 +15,7 @@
 #include <cmath>
 
 #include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
diff --git a/modules/audio_processing/agc2/gain_curve_applier_unittest.cc b/modules/audio_processing/agc2/gain_curve_applier_unittest.cc
index a7cb1b6..d9179a4 100644
--- a/modules/audio_processing/agc2/gain_curve_applier_unittest.cc
+++ b/modules/audio_processing/agc2/gain_curve_applier_unittest.cc
@@ -14,6 +14,7 @@
 #include "modules/audio_processing/agc2/agc2_common.h"
 #include "modules/audio_processing/agc2/agc2_testing_common.h"
 #include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/gunit.h"
 
 namespace webrtc {
diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.h b/modules/audio_processing/agc2/interpolated_gain_curve.h
index de79c14..533afe2 100644
--- a/modules/audio_processing/agc2/interpolated_gain_curve.h
+++ b/modules/audio_processing/agc2/interpolated_gain_curve.h
@@ -14,8 +14,8 @@
 #include <array>
 
 #include "modules/audio_processing/agc2/agc2_common.h"
-#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
 #include "rtc_base/gtest_prod_util.h"
 
 namespace webrtc {
@@ -114,7 +114,7 @@
   // Stats.
   mutable Stats stats_;
 
-  // RTC_DISALLOW_COPY_AND_ASSIGN(InterpolatedGainCurve);
+  RTC_DISALLOW_COPY_AND_ASSIGN(InterpolatedGainCurve);
 };
 
 }  // namespace webrtc