Add a wrapper around PushSincResampler and the old Resampler.

The old resampler is used whenever it supports the requested rates. Otherwise
the sinc resampler is enabled.

Integrated with output_mixer in order to test the change through
output_mixer_unittest. The sinc resampler will not yet be used, since we don't
feed VoE with any rates that trigger it.
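
For reference, a minimal usage sketch of the wrapper (the rate pair and buffer
sizes are chosen for illustration):

  PushResampler resampler;
  // Stereo, 44.1 kHz in, 48 kHz out; the old resampler rejects this pair, so
  // the sinc resampler path is selected internally.
  int err = resampler.InitializeIfNeeded(44100, 48000, 2);  // -1 on bad params.
  // Resample() expects exactly one 10 ms frame:
  //   in: 44100 / 100 * 2 = 882 samples, out capacity: 48000 / 100 * 2 = 960.
  int16_t src[882] = {0};  // Interleaved stereo audio.
  int16_t dst[960];
  int dst_length = resampler.Resample(src, 882, dst, 960);  // Or -1 on error.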

BUG=webrtc:1395
R=bjornv@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1355004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3915 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/common_audio/audio_util.cc b/webrtc/common_audio/audio_util.cc
new file mode 100644
index 0000000..a6114fd
--- /dev/null
+++ b/webrtc/common_audio/audio_util.cc
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_audio/include/audio_util.h"
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+void Deinterleave(const int16_t* interleaved, int samples_per_channel,
+                  int num_channels, int16_t** deinterleaved) {
+  for (int i = 0; i < num_channels; i++) {
+    int16_t* channel = deinterleaved[i];
+    int interleaved_idx = i;
+    for (int j = 0; j < samples_per_channel; j++) {
+      channel[j] = interleaved[interleaved_idx];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+void Interleave(const int16_t* const* deinterleaved, int samples_per_channel,
+                int num_channels, int16_t* interleaved) {
+  for (int i = 0; i < num_channels; ++i) {
+    const int16_t* channel = deinterleaved[i];
+    int interleaved_idx = i;
+    for (int j = 0; j < samples_per_channel; j++) {
+      interleaved[interleaved_idx] = channel[j];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/webrtc/common_audio/audio_util_unittest.cc b/webrtc/common_audio/audio_util_unittest.cc
new file mode 100644
index 0000000..9ffed73
--- /dev/null
+++ b/webrtc/common_audio/audio_util_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+void ExpectArraysEq(const int16_t* ref, const int16_t* test, int length) {
+  for (int i = 0; i < length; ++i) {
+    EXPECT_EQ(test[i], ref[i]);
+  }
+}
+
+TEST(AudioUtilTest, InterleavingStereo) {
+  const int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
+  const int kSamplesPerChannel = 4;
+  const int kNumChannels = 2;
+  const int kLength = kSamplesPerChannel * kNumChannels;
+  int16_t left[kSamplesPerChannel], right[kSamplesPerChannel];
+  int16_t* deinterleaved[] = {left, right};
+  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
+  const int16_t kRefLeft[] = {2, 4, 8, 16};
+  const int16_t kRefRight[] = {3, 9, 27, 81};
+  ExpectArraysEq(left, kRefLeft, kSamplesPerChannel);
+  ExpectArraysEq(right, kRefRight, kSamplesPerChannel);
+
+  int16_t interleaved[kLength];
+  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  ExpectArraysEq(interleaved, kInterleaved, kLength);
+}
+
+TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
+  const int16_t kInterleaved[] = {1, 2, 3, 4, 5};
+  const int kSamplesPerChannel = 5;
+  const int kNumChannels = 1;
+  int16_t mono[kSamplesPerChannel];
+  int16_t* deinterleaved[] = {mono};
+  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
+  ExpectArraysEq(mono, kInterleaved, kSamplesPerChannel);
+
+  int16_t interleaved[kSamplesPerChannel];
+  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  ExpectArraysEq(interleaved, mono, kSamplesPerChannel);
+}
+
+}  // namespace webrtc
diff --git a/webrtc/common_audio/include/audio_util.h b/webrtc/common_audio/include/audio_util.h
new file mode 100644
index 0000000..2196fc3
--- /dev/null
+++ b/webrtc/common_audio/include/audio_util.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+#define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Deinterleave audio from |interleaved| to the channel buffers pointed to
+// by |deinterleaved|. There must be sufficient space allocated in the
+// |deinterleaved| buffers (|num_channels| buffers with |samples_per_channel|
+// per buffer).
+void Deinterleave(const int16_t* interleaved, int samples_per_channel,
+                  int num_channels, int16_t** deinterleaved);
+
+// Interleave audio from the channel buffers pointed to by |deinterleaved| to
+// |interleaved|. There must be sufficient space allocated in |interleaved|
+// (|samples_per_channel| * |num_channels|).
+void Interleave(const int16_t* const* deinterleaved, int samples_per_channel,
+                int num_channels, int16_t* interleaved);
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
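
A minimal illustration of the layout these helpers assume, mirroring the unit
test above (interleaved stereo is stored as L0 R0 L1 R1 ...):

  const int16_t interleaved[] = {2, 3, 4, 9};  // L0 R0 L1 R1
  int16_t left[2], right[2];
  int16_t* deinterleaved[] = {left, right};
  Deinterleave(interleaved, 2, 2, deinterleaved);
  // left == {2, 4}, right == {3, 9}; Interleave() reverses the operation.
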
diff --git a/webrtc/common_audio/resampler/include/push_resampler.h b/webrtc/common_audio/resampler/include/push_resampler.h
new file mode 100644
index 0000000..0183f91
--- /dev/null
+++ b/webrtc/common_audio/resampler/include/push_resampler.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
+#define WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
+
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class Resampler;
+class PushSincResampler;
+
+// Wraps the old resampler and new arbitrary rate conversion resampler. The
+// old resampler will be used whenever it supports the requested rates, and
+// otherwise the sinc resampler will be enabled.
+class PushResampler {
+ public:
+  PushResampler();
+  virtual ~PushResampler();
+
+  // Must be called whenever the parameters change. Free to be called at any
+  // time as it is a no-op if parameters have not changed since the last call.
+  int InitializeIfNeeded(int src_sample_rate_hz, int dst_sample_rate_hz,
+                         int num_channels);
+
+  // Returns the total number of samples written to |dst|, or -1 on failure
+  // (e.g. one 10 ms frame of 32 kHz, 2-channel audio gives 640 samples).
+  int Resample(const int16_t* src, int src_length, int16_t* dst,
+               int dst_capacity);
+
+  bool use_sinc_resampler() const { return use_sinc_resampler_; }
+
+ private:
+  int ResampleSinc(const int16_t* src, int src_length, int16_t* dst,
+                   int dst_capacity);
+
+  scoped_ptr<Resampler> resampler_;
+  scoped_ptr<PushSincResampler> sinc_resampler_;
+  scoped_ptr<PushSincResampler> sinc_resampler_right_;
+  int src_sample_rate_hz_;
+  int dst_sample_rate_hz_;
+  int num_channels_;
+  bool use_sinc_resampler_;
+  scoped_array<int16_t> src_left_;
+  scoped_array<int16_t> src_right_;
+  scoped_array<int16_t> dst_left_;
+  scoped_array<int16_t> dst_right_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
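
The size contract documented on Resample() above boils down to 10 ms frames; a
small helper mirroring that arithmetic (illustrative only, not part of the
interface):

  // Samples in one 10 ms frame of interleaved audio.
  static int SamplesPer10Ms(int sample_rate_hz, int num_channels) {
    return sample_rate_hz / 100 * num_channels;
  }
  // SamplesPer10Ms(32000, 2) == 640, matching the comment above. src_length
  // must equal this value for the source rate, and dst_capacity must be at
  // least this value for the destination rate.
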
diff --git a/webrtc/common_audio/resampler/push_resampler.cc b/webrtc/common_audio/resampler/push_resampler.cc
new file mode 100644
index 0000000..6c59e0b
--- /dev/null
+++ b/webrtc/common_audio/resampler/push_resampler.cc
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+
+#include <cstring>
+
+#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/resampler/include/resampler.h"
+#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
+
+namespace webrtc {
+
+PushResampler::PushResampler()
+      // Resampler requires valid rates at construction; pass arbitrary ones.
+    : resampler_(new Resampler(48000, 48000, kResamplerSynchronous)),
+      sinc_resampler_(NULL),
+      sinc_resampler_right_(NULL),
+      src_sample_rate_hz_(0),
+      dst_sample_rate_hz_(0),
+      num_channels_(0),
+      use_sinc_resampler_(false),
+      src_left_(NULL),
+      src_right_(NULL),
+      dst_left_(NULL),
+      dst_right_(NULL) {
+}
+
+PushResampler::~PushResampler() {
+}
+
+int PushResampler::InitializeIfNeeded(int src_sample_rate_hz,
+                                      int dst_sample_rate_hz,
+                                      int num_channels) {
+  if (src_sample_rate_hz == src_sample_rate_hz_ &&
+      dst_sample_rate_hz == dst_sample_rate_hz_ &&
+      num_channels == num_channels_) {
+    // No-op if settings haven't changed.
+    return 0;
+  }
+
+  if (src_sample_rate_hz <= 0 || dst_sample_rate_hz <= 0 ||
+      num_channels <= 0 || num_channels > 2) {
+    return -1;
+  }
+
+  src_sample_rate_hz_ = src_sample_rate_hz;
+  dst_sample_rate_hz_ = dst_sample_rate_hz;
+  num_channels_ = num_channels;
+
+  const ResamplerType resampler_type =
+      num_channels == 1 ? kResamplerSynchronous : kResamplerSynchronousStereo;
+  if (resampler_->Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                        resampler_type) == 0) {
+    // The resampler supports these rates.
+    use_sinc_resampler_ = false;
+    return 0;
+  }
+
+  use_sinc_resampler_ = true;
+  const int src_size_10ms_mono = src_sample_rate_hz / 100;
+  const int dst_size_10ms_mono = dst_sample_rate_hz / 100;
+  sinc_resampler_.reset(new PushSincResampler(src_size_10ms_mono,
+                                              dst_size_10ms_mono));
+  if (num_channels_ == 2) {
+    src_left_.reset(new int16_t[src_size_10ms_mono]);
+    src_right_.reset(new int16_t[src_size_10ms_mono]);
+    dst_left_.reset(new int16_t[dst_size_10ms_mono]);
+    dst_right_.reset(new int16_t[dst_size_10ms_mono]);
+    sinc_resampler_right_.reset(new PushSincResampler(src_size_10ms_mono,
+                                                      dst_size_10ms_mono));
+  }
+
+  return 0;
+}
+
+int PushResampler::Resample(const int16_t* src, int src_length,
+                            int16_t* dst, int dst_capacity) {
+  const int src_size_10ms = src_sample_rate_hz_ * num_channels_ / 100;
+  const int dst_size_10ms = dst_sample_rate_hz_ * num_channels_ / 100;
+  if (src_length != src_size_10ms || dst_capacity < dst_size_10ms) {
+    return -1;
+  }
+
+  if (use_sinc_resampler_) {
+    return ResampleSinc(src, src_length, dst, dst_capacity);
+  }
+
+  int resulting_length = 0;
+  if (resampler_->Push(src, src_length, dst, dst_capacity,
+                       resulting_length) != 0) {
+    return -1;
+  }
+  return resulting_length;
+}
+
+int PushResampler::ResampleSinc(const int16_t* src, int src_length,
+                                int16_t* dst, int dst_capacity) {
+  if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
+    // The old resampler provides this memcpy facility in the case of matching
+    // sample rates, so reproduce it here for the sinc resampler.
+    memcpy(dst, src, src_length * sizeof(int16_t));
+    return src_length;
+  }
+  if (num_channels_ == 2) {
+    const int src_length_mono = src_length / num_channels_;
+    const int dst_capacity_mono = dst_capacity / num_channels_;
+    int16_t* deinterleaved[] = {src_left_.get(), src_right_.get()};
+    Deinterleave(src, src_length_mono, num_channels_, deinterleaved);
+
+    int dst_length_mono =
+        sinc_resampler_->Resample(src_left_.get(), src_length_mono,
+                                  dst_left_.get(), dst_capacity_mono);
+    sinc_resampler_right_->Resample(src_right_.get(), src_length_mono,
+                                    dst_right_.get(), dst_capacity_mono);
+
+    deinterleaved[0] = dst_left_.get();
+    deinterleaved[1] = dst_right_.get();
+    Interleave(deinterleaved, dst_length_mono, num_channels_, dst);
+    return dst_length_mono * num_channels_;
+  } else {
+    return sinc_resampler_->Resample(src, src_length, dst, dst_capacity);
+  }
+}
+
+}  // namespace webrtc
diff --git a/webrtc/common_audio/resampler/push_resampler_unittest.cc b/webrtc/common_audio/resampler/push_resampler_unittest.cc
new file mode 100644
index 0000000..6b60d05
--- /dev/null
+++ b/webrtc/common_audio/resampler/push_resampler_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+
+// Quality testing of PushResampler is handled through output_mixer_unittest.cc.
+
+namespace webrtc {
+
+typedef std::tr1::tuple<int, int, bool> PushResamplerTestData;
+class PushResamplerTest
+    : public testing::TestWithParam<PushResamplerTestData> {
+ public:
+  PushResamplerTest()
+      : input_rate_(std::tr1::get<0>(GetParam())),
+        output_rate_(std::tr1::get<1>(GetParam())),
+        use_sinc_resampler_(std::tr1::get<2>(GetParam())) {
+  }
+
+  virtual ~PushResamplerTest() {}
+
+ protected:
+  int input_rate_;
+  int output_rate_;
+  bool use_sinc_resampler_;
+};
+
+TEST_P(PushResamplerTest, SincResamplerOnlyUsedWhenNecessary) {
+  PushResampler resampler;
+  EXPECT_EQ(0, resampler.InitializeIfNeeded(input_rate_, output_rate_, 1));
+  EXPECT_EQ(use_sinc_resampler_, resampler.use_sinc_resampler());
+}
+
+INSTANTIATE_TEST_CASE_P(
+    PushResamplerTest, PushResamplerTest, testing::Values(
+        // To 8 kHz
+        std::tr1::make_tuple(8000, 8000, false),
+        std::tr1::make_tuple(16000, 8000, false),
+        std::tr1::make_tuple(32000, 8000, false),
+        std::tr1::make_tuple(44100, 8000, true),
+        std::tr1::make_tuple(48000, 8000, false),
+        std::tr1::make_tuple(96000, 8000, false),
+        std::tr1::make_tuple(192000, 8000, true),
+
+        // To 16 kHz
+        std::tr1::make_tuple(8000, 16000, false),
+        std::tr1::make_tuple(16000, 16000, false),
+        std::tr1::make_tuple(32000, 16000, false),
+        std::tr1::make_tuple(44100, 16000, true),
+        std::tr1::make_tuple(48000, 16000, false),
+        std::tr1::make_tuple(96000, 16000, false),
+        std::tr1::make_tuple(192000, 16000, false),
+
+        // To 32 kHz
+        std::tr1::make_tuple(8000, 32000, false),
+        std::tr1::make_tuple(16000, 32000, false),
+        std::tr1::make_tuple(32000, 32000, false),
+        std::tr1::make_tuple(44100, 32000, true),
+        std::tr1::make_tuple(48000, 32000, false),
+        std::tr1::make_tuple(96000, 32000, false),
+        std::tr1::make_tuple(192000, 32000, false),
+
+        // To 44.1kHz
+        std::tr1::make_tuple(8000, 44100, true),
+        std::tr1::make_tuple(16000, 44100, true),
+        std::tr1::make_tuple(32000, 44100, true),
+        std::tr1::make_tuple(44100, 44100, false),
+        std::tr1::make_tuple(48000, 44100, true),
+        std::tr1::make_tuple(96000, 44100, true),
+        std::tr1::make_tuple(192000, 44100, true),
+
+        // To 48kHz
+        std::tr1::make_tuple(8000, 48000, false),
+        std::tr1::make_tuple(16000, 48000, false),
+        std::tr1::make_tuple(32000, 48000, false),
+        std::tr1::make_tuple(44100, 48000, true),
+        std::tr1::make_tuple(48000, 48000, false),
+        std::tr1::make_tuple(96000, 48000, false),
+        std::tr1::make_tuple(192000, 48000, false),
+
+        // To 96kHz
+        std::tr1::make_tuple(8000, 96000, false),
+        std::tr1::make_tuple(16000, 96000, false),
+        std::tr1::make_tuple(32000, 96000, false),
+        std::tr1::make_tuple(44100, 96000, true),
+        std::tr1::make_tuple(48000, 96000, false),
+        std::tr1::make_tuple(96000, 96000, false),
+        std::tr1::make_tuple(192000, 96000, false),
+
+        // To 192kHz
+        std::tr1::make_tuple(8000, 192000, true),
+        std::tr1::make_tuple(16000, 192000, false),
+        std::tr1::make_tuple(32000, 192000, false),
+        std::tr1::make_tuple(44100, 192000, true),
+        std::tr1::make_tuple(48000, 192000, false),
+        std::tr1::make_tuple(96000, 192000, false),
+        std::tr1::make_tuple(192000, 192000, false)));
+
+}  // namespace webrtc
diff --git a/webrtc/common_audio/resampler/resampler.gypi b/webrtc/common_audio/resampler/resampler.gypi
index ac429ee..86d9903 100644
--- a/webrtc/common_audio/resampler/resampler.gypi
+++ b/webrtc/common_audio/resampler/resampler.gypi
@@ -23,7 +23,13 @@
         ],
       },
       'sources': [
+        # TODO(ajm): Adding audio_util here for now. We should transition
+        # to having a single common_audio target.
+        '../audio_util.cc',
+        '../include/audio_util.h',
+        'include/push_resampler.h',
         'include/resampler.h',
+        'push_resampler.cc',
         'push_sinc_resampler.cc',
         'push_sinc_resampler.h',
         'resampler.cc',
@@ -45,7 +51,9 @@
             '<(DEPTH)/testing/gtest.gyp:gtest',
           ],
           'sources': [
+            '../audio_util_unittest.cc',
             'resampler_unittest.cc',
+            'push_resampler_unittest.cc',
             'push_sinc_resampler_unittest.cc',
             'sinc_resampler_unittest.cc',
             'sinusoidal_linear_chirp_source.cc',
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index a124564..a8e4177 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -8,16 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "output_mixer.h"
+#include "webrtc/voice_engine/output_mixer.h"
 
-#include "audio_processing.h"
-#include "audio_frame_operations.h"
-#include "critical_section_wrapper.h"
-#include "file_wrapper.h"
-#include "output_mixer_internal.h"
-#include "statistics.h"
-#include "trace.h"
-#include "voe_external_media.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/file_wrapper.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
+#include "webrtc/voice_engine/statistics.h"
 
 namespace webrtc {
 
@@ -528,7 +528,7 @@
   frame->sample_rate_hz_ = sample_rate_hz;
   // TODO(andrew): Ideally the downmixing would occur much earlier, in
   // AudioCodingModule.
-  return RemixAndResample(_audioFrame, &_resampler, frame);
+  return RemixAndResample(_audioFrame, &resampler_, frame);
 }
 
 int32_t
@@ -602,7 +602,7 @@
   AudioFrame frame;
   frame.num_channels_ = 1;
   frame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
-  if (RemixAndResample(_audioFrame, &_apmResampler, &frame) == -1)
+  if (RemixAndResample(_audioFrame, &audioproc_resampler_, &frame) == -1)
     return;
 
   if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) {
diff --git a/webrtc/voice_engine/output_mixer.h b/webrtc/voice_engine/output_mixer.h
index e2ca366..b98f88e 100644
--- a/webrtc/voice_engine/output_mixer.h
+++ b/webrtc/voice_engine/output_mixer.h
@@ -11,14 +11,14 @@
 #ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
 #define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
 
-#include "audio_conference_mixer.h"
-#include "audio_conference_mixer_defines.h"
-#include "common_types.h"
-#include "dtmf_inband.h"
-#include "file_recorder.h"
-#include "level_indicator.h"
-#include "resampler.h"
-#include "voice_engine_defines.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
+#include "webrtc/modules/utility/interface/file_recorder.h"
+#include "webrtc/voice_engine/dtmf_inband.h"
+#include "webrtc/voice_engine/level_indicator.h"
+#include "webrtc/voice_engine/voice_engine_defines.h"
 
 namespace webrtc {
 
@@ -133,8 +133,8 @@
     CriticalSectionWrapper& _fileCritSect;
     AudioConferenceMixer& _mixerModule;
     AudioFrame _audioFrame;
-    Resampler _resampler;        // converts mixed audio to fit ADM format
-    Resampler _apmResampler;    // converts mixed audio to fit APM rate
+    PushResampler resampler_;  // converts mixed audio to fit ADM format
+    PushResampler audioproc_resampler_;  // converts mixed audio to fit APM rate
     AudioLevel _audioLevel;    // measures audio level for the combined signal
     DtmfInband _dtmfGenerator;
     int _instanceId;
diff --git a/webrtc/voice_engine/output_mixer_internal.cc b/webrtc/voice_engine/output_mixer_internal.cc
index dfa7d95..55eedb3 100644
--- a/webrtc/voice_engine/output_mixer_internal.cc
+++ b/webrtc/voice_engine/output_mixer_internal.cc
@@ -8,18 +8,19 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "output_mixer_internal.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
 
-#include "audio_frame_operations.h"
-#include "common_audio/resampler/include/resampler.h"
-#include "module_common_types.h"
-#include "trace.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 namespace voe {
 
 int RemixAndResample(const AudioFrame& src_frame,
-                     Resampler* resampler,
+                     PushResampler* resampler,
                      AudioFrame* dst_frame) {
   const int16_t* audio_ptr = src_frame.data_;
   int audio_ptr_num_channels = src_frame.num_channels_;
@@ -34,30 +35,26 @@
     audio_ptr_num_channels = 1;
   }
 
-  const ResamplerType resampler_type = audio_ptr_num_channels == 1 ?
-      kResamplerSynchronous : kResamplerSynchronousStereo;
-  if (resampler->ResetIfNeeded(src_frame.sample_rate_hz_,
-                               dst_frame->sample_rate_hz_,
-                               resampler_type) == -1) {
+  if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_,
+                                    dst_frame->sample_rate_hz_,
+                                    audio_ptr_num_channels) == -1) {
     dst_frame->CopyFrom(src_frame);
-    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
-                "%s ResetIfNeeded failed", __FUNCTION__);
+    LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_,
+              dst_frame->sample_rate_hz_, audio_ptr_num_channels);
     return -1;
   }
 
-  int out_length = 0;
-  if (resampler->Push(audio_ptr,
-                      src_frame.samples_per_channel_* audio_ptr_num_channels,
-                      dst_frame->data_,
-                      AudioFrame::kMaxDataSizeSamples,
-                      out_length) == 0) {
-    dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
-  } else {
+  const int src_length = src_frame.samples_per_channel_ *
+                         audio_ptr_num_channels;
+  int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_,
+                                       AudioFrame::kMaxDataSizeSamples);
+  if (out_length == -1) {
     dst_frame->CopyFrom(src_frame);
-    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
-                 "%s resampling failed", __FUNCTION__);
+    LOG_FERR3(LS_ERROR, Resample, src_length, dst_frame->data_,
+              AudioFrame::kMaxDataSizeSamples);
     return -1;
   }
+  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
 
   // Upmix after resampling.
   if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) {
diff --git a/webrtc/voice_engine/output_mixer_internal.h b/webrtc/voice_engine/output_mixer_internal.h
index 8d23a14..88a3a5b 100644
--- a/webrtc/voice_engine/output_mixer_internal.h
+++ b/webrtc/voice_engine/output_mixer_internal.h
@@ -14,7 +14,7 @@
 namespace webrtc {
 
 class AudioFrame;
-class Resampler;
+class PushResampler;
 
 namespace voe {
 
@@ -24,7 +24,7 @@
 //
 // On failure, returns -1 and copies |src_frame| to |dst_frame|.
 int RemixAndResample(const AudioFrame& src_frame,
-                     Resampler* resampler,
+                     PushResampler* resampler,
                      AudioFrame* dst_frame);
 
 }  // namespace voe
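
A rough sketch of how a caller drives RemixAndResample(), following the pattern
used in output_mixer_unittest.cc below (the frame values are assumptions for
illustration):

  AudioFrame src, dst;
  src.num_channels_ = 2;
  src.sample_rate_hz_ = 48000;
  src.samples_per_channel_ = 480;   // One 10 ms frame; data_ filled elsewhere.
  dst.num_channels_ = 1;            // Request a downmix to mono...
  dst.sample_rate_hz_ = 16000;      // ...and a resample to 16 kHz.
  PushResampler resampler;
  if (RemixAndResample(src, &resampler, &dst) == -1) {
    // On failure, dst is a copy of src, per the comment above.
  }
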
diff --git a/webrtc/voice_engine/output_mixer_unittest.cc b/webrtc/voice_engine/output_mixer_unittest.cc
index dbcb251..24d3917 100644
--- a/webrtc/voice_engine/output_mixer_unittest.cc
+++ b/webrtc/voice_engine/output_mixer_unittest.cc
@@ -10,10 +10,9 @@
 
 #include <math.h>
 
-#include "gtest/gtest.h"
-
-#include "output_mixer.h"
-#include "output_mixer_internal.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/voice_engine/output_mixer.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
 
 namespace webrtc {
 namespace voe {
@@ -32,7 +31,7 @@
   void RunResampleTest(int src_channels, int src_sample_rate_hz,
                        int dst_channels, int dst_sample_rate_hz);
 
-  Resampler resampler_;
+  PushResampler resampler_;
   AudioFrame src_frame_;
   AudioFrame dst_frame_;
   AudioFrame golden_frame_;
@@ -42,6 +41,7 @@
 // used so non-integer values result in rounding error, but not an accumulating
 // error.
 void SetMonoFrame(AudioFrame* frame, float data, int sample_rate_hz) {
+  memset(frame->data_, 0, sizeof(frame->data_));
   frame->num_channels_ = 1;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
@@ -59,6 +59,7 @@
 // each channel respectively.
 void SetStereoFrame(AudioFrame* frame, float left, float right,
                     int sample_rate_hz) {
+  memset(frame->data_, 0, sizeof(frame->data_));
   frame->num_channels_ = 2;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
@@ -80,13 +81,14 @@
 }
 
 // Computes the best SNR based on the error between |ref_frame| and
-// |test_frame|. It allows for up to a 30 sample delay between the signals to
-// compensate for the resampling delay.
-float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+// |test_frame|. It allows for a delay of up to |max_delay| samples between
+// the signals to compensate for the resampling delay.
+float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame,
+                 int max_delay) {
   VerifyParams(ref_frame, test_frame);
   float best_snr = 0;
   int best_delay = 0;
-  for (int delay = 0; delay < 30; delay++) {
+  for (int delay = 0; delay <= max_delay; delay++) {
     float mse = 0;
     float variance = 0;
     for (int i = 0; i < ref_frame.samples_per_channel_ *
@@ -120,14 +122,14 @@
                                       int src_sample_rate_hz,
                                       int dst_channels,
                                       int dst_sample_rate_hz) {
-  Resampler resampler;  // Create a new one with every test.
-  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
-  const int16_t kSrcRight = 30;
-  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+  PushResampler resampler;  // Create a new one with every test.
+  const int16_t kSrcLeft = 30;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 15;
+  const float resampling_factor = (1.0 * src_sample_rate_hz) /
       dst_sample_rate_hz;
-  const float kDstLeft = kResamplingFactor * kSrcLeft;
-  const float kDstRight = kResamplingFactor * kSrcRight;
-  const float kDstMono = (kDstLeft + kDstRight) / 2;
+  const float dst_left = resampling_factor * kSrcLeft;
+  const float dst_right = resampling_factor * kSrcRight;
+  const float dst_mono = (dst_left + dst_right) / 2;
   if (src_channels == 1)
     SetMonoFrame(&src_frame_, kSrcLeft, src_sample_rate_hz);
   else
@@ -136,27 +138,27 @@
   if (dst_channels == 1) {
     SetMonoFrame(&dst_frame_, 0, dst_sample_rate_hz);
     if (src_channels == 1)
-      SetMonoFrame(&golden_frame_, kDstLeft, dst_sample_rate_hz);
+      SetMonoFrame(&golden_frame_, dst_left, dst_sample_rate_hz);
     else
-      SetMonoFrame(&golden_frame_, kDstMono, dst_sample_rate_hz);
+      SetMonoFrame(&golden_frame_, dst_mono, dst_sample_rate_hz);
   } else {
     SetStereoFrame(&dst_frame_, 0, 0, dst_sample_rate_hz);
     if (src_channels == 1)
-      SetStereoFrame(&golden_frame_, kDstLeft, kDstLeft, dst_sample_rate_hz);
+      SetStereoFrame(&golden_frame_, dst_left, dst_left, dst_sample_rate_hz);
     else
-      SetStereoFrame(&golden_frame_, kDstLeft, kDstRight, dst_sample_rate_hz);
+      SetStereoFrame(&golden_frame_, dst_left, dst_right, dst_sample_rate_hz);
   }
 
+  // The sinc resampler has a known delay, which we compute here. Multiplying
+  // by two gives us a crude maximum for any resampling, as the old resampler
+  // typically (but not always) has lower delay.
+  static const int kInputKernelDelaySamples = 16;
+  const int max_delay = static_cast<double>(dst_sample_rate_hz)
+      / src_sample_rate_hz * kInputKernelDelaySamples * dst_channels * 2;
   printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
       src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
   EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler, &dst_frame_));
-  EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_), 40.0f);
-}
-
-TEST_F(OutputMixerTest, RemixAndResampleFailsWithBadSampleRate) {
-  SetMonoFrame(&dst_frame_, 10, 44100);
-  EXPECT_EQ(-1, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
-  VerifyFramesAreEqual(src_frame_, dst_frame_);
+  EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 39.0f);
 }
 
 TEST_F(OutputMixerTest, RemixAndResampleCopyFrameSucceeds) {
@@ -190,10 +192,9 @@
 }
 
 TEST_F(OutputMixerTest, RemixAndResampleSucceeds) {
-  // We don't attempt to be exhaustive here, but just get good coverage. Some
-  // combinations of rates will not be resampled, and some give an odd
-  // resampling factor which makes it more difficult to evaluate.
-  const int kSampleRates[] = {16000, 32000, 48000};
+  // TODO(ajm): convert this to the parameterized TEST_P style used in
+  // sinc_resampler_unittest.cc. We can then easily add tighter SNR thresholds.
+  const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000, 96000};
   const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
   const int kChannels[] = {1, 2};
   const int kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);