Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at 4a1b3e3a69d349b0d3e91f607f24e02d8b975688
This commit was generated by merge_from_chromium.py.
Change-Id: Iada7abd78f123301a98db982a6272cd9487de72f
diff --git a/base/base.gyp b/base/base.gyp
index f9ba404..6a0c359 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -85,6 +85,8 @@
'diskcache_win32.h',
'event.cc',
'event.h',
+ 'exp_filter.cc',
+ 'exp_filter.h',
'filelock.cc',
'filelock.h',
'fileutils.cc',
diff --git a/base/base_tests.gyp b/base/base_tests.gyp
index ca0d72a..3cef102 100644
--- a/base/base_tests.gyp
+++ b/base/base_tests.gyp
@@ -57,6 +57,7 @@
'crc32_unittest.cc',
'criticalsection_unittest.cc',
'event_unittest.cc',
+ 'exp_filter_unittest.cc',
'filelock_unittest.cc',
'fileutils_unittest.cc',
'helpers_unittest.cc',
diff --git a/base/checks.cc b/base/checks.cc
index 67f5003..0f67c76 100644
--- a/base/checks.cc
+++ b/base/checks.cc
@@ -26,7 +26,7 @@
va_end(arguments);
LOG(LS_ERROR) << "\n\n#\n# Fatal error in " << file
- << ", line " << line << "\n#" << msg
+ << ", line " << line << "\n# " << msg
<< "\n#\n";
abort();
}
diff --git a/base/checks.h b/base/checks.h
index 5a2841a..b85b50a 100644
--- a/base/checks.h
+++ b/base/checks.h
@@ -14,8 +14,6 @@
#ifndef WEBRTC_BASE_CHECKS_H_
#define WEBRTC_BASE_CHECKS_H_
-#include <string.h>
-
namespace rtc {
// Prints an error message to stderr and aborts execution.
@@ -23,8 +21,17 @@
} // namespace rtc
+// Trigger a fatal error (which aborts the process and prints an error
+// message). FATAL_ERROR_IF may seem a lot like assert, but there's a crucial
+// difference: it's always "on". This means that it can be used to check for
+// regular errors that could actually happen, not just programming errors that
+// supposedly can't happen---but triggering a fatal error will kill the process
+// in an ugly way, so it's not suitable for catching errors that might happen
+// in production.
+#define FATAL_ERROR(msg) do { rtc::Fatal(__FILE__, __LINE__, msg); } while (0)
+#define FATAL_ERROR_IF(x) do { if (x) FATAL_ERROR("check failed"); } while (0)
+
// The UNREACHABLE macro is very useful during development.
-#define UNREACHABLE() \
- rtc::Fatal(__FILE__, __LINE__, "unreachable code")
+#define UNREACHABLE() FATAL_ERROR("unreachable code")
#endif // WEBRTC_BASE_CHECKS_H_
diff --git a/base/exp_filter.cc b/base/exp_filter.cc
new file mode 100644
index 0000000..9529480
--- /dev/null
+++ b/base/exp_filter.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/exp_filter.h"
+
+#include <math.h>
+
+namespace rtc {
+
+const float ExpFilter::kValueUndefined = -1.0f;
+
+void ExpFilter::Reset(float alpha) {
+ alpha_ = alpha;
+ filtered_ = kValueUndefined;
+}
+
+float ExpFilter::Apply(float exp, float sample) {
+ if (filtered_ == kValueUndefined) {
+ // Initialize filtered value.
+ filtered_ = sample;
+ } else if (exp == 1.0) {
+ filtered_ = alpha_ * filtered_ + (1 - alpha_) * sample;
+ } else {
+ float alpha = pow(alpha_, exp);
+ filtered_ = alpha * filtered_ + (1 - alpha) * sample;
+ }
+ if (max_ != kValueUndefined && filtered_ > max_) {
+ filtered_ = max_;
+ }
+ return filtered_;
+}
+
+void ExpFilter::UpdateBase(float alpha) {
+ alpha_ = alpha;
+}
+} // namespace rtc
diff --git a/base/exp_filter.h b/base/exp_filter.h
new file mode 100644
index 0000000..174159b
--- /dev/null
+++ b/base/exp_filter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_BASE_EXP_FILTER_H_
+#define WEBRTC_BASE_EXP_FILTER_H_
+
+namespace rtc {
+
+// This class can be used, for example, for smoothing the result of bandwidth
+// estimation and packet loss estimation.
+
+class ExpFilter {
+ public:
+ static const float kValueUndefined;
+
+ explicit ExpFilter(float alpha, float max = kValueUndefined)
+ : max_(max) {
+ Reset(alpha);
+ }
+
+ // Resets the filter to its initial state, and resets filter factor base to
+ // the given value |alpha|.
+ void Reset(float alpha);
+
+ // Applies the filter with a given exponent on the provided sample:
+  // y(k) = min(alpha_^exp * y(k-1) + (1 - alpha_^exp) * sample, max_).
+ float Apply(float exp, float sample);
+
+ // Returns current filtered value.
+ float filtered() const { return filtered_; }
+
+ // Changes the filter factor base to the given value |alpha|.
+ void UpdateBase(float alpha);
+
+ private:
+ float alpha_; // Filter factor base.
+ float filtered_; // Current filter output.
+ const float max_;
+};
+} // namespace rtc
+
+#endif // WEBRTC_BASE_EXP_FILTER_H_
diff --git a/base/exp_filter_unittest.cc b/base/exp_filter_unittest.cc
new file mode 100644
index 0000000..f027808
--- /dev/null
+++ b/base/exp_filter_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "webrtc/base/gunit.h"
+#include "webrtc/base/exp_filter.h"
+
+namespace rtc {
+
+TEST(ExpFilterTest, FirstTimeOutputEqualInput) {
+ // No max value defined.
+ ExpFilter filter = ExpFilter(0.9f);
+ filter.Apply(100.0f, 10.0f);
+
+  // On the first sample, the exponent argument has no effect.
+ double value = 10.0f;
+ EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpFilterTest, SecondTime) {
+ double value;
+
+ ExpFilter filter = ExpFilter(0.9f);
+ filter.Apply(100.0f, 10.0f);
+
+  // On the first sample, the exponent argument has no effect.
+ value = 10.0f;
+
+ filter.Apply(10.0f, 20.0f);
+ double alpha = pow(0.9f, 10.0f);
+ value = alpha * value + (1.0f - alpha) * 20.0f;
+ EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpFilterTest, Reset) {
+ ExpFilter filter = ExpFilter(0.9f);
+ filter.Apply(100.0f, 10.0f);
+
+ filter.Reset(0.8f);
+ filter.Apply(100.0f, 1.0f);
+
+  // Behaves like first-time filtering again after a reset.
+ double value = 1.0f;
+ EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpfilterTest, OutputLimitedByMax) {
+ double value;
+
+ // Max value defined.
+ ExpFilter filter = ExpFilter(0.9f, 1.0f);
+ filter.Apply(100.0f, 10.0f);
+
+ // Limited to max value.
+ value = 1.0f;
+ EXPECT_EQ(value, filter.filtered());
+
+ filter.Apply(1.0f, 0.0f);
+ value = 0.9f * value;
+ EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+} // namespace rtc
diff --git a/base/logging.h b/base/logging.h
index 91d61b3..e07045f 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -246,21 +246,7 @@
const void* data, size_t len, bool hex_mode,
LogMultilineState* state);
-//////////////////////////////////////////////////////////////////////
-// Macros which automatically disable logging when LOGGING == 0
-//////////////////////////////////////////////////////////////////////
-
-// If LOGGING is not explicitly defined, default to enabled in debug mode
-#if !defined(LOGGING)
-#if defined(_DEBUG) && !defined(NDEBUG)
-#define LOGGING 1
-#else
-#define LOGGING 0
-#endif
-#endif // !defined(LOGGING)
-
#ifndef LOG
-#if LOGGING
// The following non-obvious technique for implementation of a
// conditional log stream was stolen from google3/base/logging.h.
@@ -317,30 +303,6 @@
#define LOG_T(sev) LOG(sev) << this << ": "
-#else // !LOGGING
-
-// Hopefully, the compiler will optimize away some of this code.
-// Note: syntax of "1 ? (void)0 : LogMessage" was causing errors in g++,
-// converted to "while (false)"
-#define LOG(sev) \
- while (false)rtc:: LogMessage(NULL, 0, rtc::sev).stream()
-#define LOG_V(sev) \
- while (false) rtc::LogMessage(NULL, 0, sev).stream()
-#define LOG_F(sev) LOG(sev) << __FUNCTION__ << ": "
-#define LOG_CHECK_LEVEL(sev) \
- false
-#define LOG_CHECK_LEVEL_V(sev) \
- false
-
-#define LOG_E(sev, ctx, err, ...) \
- while (false) rtc::LogMessage(__FILE__, __LINE__, rtc::sev, \
- rtc::ERRCTX_ ## ctx, err , ##__VA_ARGS__) \
- .stream()
-
-#define LOG_T(sev) LOG(sev) << this << ": "
-#define LOG_T_F(sev) LOG(sev) << this << ": " << __FUNCTION__ <<
-#endif // !LOGGING
-
#define LOG_ERRNO_EX(sev, err) \
LOG_E(sev, ERRNO, err)
#define LOG_ERRNO(sev) \
diff --git a/base/openssladapter.cc b/base/openssladapter.cc
index 601b4af..68a1fcb 100644
--- a/base/openssladapter.cc
+++ b/base/openssladapter.cc
@@ -695,7 +695,10 @@
}
STACK_OF(CONF_VALUE)* value = meth->i2v(meth, ext_str, NULL);
- for (int j = 0; j < sk_CONF_VALUE_num(value); ++j) {
+
+ // Cast to size_t to be compilable for both OpenSSL and BoringSSL.
+ for (size_t j = 0; j < static_cast<size_t>(sk_CONF_VALUE_num(value));
+ ++j) {
CONF_VALUE* nval = sk_CONF_VALUE_value(value, j);
// The value for nval can contain wildcards
if (!strcmp(nval->name, "DNS") && string_match(host, nval->value)) {
diff --git a/base/thread.cc b/base/thread.cc
index 49a299d..6da9a7f 100644
--- a/base/thread.cc
+++ b/base/thread.cc
@@ -125,6 +125,16 @@
Runnable* runnable;
};
+Thread::ScopedDisallowBlockingCalls::ScopedDisallowBlockingCalls()
+ : thread_(Thread::Current()),
+ previous_state_(thread_->SetAllowBlockingCalls(false)) {
+}
+
+Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() {
+ ASSERT(thread_->IsCurrent());
+ thread_->SetAllowBlockingCalls(previous_state_);
+}
+
Thread::Thread(SocketServer* ss)
: MessageQueue(ss),
priority_(PRIORITY_NORMAL),
@@ -133,7 +143,8 @@
thread_(NULL),
thread_id_(0),
#endif
- owned_(true) {
+ owned_(true),
+ blocking_calls_allowed_(true) {
SetName("Thread", this); // default name
}
@@ -143,6 +154,8 @@
}
bool Thread::SleepMs(int milliseconds) {
+ AssertBlockingIsAllowedOnCurrentThread();
+
#if defined(WEBRTC_WIN)
::Sleep(milliseconds);
return true;
@@ -276,6 +289,8 @@
}
void Thread::Join() {
+ AssertBlockingIsAllowedOnCurrentThread();
+
if (running()) {
ASSERT(!IsCurrent());
#if defined(WEBRTC_WIN)
@@ -291,6 +306,21 @@
}
}
+bool Thread::SetAllowBlockingCalls(bool allow) {
+ ASSERT(IsCurrent());
+ bool previous = blocking_calls_allowed_;
+ blocking_calls_allowed_ = allow;
+ return previous;
+}
+
+// static
+void Thread::AssertBlockingIsAllowedOnCurrentThread() {
+#ifdef _DEBUG
+ Thread* current = Thread::Current();
+ ASSERT(!current || current->blocking_calls_allowed_);
+#endif
+}
+
#if defined(WEBRTC_WIN)
// As seen on MSDN.
// http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx
@@ -357,6 +387,8 @@
}
void Thread::Send(MessageHandler *phandler, uint32 id, MessageData *pdata) {
+ AssertBlockingIsAllowedOnCurrentThread();
+
if (fStop_)
return;
diff --git a/base/thread.h b/base/thread.h
index 3872746..742ba6d 100644
--- a/base/thread.h
+++ b/base/thread.h
@@ -108,6 +108,19 @@
static Thread* Current();
+ // Used to catch performance regressions. Use this to disallow blocking calls
+ // (Invoke) for a given scope. If a synchronous call is made while this is in
+ // effect, an assert will be triggered.
+  // Note that this is a single-threaded class.
+ class ScopedDisallowBlockingCalls {
+ public:
+ ScopedDisallowBlockingCalls();
+ ~ScopedDisallowBlockingCalls();
+ private:
+ Thread* const thread_;
+ const bool previous_state_;
+ };
+
bool IsCurrent() const {
return Current() == this;
}
@@ -148,8 +161,11 @@
// Uses Send() internally, which blocks the current thread until execution
// is complete.
// Ex: bool result = thread.Invoke<bool>(&MyFunctionReturningBool);
+ // NOTE: This function can only be called when synchronous calls are allowed.
+ // See ScopedDisallowBlockingCalls for details.
template <class ReturnT, class FunctorT>
ReturnT Invoke(const FunctorT& functor) {
+ AssertBlockingIsAllowedOnCurrentThread();
FunctorMessageHandler<ReturnT, FunctorT> handler(functor);
Send(&handler);
return handler.result();
@@ -202,16 +218,19 @@
// question to guarantee that the returned value remains true for the duration
// of whatever code is conditionally executing because of the return value!
bool RunningForTest() { return running(); }
- // This is a legacy call-site that probably doesn't need to exist in the first
- // place.
- // TODO(fischman): delete once the ASSERT added in channelmanager.cc sticks
- // for a month (ETA 2014/06/22).
- bool RunningForChannelManager() { return running(); }
protected:
// Blocks the calling thread until this thread has terminated.
void Join();
+ // Sets the per-thread allow-blocking-calls flag and returns the previous
+ // value.
+ bool SetAllowBlockingCalls(bool allow);
+
+ static void AssertBlockingIsAllowedOnCurrentThread();
+
+ friend class ScopedDisallowBlockingCalls;
+
private:
static void *PreRun(void *pv);
@@ -238,6 +257,7 @@
#endif
bool owned_;
+ bool blocking_calls_allowed_; // By default set to |true|.
friend class ThreadManager;
diff --git a/common_audio/audio_util.cc b/common_audio/audio_util.cc
index 0c961e1..f2936b0 100644
--- a/common_audio/audio_util.cc
+++ b/common_audio/audio_util.cc
@@ -14,18 +14,18 @@
namespace webrtc {
-void RoundToInt16(const float* src, int size, int16_t* dest) {
- for (int i = 0; i < size; ++i)
+void RoundToInt16(const float* src, size_t size, int16_t* dest) {
+ for (size_t i = 0; i < size; ++i)
dest[i] = RoundToInt16(src[i]);
}
-void ScaleAndRoundToInt16(const float* src, int size, int16_t* dest) {
- for (int i = 0; i < size; ++i)
+void ScaleAndRoundToInt16(const float* src, size_t size, int16_t* dest) {
+ for (size_t i = 0; i < size; ++i)
dest[i] = ScaleAndRoundToInt16(src[i]);
}
-void ScaleToFloat(const int16_t* src, int size, float* dest) {
- for (int i = 0; i < size; ++i)
+void ScaleToFloat(const int16_t* src, size_t size, float* dest) {
+ for (size_t i = 0; i < size; ++i)
dest[i] = ScaleToFloat(src[i]);
}
diff --git a/common_audio/include/audio_util.h b/common_audio/include/audio_util.h
index 18fdbe2..9972a0e 100644
--- a/common_audio/include/audio_util.h
+++ b/common_audio/include/audio_util.h
@@ -47,14 +47,14 @@
}
// Round |size| elements of |src| to int16 with clamping and write to |dest|.
-void RoundToInt16(const float* src, int size, int16_t* dest);
+void RoundToInt16(const float* src, size_t size, int16_t* dest);
// Scale (from [-1, 1]) and round |size| elements of |src| to full-range int16
// with clamping and write to |dest|.
-void ScaleAndRoundToInt16(const float* src, int size, int16_t* dest);
+void ScaleAndRoundToInt16(const float* src, size_t size, int16_t* dest);
// Scale |size| elements of |src| to float [-1, 1] and write to |dest|.
-void ScaleToFloat(const int16_t* src, int size, float* dest);
+void ScaleToFloat(const int16_t* src, size_t size, float* dest);
// Deinterleave audio from |interleaved| to the channel buffers pointed to
// by |deinterleaved|. There must be sufficient space allocated in the
diff --git a/config.h b/config.h
index 2e96ec1..e4bccf9 100644
--- a/config.h
+++ b/config.h
@@ -10,8 +10,8 @@
// TODO(pbos): Move Config from common.h to here.
-#ifndef WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
-#define WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
+#ifndef WEBRTC_CONFIG_H_
+#define WEBRTC_CONFIG_H_
#include <string>
#include <vector>
@@ -73,9 +73,10 @@
// RTP header extension to use for the video stream, see RFC 5285.
struct RtpExtension {
- RtpExtension(const char* name, int id) : name(name), id(id) {}
+ RtpExtension(const std::string& name, int id) : name(name), id(id) {}
std::string ToString() const;
- // TODO(mflodman) Add API to query supported extensions.
+ static bool IsSupported(const std::string& name);
+
static const char* kTOffset;
static const char* kAbsSendTime;
std::string name;
@@ -109,4 +110,4 @@
} // namespace webrtc
-#endif // WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
+#endif // WEBRTC_CONFIG_H_
diff --git a/experiments.h b/experiments.h
index 3b019b4..ec871f2 100644
--- a/experiments.h
+++ b/experiments.h
@@ -21,15 +21,6 @@
uint32_t min_rate;
};
-struct SkipEncodingUnusedStreams {
- SkipEncodingUnusedStreams() : enabled(false) {}
- explicit SkipEncodingUnusedStreams(bool set_enabled)
- : enabled(set_enabled) {}
- virtual ~SkipEncodingUnusedStreams() {}
-
- const bool enabled;
-};
-
struct AimdRemoteRateControl {
AimdRemoteRateControl() : enabled(false) {}
explicit AimdRemoteRateControl(bool set_enabled)
diff --git a/modules/audio_coding/codecs/isac/fix/source/codec.h b/modules/audio_coding/codecs/isac/fix/source/codec.h
index 2f64932..a38c6e5 100644
--- a/modules/audio_coding/codecs/isac/fix/source/codec.h
+++ b/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -101,6 +101,16 @@
int32_t* outre2Q16);
#endif
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+#endif
/* filterbank functions */
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
index 3fefc1a..7a5f746 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
@@ -23,10 +23,23 @@
* coefficient: Input.
* state: Input/output, filter state, in Q4.
*/
-void WebRtcIsacfix_HighpassFilterFixDec32(int16_t *io,
- int16_t len,
- const int16_t *coefficient,
- int32_t *state);
+typedef void (*HighpassFilterFixDec32)(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+#endif
typedef void (*AllpassFilter2FixDec16)(
int16_t *data_ch1, // Input and output in channel 1, in Q0
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
index 64557e1..1928a7c 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
@@ -86,10 +86,13 @@
filter_state_ch2[1] = state1_ch2;
}
-void WebRtcIsacfix_HighpassFilterFixDec32(int16_t *io,
- int16_t len,
- const int16_t *coefficient,
- int32_t *state)
+// Declare a function pointer.
+HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t *io,
+ int16_t len,
+ const int16_t *coefficient,
+ int32_t *state)
{
int k;
int32_t a1 = 0, b1 = 0, c = 0, in = 0;
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
index 1887745..4dd70cf 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
@@ -10,26 +10,26 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
-// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform
-// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c
+// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c.
void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
- int16_t *data_ch1, // Input and output in channel 1, in Q0
- int16_t *data_ch2, // Input and output in channel 2, in Q0
- const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
- const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
- const int length, // Length of the data buffers
- int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
- int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
+ int16_t* data_ch1, // Input and output in channel 1, in Q0.
+ int16_t* data_ch2, // Input and output in channel 2, in Q0.
+ const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15.
+ const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15.
+ const int length, // Length of the data buffers.
+ int32_t* filter_state_ch1, // Filter state for channel 1, in Q16.
+ int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16.
- int32_t st0_ch1, st1_ch1; // channel1 state variables
- int32_t st0_ch2, st1_ch2; // channel2 state variables
- int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables
- int32_t r0, r1, r2, r3, r4, r5; // temporary ragister variables
+ int32_t st0_ch1, st1_ch1; // channel1 state variables.
+ int32_t st0_ch2, st1_ch2; // channel2 state variables.
+ int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables.
+ int32_t r0, r1, r2, r3, r4, r5; // temporary register variables.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
- // Load all the state and factor variables
+ // Load all the state and factor variables.
"lh %[f_ch10], 0(%[factor_ch1]) \n\t"
"lh %[f_ch20], 0(%[factor_ch2]) \n\t"
"lh %[f_ch11], 2(%[factor_ch1]) \n\t"
@@ -38,7 +38,7 @@
"lw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"lw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
"lw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
- // Allpass filtering loop
+ // Allpass filtering loop.
"1: \n\t"
"lh %[r0], 0(%[data_ch1]) \n\t"
"lh %[r1], 0(%[data_ch2]) \n\t"
@@ -80,7 +80,7 @@
"subq_s.w %[st1_ch2], %[r3], %[r1] \n\t"
"bgtz %[length], 1b \n\t"
" addiu %[data_ch2], %[data_ch2], 2 \n\t"
- // Store channel states
+ // Store channel states.
"sw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
"sw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"sw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
@@ -100,3 +100,143 @@
: "memory", "hi", "lo"
);
}
+
+// WebRtcIsacfix_HighpassFilterFixDec32 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_HighpassFilterFixDec32C from filterbanks.c.
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state) {
+ int k;
+ int32_t a1, a2, b1, b2, in;
+ int32_t state0 = state[0];
+ int32_t state1 = state[1];
+
+ int32_t c0, c1, c2, c3;
+ int32_t c4, c5, c6, c7;
+ int32_t state0_lo, state0_hi;
+ int32_t state1_lo, state1_hi;
+ int32_t t0, t1, t2, t3, t4, t5;
+
+ __asm __volatile (
+ "lh %[c0], 0(%[coeff_ptr]) \n\t"
+ "lh %[c1], 2(%[coeff_ptr]) \n\t"
+ "lh %[c2], 4(%[coeff_ptr]) \n\t"
+ "lh %[c3], 6(%[coeff_ptr]) \n\t"
+ "sra %[state0_hi], %[state0], 16 \n\t"
+ "sra %[state1_hi], %[state1], 16 \n\t"
+ "andi %[state0_lo], %[state0], 0xFFFF \n\t"
+ "andi %[state1_lo], %[state1], 0xFFFF \n\t"
+ "lh %[c4], 8(%[coeff_ptr]) \n\t"
+ "lh %[c5], 10(%[coeff_ptr]) \n\t"
+ "lh %[c6], 12(%[coeff_ptr]) \n\t"
+ "lh %[c7], 14(%[coeff_ptr]) \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ "sra %[state1_lo], %[state1_lo], 1 \n\t"
+ : [c0] "=&r" (c0), [c1] "=&r" (c1), [c2] "=&r" (c2), [c3] "=&r" (c3),
+ [c4] "=&r" (c4), [c5] "=&r" (c5), [c6] "=&r" (c6), [c7] "=&r" (c7),
+ [state0_hi] "=&r" (state0_hi), [state0_lo] "=&r" (state0_lo),
+ [state1_hi] "=&r" (state1_hi), [state1_lo] "=&r" (state1_lo)
+ : [coeff_ptr] "r" (coefficient), [state0] "r" (state0),
+ [state1] "r" (state1)
+ : "memory"
+ );
+
+ for (k = 0; k < len; k++) {
+ in = (int32_t)io[k];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mul %[t2], %[c4], %[state0_lo] \n\t"
+ "mul %[t0], %[c5], %[state0_lo] \n\t"
+ "mul %[t1], %[c4], %[state0_hi] \n\t"
+ "mul %[a1], %[c5], %[state0_hi] \n\t"
+ "mul %[t5], %[c6], %[state1_lo] \n\t"
+ "mul %[t3], %[c7], %[state1_lo] \n\t"
+ "mul %[t4], %[c6], %[state1_hi] \n\t"
+ "mul %[b1], %[c7], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a1], %[a1], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a1], %[a1], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b1], %[b1], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b1], %[b1], %[t4] \n\t"
+ "mul %[t2], %[c0], %[state0_lo] \n\t"
+ "mul %[t0], %[c1], %[state0_lo] \n\t"
+ "mul %[t1], %[c0], %[state0_hi] \n\t"
+ "mul %[a2], %[c1], %[state0_hi] \n\t"
+ "mul %[t5], %[c2], %[state1_lo] \n\t"
+ "mul %[t3], %[c3], %[state1_lo] \n\t"
+ "mul %[t4], %[c2], %[state1_hi] \n\t"
+ "mul %[b2], %[c3], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a2], %[a2], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a2], %[a2], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b2], %[b2], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b2], %[b2], %[t4] \n\t"
+ "addu %[a1], %[a1], %[b1] \n\t"
+ "sra %[a1], %[a1], 7 \n\t"
+ "addu %[a1], %[a1], %[in] \n\t"
+ "sll %[t0], %[in], 2 \n\t"
+ "addu %[a2], %[a2], %[b2] \n\t"
+ "subu %[t0], %[t0], %[a2] \n\t"
+ "shll_s.w %[a1], %[a1], 16 \n\t"
+ "shll_s.w %[t0], %[t0], 2 \n\t"
+ "sra %[a1], %[a1], 16 \n\t"
+ "addu %[state1_hi], %[state0_hi], $0 \n\t"
+ "addu %[state1_lo], %[state0_lo], $0 \n\t"
+ "sra %[state0_hi], %[t0], 16 \n\t"
+ "andi %[state0_lo], %[t0], 0xFFFF \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ ".set pop \n\t"
+ : [a1] "=&r" (a1), [b1] "=&r" (b1), [a2] "=&r" (a2), [b2] "=&r" (b2),
+ [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo),
+ [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2),
+ [t3] "=&r" (t3), [t4] "=&r" (t4), [t5] "=&r" (t5)
+ : [c0] "r" (c0), [c1] "r" (c1), [c2] "r" (c2), [c3] "r" (c3),
+ [c4] "r" (c4), [c5] "r" (c5), [c6] "r" (c6), [c7] "r" (c7),
+ [in] "r" (in)
+ : "hi", "lo"
+ );
+ io[k] = (int16_t)a1;
+ }
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+#if !defined(MIPS_DSP_R2_LE)
+ "sll %[state0_hi], %[state0_hi], 16 \n\t"
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_hi], %[state1_hi], 16 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "or %[state0_hi], %[state0_hi], %[state0_lo] \n\t"
+ "or %[state1_hi], %[state1_hi], %[state1_lo] \n\t"
+#else
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "precr_sra.ph.w %[state0_hi], %[state0_lo], 0 \n\t"
+ "precr_sra.ph.w %[state1_hi], %[state1_lo], 0 \n\t"
+#endif
+ "sw %[state0_hi], 0(%[state]) \n\t"
+ "sw %[state1_hi], 4(%[state]) \n\t"
+ ".set pop \n\t"
+ : [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo)
+ : [state] "r" (state)
+ : "memory"
+ );
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
index d748427..d15318a 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
@@ -86,6 +86,13 @@
-1280, -8554, -14496, -7561, -23541, -27263, -30560, -32768, -3441, -32768,
25203, -27550, 22419};
#endif
+ HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#else
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+#endif
for (int i = 0; i < kSamples; i++) {
in[i] = WEBRTC_SPL_WORD32_MAX / (i + 1);
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 7635908..887a7ba 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -209,9 +209,17 @@
static void WebRtcIsacfix_InitMIPS(void) {
WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrMIPS;
WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopMIPS;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeMIPS;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecMIPS;
#if defined(MIPS_DSP_R1_LE)
WebRtcIsacfix_AllpassFilter2FixDec16 =
WebRtcIsacfix_AllpassFilter2FixDec16MIPS;
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#endif
+#if defined(MIPS_DSP_R2_LE)
+ WebRtcIsacfix_CalculateResidualEnergy =
+ WebRtcIsacfix_CalculateResidualEnergyMIPS;
#endif
}
#endif
@@ -300,10 +308,11 @@
WebRtcIsacfix_CalculateResidualEnergy =
WebRtcIsacfix_CalculateResidualEnergyC;
WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
- WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C ;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
#ifdef WEBRTC_DETECT_ARM_NEON
if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi b/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
index a18a803..e5aade6 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
@@ -47,12 +47,14 @@
'lpc_masking_model.c',
'lpc_tables.c',
'pitch_estimator.c',
+ 'pitch_estimator_c.c',
'pitch_filter.c',
'pitch_filter_c.c',
'pitch_gain_tables.c',
'pitch_lag_tables.c',
'spectrum_ar_model_tables.c',
'transform.c',
+ 'transform_tables.c',
'arith_routins.h',
'bandwidth_estimator.h',
'codec.h',
@@ -89,9 +91,12 @@
'sources': [
'filters_mips.c',
'lattice_mips.c',
+ 'pitch_estimator_mips.c',
+ 'transform_mips.c',
],
'sources!': [
'lattice_c.c',
+ 'pitch_estimator_c.c',
],
'conditions': [
['mips_dsp_rev>0', {
@@ -101,6 +106,7 @@
}],
['mips_dsp_rev>1', {
'sources': [
+ 'lpc_masking_model_mips.c',
'pitch_filter_mips.c',
],
'sources!': [
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
index 72e0cfc..1270c14 100644
--- a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
@@ -62,6 +62,15 @@
int* q_val_residual_energy);
#endif
+#if defined(MIPS_DSP_R2_LE)
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy);
+#endif
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
new file mode 100644
index 0000000..55602b9
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+
+// MIPS DSPR2 optimization for function WebRtcIsacfix_CalculateResidualEnergy
+// Bit-exact with WebRtcIsacfix_CalculateResidualEnergyC from file
+// lpc_masking_model.c
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy) {
+
+ int i = 0, j = 0;
+ int shift_internal = 0, shift_norm = 0;
+ int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+ int32_t tmp_corr_c = corr_coeffs[0];
+ int16_t* tmp_a_poly = &a_polynomial[0];
+ int32_t sum64_hi = 0;
+ int32_t sum64_lo = 0;
+
+ for (j = 0; j <= lpc_order; j++) {
+ // For the case of i == 0:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly]) \n\t"
+ "mul %[tmp32], %[tmp2], %[tmp2] \n\t"
+ "addiu %[tmp_a_poly], %[tmp_a_poly], 2 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly] "+r" (tmp_a_poly), [sign_1] "=&r" (sign_1),
+ [sign_3] "=&r" (sign_3), [sign_2] "=&r" (sign_2),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ } else {
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
+
+ for (i = 1; i <= lpc_order; i++) {
+ tmp_corr_c = corr_coeffs[i];
+ int16_t* tmp_a_poly_j = &a_polynomial[i];
+ int16_t* tmp_a_poly_j_i = &a_polynomial[0];
+ for (j = i; j <= lpc_order; j++) {
+ // For the case of i = 1 .. lpc_order:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp3], 0(%[tmp_a_poly_j]) \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly_j_i]) \n\t"
+ "addiu %[tmp_a_poly_j], %[tmp_a_poly_j], 2 \n\t"
+ "addiu %[tmp_a_poly_j_i], %[tmp_a_poly_j_i], 2 \n\t"
+ "mul %[tmp32], %[tmp3], %[tmp2] \n\t"
+ "sll %[tmp32], %[tmp32], 1 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly_j] "+r" (tmp_a_poly_j), [sign_1] "=&r" (sign_1),
+ [tmp_a_poly_j_i] "+r" (tmp_a_poly_j_i), [sign_2] "=&r" (sign_2),
+ [sign_3] "=&r" (sign_3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ :
+ :"memory"
+ );
+ } else {
+ // Test overflow and sum the result.
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
+ }
+ word32_high = sum64_hi;
+ word32_low = sum64_lo;
+
+ // Calculate the value of shifting (shift_norm) for the 64-bit sum.
+ if (word32_high != 0) {
+ shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+ int tmp1;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "srl %[residual_energy], %[sum64_lo], %[shift_norm] \n\t"
+ "li %[tmp1], 32 \n\t"
+ "subu %[tmp1], %[tmp1], %[shift_norm] \n\t"
+ "sll %[tmp1], %[sum64_hi], %[tmp1] \n\t"
+ "or %[residual_energy], %[residual_energy], %[tmp1] \n\t"
+ ".set pop \n\t"
+ : [residual_energy] "=&r" (residual_energy), [tmp1]"=&r"(tmp1),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [shift_norm] "r" (shift_norm)
+ : "memory"
+ );
+ } else {
+ if ((word32_low & 0x80000000) != 0) {
+ shift_norm = 1;
+ residual_energy = (uint32_t)word32_low >> 1;
+ } else {
+ shift_norm = WebRtcSpl_NormW32(word32_low);
+ residual_energy = word32_low << shift_norm;
+ shift_norm = -shift_norm;
+ }
+ }
+
+ // Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+ // = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+ *q_val_residual_energy =
+ q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2;
+
+ return residual_energy;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
index 9c4e587..426b2cf 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
@@ -29,7 +29,7 @@
-static __inline int32_t Log2Q8( uint32_t x ) {
+__inline int32_t WebRtcIsacfix_Log2Q8( uint32_t x ) {
int32_t zeros, lg2;
int16_t frac;
@@ -153,109 +153,7 @@
-static void PCorr2Q32(const int16_t *in, int32_t *logcorQ8)
-{
- int16_t scaling,n,k;
- int32_t ysum32,csum32, lys, lcs;
- int32_t oneQ8;
-
-
- const int16_t *x, *inptr;
-
- oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
-
- x = in + PITCH_MAX_LAG/2 + 2;
- scaling = WebRtcSpl_GetScalingSquare ((int16_t *) in, PITCH_CORR_LEN2, PITCH_CORR_LEN2);
- ysum32 = 1;
- csum32 = 0;
- x = in + PITCH_MAX_LAG/2 + 2;
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- ysum32 += WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[n],(int16_t) in[n], scaling); // Q0
- csum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t) x[n],(int16_t) in[n], scaling); // Q0
- }
-
- logcorQ8 += PITCH_LAG_SPAN2 - 1;
-
- lys=Log2Q8((uint32_t) ysum32); // Q8
- lys=WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
-
- if (csum32>0) {
-
- lcs=Log2Q8((uint32_t) csum32); // 2log(csum) in Q8
-
- if (lcs>(lys + oneQ8) ){ // csum/sqrt(ysum) > 2 in Q8
- *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
- } else {
- *logcorQ8 = oneQ8; // 1.00
- }
-
- } else {
- *logcorQ8 = 0;
- }
-
-
- for (k = 1; k < PITCH_LAG_SPAN2; k++) {
- inptr = &in[k];
- ysum32 -= WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[k-1],(int16_t) in[k-1], scaling);
- ysum32 += WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[PITCH_CORR_LEN2 + k - 1],(int16_t) in[PITCH_CORR_LEN2 + k - 1], scaling);
-
-#ifdef WEBRTC_ARCH_ARM_NEON
- {
- int32_t vbuff[4];
- int32x4_t int_32x4_sum = vmovq_n_s32(0);
- // Can't shift a Neon register to right with a non-constant shift value.
- int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
- // Assert a codition used in loop unrolling at compile-time.
- COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
-
- for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
- int16x4_t int_16x4_x = vld1_s16(&x[n]);
- int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
- int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
- int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
- int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
- }
-
- // Use vector store to avoid long stall from data trasferring
- // from vector to general register.
- vst1q_s32(vbuff, int_32x4_sum);
- csum32 = vbuff[0] + vbuff[1];
- csum32 += vbuff[2];
- csum32 += vbuff[3];
- }
-#else
- csum32 = 0;
- if(scaling == 0) {
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- csum32 += x[n] * inptr[n];
- }
- } else {
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- csum32 += (x[n] * inptr[n]) >> scaling;
- }
- }
-#endif
-
- logcorQ8--;
-
- lys=Log2Q8((uint32_t)ysum32); // Q8
- lys=WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
-
- if (csum32>0) {
-
- lcs=Log2Q8((uint32_t) csum32); // 2log(csum) in Q8
-
- if (lcs>(lys + oneQ8) ){ // csum/sqrt(ysum) > 2
- *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
- } else {
- *logcorQ8 = oneQ8; // 1.00
- }
-
- } else {
- *logcorQ8 = 0;
- }
- }
-}
+extern void WebRtcIsacfix_PCorr2Q32(const int16_t *in, int32_t *logcorQ8);
@@ -311,12 +209,13 @@
/* compute correlation for first and second half of the frame */
- PCorr2Q32(buf_dec16, crrvecQ8_1);
- PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
+ WebRtcIsacfix_PCorr2Q32(buf_dec16, crrvecQ8_1);
+ WebRtcIsacfix_PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
/* bias towards pitch lag of previous frame */
- tmp32a = Log2Q8((uint32_t) old_lagQ8) - 2304; // log2(0.5*oldlag) in Q8
+ tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
+ // log2(0.5*oldlag) in Q8
tmp32b = WEBRTC_SPL_MUL_16_16_RSFT(oldgQ12,oldgQ12, 10); //Q12 & * 4.0;
gain_bias16 = (int16_t) tmp32b; //Q12
if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
@@ -325,7 +224,7 @@
for (k = 0; k < PITCH_LAG_SPAN2; k++)
{
if (crrvecQ8_1[k]>0) {
- tmp32b = Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(tmp16a,tmp16a, 6); //Q10
tmp16b = (int16_t) tmp32c; // Q10 & <8
@@ -334,7 +233,8 @@
tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(gain_bias16,tmp16d,13); // Q10 & * 0.5
bias16 = (int16_t) (1024 + tmp32c); // Q10
- tmp32b = Log2Q8((uint32_t) bias16) - 2560; // Q10 in -> Q8 out with 10*2^8 offset
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
+ // Q10 in -> Q8 out with 10*2^8 offset
crrvecQ8_1[k] += tmp32b ; // -10*2^8 offset
}
}
@@ -407,7 +307,7 @@
xq[0] = WEBRTC_SPL_LSHIFT_W32(xq[0], 8);
Intrp1DQ8(xq, fxq, yq, fyq);
- tmp32a= Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
/* Bias towards short lags */
/* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -42, 8);
@@ -437,10 +337,13 @@
tmp32b = (int32_t) (WEBRTC_SPL_LSHIFT_W32(tmp32a, 1)) - ratq; // Q8
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32b, (int16_t) tmp32b, 8); // Q8
- tmp32b = (int32_t) tmp32c + (int32_t) WEBRTC_SPL_RSHIFT_W32(ratq, 1); // (k-r)^2 + 0.5 * r Q8
- tmp32c = Log2Q8((uint32_t) tmp32a) - 2048; // offset 8*2^8 , log2(0.5*k) Q8
- tmp32d = Log2Q8((uint32_t) tmp32b) - 2048; // offset 8*2^8 , log2(0.5*k) Q8
- tmp32e = tmp32c -tmp32d;
+ tmp32b = (int32_t)tmp32c + (int32_t)WEBRTC_SPL_RSHIFT_W32(ratq, 1);
+ // (k-r)^2 + 0.5 * r Q8
+ tmp32c = WebRtcIsacfix_Log2Q8((uint32_t)tmp32a) - 2048;
+ // offset 8*2^8 , log2(0.5*k) Q8
+ tmp32d = WebRtcIsacfix_Log2Q8((uint32_t)tmp32b) - 2048;
+ // offset 8*2^8 , log2(0.5*k) Q8
+ tmp32e = tmp32c - tmp32d;
cv2q[k] += WEBRTC_SPL_RSHIFT_W32(tmp32e, 1);
@@ -481,7 +384,7 @@
/* Bias towards short lags */
/* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
- tmp32a= Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -82, 8);
tmp32c= tmp32b + 256;
*fyq += tmp32c;
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
index 93c81c8..da401e5 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -58,4 +58,8 @@
int16_t N, /* number of input samples */
int16_t *out); /* array of size N/2 */
+int32_t WebRtcIsacfix_Log2Q8( uint32_t x );
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8);
+
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
new file mode 100644
index 0000000..82155d2
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+#ifdef WEBRTC_ARCH_ARM_NEON
+#include <arm_neon.h>
+#endif
+
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t ysum32,csum32, lys, lcs;
+ int32_t oneQ8;
+ const int16_t* x;
+ const int16_t* inptr;
+
+ oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
+
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum32 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[n],
+ (int16_t)in[n],
+ scaling); // Q0
+ csum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)x[n],
+ (int16_t)in[n],
+ scaling); // Q0
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ ysum32 -= WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[k - 1],
+ (int16_t)in[k - 1],
+ scaling);
+ ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[PITCH_CORR_LEN2 + k - 1],
+ (int16_t)in[PITCH_CORR_LEN2 + k - 1],
+ scaling);
+#ifdef WEBRTC_ARCH_ARM_NEON
+ {
+ int32_t vbuff[4];
+ int32x4_t int_32x4_sum = vmovq_n_s32(0);
+ // Can't shift a Neon register to right with a non-constant shift value.
+ int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
+ // Assert a condition used in loop unrolling at compile-time.
+ COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
+
+ for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
+ int16x4_t int_16x4_x = vld1_s16(&x[n]);
+ int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
+ int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
+ int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
+ int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
+ }
+
+ // Use vector store to avoid long stall from data transferring
+ // from vector to general register.
+ vst1q_s32(vbuff, int_32x4_sum);
+ csum32 = vbuff[0] + vbuff[1];
+ csum32 += vbuff[2];
+ csum32 += vbuff[3];
+ }
+#else
+ csum32 = 0;
+ if(scaling == 0) {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum32 += x[n] * inptr[n];
+ }
+ } else {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum32 += (x[n] * inptr[n]) >> scaling;
+ }
+ }
+#endif
+
+ logcorQ8--;
+
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
new file mode 100644
index 0000000..fa426e9
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t ysum32,csum32, lys, lcs;
+ int32_t oneQ8;
+ const int16_t* x;
+ const int16_t* inptr;
+
+ oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum32 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ {
+ const int16_t* tmp_x = x;
+ const int16_t* tmp_in = in;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[tmp_in]) \n\t"
+ "lh %[tmp2], 2(%[tmp_in]) \n\t"
+ "lh %[tmp3], 4(%[tmp_in]) \n\t"
+ "lh %[tmp4], 6(%[tmp_in]) \n\t"
+ "lh %[tmp5], 0(%[tmp_x]) \n\t"
+ "lh %[tmp6], 2(%[tmp_x]) \n\t"
+ "lh %[tmp7], 4(%[tmp_x]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp5], %[tmp1], %[tmp5] \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp6], %[tmp2], %[tmp6] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "mul %[tmp7], %[tmp3], %[tmp7] \n\t"
+ "mul %[tmp3], %[tmp3], %[tmp3] \n\t"
+ "mul %[tmp8], %[tmp4], %[tmp8] \n\t"
+ "mul %[tmp4], %[tmp4], %[tmp4] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "srav %[tmp5], %[tmp5], %[scaling] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp6], %[tmp6], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp7], %[tmp7], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp8], %[tmp8], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp5] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp6] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp3] \n\t"
+ "addu %[csum32], %[csum32], %[tmp7] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp4] \n\t"
+ "addu %[csum32], %[csum32], %[tmp8] \n\t"
+ "addiu %[tmp_in], %[tmp_in], 8 \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [tmp_in] "+r" (tmp_in),
+ [ysum32] "+r" (ysum32), [tmp_x] "+r" (tmp_x), [csum32] "+r" (csum32),
+ [n] "+r" (n)
+ : [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ const int16_t* tmp_in1 = &in[k - 1];
+ const int16_t* tmp_in2 = &in[PITCH_CORR_LEN2 + k - 1];
+ const int16_t* tmp_x = x;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ csum32 = 0;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp1], 0(%[tmp_in1]) \n\t"
+ "lh %[tmp2], 0(%[tmp_in2]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "subu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "bnez %[scaling], 2f \n\t"
+ " addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 2b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [inptr] "+r" (inptr),
+ [csum32] "+r" (csum32), [tmp_x] "+r" (tmp_x), [ysum32] "+r" (ysum32),
+ [n] "+r" (n)
+ : [tmp_in1] "r" (tmp_in1), [tmp_in2] "r" (tmp_in2),
+ [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+
+ logcorQ8--;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform.c b/modules/audio_coding/codecs/isac/fix/source/transform.c
index 67e513c..24ccc82 100644
--- a/modules/audio_coding/codecs/isac/fix/source/transform.c
+++ b/modules/audio_coding/codecs/isac/fix/source/transform.c
@@ -19,89 +19,13 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/fft.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
-#if (defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON)
-/* Tables are defined in ARM assembly files. */
+/* Tables are defined in transform_tables.c file or ARM assembly files. */
/* Cosine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
/* Sine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
/* Sine table 2 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
-#else
-/* Cosine table 1 in Q14 */
-static const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
- 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
- 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
- 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
- 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
- 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
- 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
- 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
- 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
- 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
- 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
- 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
- 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
- 0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
- -2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
- -4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
- -6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
- -8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
- -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
- -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
- -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
- -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
- -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
- -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
- -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
- -16333, -16349, -16362, -16371, -16378, -16383
-};
-
-/* Sine table 1 in Q14 */
-static const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
- 0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
- 2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
- 4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
- 6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
- 8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
- 9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
- 11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
- 12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
- 14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
- 15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
- 15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
- 16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
- 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
- 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
- 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
- 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
- 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
- 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
- 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
- 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
- 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
- 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
- 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
- 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
-};
-
-
-/* Sine table 2 in Q14 */
-static const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
- 16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
- 16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
- 15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
- 15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
- 14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
- 12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
- 11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
- 9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
- 8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
- 6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
- 4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
- 2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
-};
-#endif // WEBRTC_DETECT_ARM_NEON || WEBRTC_ARCH_ARM_NEON
void WebRtcIsacfix_Time2SpecC(int16_t *inre1Q9,
int16_t *inre2Q9,
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_mips.c b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
new file mode 100644
index 0000000..bf95ee5
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
@@ -0,0 +1,1287 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// The tables are defined in the transform_tables.c file.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4];
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+// The MIPS DSPr2 version of the WebRtcIsacfix_Time2Spec function
+// is not bit-exact with the C version.
+// The accuracy of the MIPS DSPr2 version is the same or better.
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outreQ7,
+ int16_t* outimQ7) {
+ int k = FRAMESAMPLES / 2;
+ int32_t tmpreQ16[FRAMESAMPLES / 2], tmpimQ16[FRAMESAMPLES / 2];
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+ int32_t inre1, inre2, tmpre, tmpim, factor, max, max1;
+ int16_t* cosptr;
+ int16_t* sinptr;
+
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[factor], $zero, 16921 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ // Multiply with complex exponentials and combine into one complex vector.
+ // Also, calculate the maximal absolute value in the same loop.
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre1]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwl %[r1], 0(%[inre2]) \n\t"
+ "lwr %[r0], 0(%[inre1]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r1], 0(%[inre2]) \n\t"
+ "muleq_s.w.phr %[r4], %[r2], %[r0] \n\t"
+ "muleq_s.w.phr %[r5], %[r3], %[r0] \n\t"
+ "muleq_s.w.phr %[r6], %[r3], %[r1] \n\t"
+ "muleq_s.w.phr %[r7], %[r2], %[r1] \n\t"
+ "muleq_s.w.phl %[r8], %[r2], %[r0] \n\t"
+ "muleq_s.w.phl %[r0], %[r3], %[r0] \n\t"
+ "muleq_s.w.phl %[r3], %[r3], %[r1] \n\t"
+ "muleq_s.w.phl %[r1], %[r2], %[r1] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "subu %[r5], %[r7], %[r5] \n\t"
+ "sra %[r4], %[r4], 8 \n\t"
+ "sra %[r5], %[r5], 8 \n\t"
+ "mult $ac0, %[factor], %[r4] \n\t"
+ "mult $ac1, %[factor], %[r5] \n\t"
+ "addu %[r3], %[r8], %[r3] \n\t"
+ "subu %[r0], %[r1], %[r0] \n\t"
+ "sra %[r3], %[r3], 8 \n\t"
+ "sra %[r0], %[r0], 8 \n\t"
+ "mult $ac2, %[factor], %[r3] \n\t"
+ "mult $ac3, %[factor], %[r0] \n\t"
+ "extr_r.w %[r4], $ac0, 16 \n\t"
+ "extr_r.w %[r5], $ac1, 16 \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "addiu %[inre2], %[inre2], 4 \n\t"
+ "extr_r.w %[r6], $ac2, 16 \n\t"
+ "extr_r.w %[r7], $ac3, 16 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "shra_r.w %[r4], %[r4], 3 \n\t"
+ "shra_r.w %[r5], %[r5], 3 \n\t"
+ "sw %[r4], 0(%[tmpre]) \n\t"
+ "absq_s.w %[r4], %[r4] \n\t"
+ "sw %[r5], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r5], %[r5] \n\t"
+ "shra_r.w %[r6], %[r6], 3 \n\t"
+ "shra_r.w %[r7], %[r7], 3 \n\t"
+ "sw %[r6], 4(%[tmpre]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[tmpim]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r0], %[r4], %[r5] \n\t"
+ "movn %[r4], %[r5], %[r0] \n\t"
+ "slt %[r1], %[r6], %[r7] \n\t"
+ "movn %[r6], %[r7], %[r1] \n\t"
+ "slt %[r0], %[max], %[r4] \n\t"
+ "movn %[max], %[r4], %[r0] \n\t"
+ "slt %[r1], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r1] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 8 \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r4], %[r0], %[r2] \n\t"
+ "mul %[r5], %[r1], %[r3] \n\t"
+ "mul %[r0], %[r0], %[r3] \n\t"
+ "mul %[r2], %[r1], %[r2] \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r1], %[r4], %[r5] \n\t"
+ "sra %[r1], %[r1], 7 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r1], %[factor], %[r1] \n\t"
+ "mul %[r3], %[factor], %[r3] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "sra %[r0], %[r0], 7 \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "mul %[r0], %[factor], %[r0] \n\t"
+ "mul %[r2], %[factor], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r3], %[r1] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 3 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 4 \n\t"
+ "sra %[r1], %[r1], 3 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sw %[r1], 0(%[tmpre]) \n\t"
+ "addiu %[tmpre], %[tmpre], 4 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "absq_s.w %[r1], %[r1] \n\t"
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "negu %[r4], %[r1] \n\t"
+ "slt %[r3], %[r1], $zero \n\t"
+ "movn %[r1], %[r4], %[r3] \n\t"
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r0], %[r0] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 4 \n\t"
+ "sra %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "negu %[r2], %[r0] \n\t"
+ "slt %[r3], %[r0], $zero \n\t"
+ "movn %[r0], %[r2], %[r3] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "slt %[r2], %[max], %[r1] \n\t"
+ "movn %[max], %[r1], %[r2] \n\t"
+ "slt %[r2], %[max], %[r0] \n\t"
+ "movn %[max], %[r0], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+    // Calculate WebRtcSpl_NormW32(max).
+    // If the result is >= 0, the values should be shifted max steps to the
+    // left, making the domain Q(16+shift). If the result is < 0, they should
+    // be shifted -max steps to the right, making the domain Q(16+max).
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [inre1] "=&r" (inre1), [inre2] "=&r" (inre2),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [max] "=&r" (max), [factor] "=&r" (factor),
+#if defined(MIPS_DSP_R2_LE)
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+#endif // #if defined(MIPS_DSP_R2_LE)
+ [r5] "=&r" (r5)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+ : "hi", "lo", "memory"
+ );
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "blez %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[max1], -1 \n\t"
+ "addiu %[r5], $zero, 1 \n\t"
+ "sllv %[r4], %[r5], %[r4] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if !defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [tmpre] "=&r" (tmpre), [tmpim] "=&r" (tmpim), [inre1] "=&r" (inre1),
+ [inre2] "=&r" (inre2), [k] "+r" (k), [max1] "=&r" (max1),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9), [max] "r" (max)
+ : "memory"
+ );
+
+ // Get DFT
+ WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+  // The "Fastest" vectors step, the use of symmetry to separate the
+  // data into two complex vectors, and the centering of the frames
+  // in time around zero are all
+  // merged into one loop.
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ k = FRAMESAMPLES / 4;
+  factor = FRAMESAMPLES - 2; // byte offset of int16_t element FRAMESAMPLES / 2 - 1
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[outreQ7], 0 \n\t"
+ "addiu %[tmpim], %[outimQ7], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "srav %[r2], %[r2], %[max] \n\t"
+ "srav %[r3], %[r3], %[max] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sllv %[r2], %[r2], %[max1] \n\t"
+ "sllv %[r3], %[r3], %[max1] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "sra %[r0], %[r0], 9 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [inre1] "=&r" (inre1), [inre2] "=&r" (inre2), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [offset] "+r" (factor), [k] "+r" (k),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [outreQ7] "r" (outreQ7), [outimQ7] "r" (outimQ7),
+ [max] "r" (max), [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+ : "hi", "lo", "memory"
+ );
+}
+
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t *inreQ7,
+ int16_t *inimQ7,
+ int32_t *outre1Q16,
+ int32_t *outre2Q16) {
+ int k = FRAMESAMPLES / 4;
+ int16_t* inre;
+ int16_t* inim;
+ int32_t* outre1;
+ int32_t* outre2;
+ int16_t* cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ int16_t* sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, max, max1;
+#if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 4;
+#else // #if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 2;
+#endif // #if defined(MIPS_DSP_R1_LE)
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim] , %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ "1: \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    // Process two samples per iteration, avoiding a left shift before the
+    // multiplication. The MaxAbsValueW32 function is inlined into the loop.
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "lwl %[r4], 0(%[r8]) \n\t"
+ "lwl %[r5], 0(%[r9]) \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r4], 0(%[r8]) \n\t"
+ "lwr %[r5], 0(%[r9]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "packrl.ph %[r4], %[r4], %[r4] \n\t"
+ "packrl.ph %[r5], %[r5], %[r5] \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r5], %[r3] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sll %[r10], %[offset], 1 \n\t"
+ "addu %[r10], %[outre1], %[r10] \n\t"
+ "sw %[r7], 0(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 4(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r5], %[r3] \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sw %[r7], 4(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 0(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phr %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r4], %[r3] \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sll %[r10], %[offset], 1 \n\t"
+ "addu %[r10], %[outre2], %[r10] \n\t"
+ "sw %[r7], 0(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 4(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r4], %[r3] \n\t"
+ "addiu %[offset], %[offset], -8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 0(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "lh %[r4], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r2], %[r0], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "mul %[r3], %[r1], %[r5] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "addiu %[r2], %[r2], 16 \n\t"
+ "sra %[r2], %[r2], 5 \n\t"
+ "addiu %[r0], %[r0], 16 \n\t"
+ "sra %[r0], %[r0], 5 \n\t"
+ "addiu %[r3], %[r3], 16 \n\t"
+ "sra %[r3], %[r3], 5 \n\t"
+ "lh %[r6], 0(%[r8]) \n\t"
+ "lh %[r7], 0(%[r9]) \n\t"
+ "addiu %[r1], %[r1], 16 \n\t"
+ "sra %[r1], %[r1], 5 \n\t"
+ "mul %[r8], %[r7], %[r4] \n\t"
+ "mul %[r7], %[r7], %[r5] \n\t"
+ "mul %[r9], %[r6], %[r4] \n\t"
+ "mul %[r6], %[r6], %[r5] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "subu %[r1], %[r1], %[r0] \n\t"
+ "sll %[r0], %[offset], 1 \n\t"
+ "addu %[r4], %[outre1], %[r0] \n\t"
+ "addu %[r5], %[outre2], %[r0] \n\t"
+ "addiu %[r8], %[r8], 16 \n\t"
+ "sra %[r8], %[r8], 5 \n\t"
+ "addiu %[r7], %[r7], 16 \n\t"
+ "sra %[r7], %[r7], 5 \n\t"
+ "addiu %[r6], %[r6], 16 \n\t"
+ "sra %[r6], %[r6], 5 \n\t"
+ "addiu %[r9], %[r9], 16 \n\t"
+ "sra %[r9], %[r9], 5 \n\t"
+ "addu %[r8], %[r8], %[r6] \n\t"
+ "negu %[r8], %[r8] \n\t"
+ "subu %[r7], %[r7], %[r9] \n\t"
+ "subu %[r6], %[r2], %[r7] \n\t"
+ "addu %[r0], %[r2], %[r7] \n\t"
+ "addu %[r3], %[r1], %[r8] \n\t"
+ "subu %[r1], %[r8], %[r1] \n\t"
+ "sw %[r6], 0(%[outre1]) \n\t"
+ "sw %[r0], 0(%[r4]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "sw %[r1], 0(%[r5]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ // Inlined WebRtcSpl_MaxAbsValueW32
+ "negu %[r5], %[r6] \n\t"
+ "slt %[r2], %[r6], $zero \n\t"
+ "movn %[r6], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r0] \n\t"
+ "slt %[r2], %[r0], $zero \n\t"
+ "movn %[r0], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r3] \n\t"
+ "slt %[r2], %[r3], $zero \n\t"
+ "movn %[r3], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r1] \n\t"
+ "slt %[r2], %[r1], $zero \n\t"
+ "movn %[r1], %[r5], %[r2] \n\t"
+ "slt %[r2], %[r6], %[r0] \n\t"
+ "slt %[r5], %[r3], %[r1] \n\t"
+ "movn %[r6], %[r0], %[r2] \n\t"
+ "movn %[r3], %[r1], %[r5] \n\t"
+ "slt %[r2], %[r6], %[r3] \n\t"
+ "movn %[r6], %[r3], %[r2] \n\t"
+ "slt %[r2], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+ [offset] "+r" (offset), [k] "+r" (k), [r0] "=&r" (r0),
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6),
+ [r7] "=&r" (r7), [r10] "=&r" (r10),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max] "=&r" (max)
+ : [inreQ7] "r" (inreQ7), [inimQ7] "r" (inimQ7),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr),
+ [outre1Q16] "r" (outre1Q16), [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+ );
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], $zero, 1 \n\t"
+ "addiu %[r5], %[max1], -1 \n\t"
+ "sllv %[r4], %[r4], %[r5] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [max1] "=&r" (max1), [r0] "=&r" (r0),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "memory"
+ );
+
+ WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
+
+  // All the remaining processing is done inside a single loop to avoid
+  // unnecessary memory accesses. The MIPS DSPr2 version processes two
+  // samples at a time.
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+ k = FRAMESAMPLES / 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "addiu %[r4], $zero, 273 \n\t"
+ "addiu %[r5], $zero, 31727 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 16 \n\t"
+ "replv.ph %[r4], %[r4] \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 1 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shrav_r.w %[r6], %[r6], %[max] \n\t"
+ "shrav_r.w %[r7], %[r7], %[max] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "shrav_r.w %[r0], %[r0], %[max] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+ "b 3f \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+ "mul %[r1], %[r1], %[r7] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "2: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max1], %[max1], -1 \n\t"
+ "21: \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "sllv %[r6], %[r6], %[max1] \n\t"
+ "sllv %[r7], %[r7], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 21b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+ "mul %[r1], %[r1], %[r7] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "3: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [r0] "=&r" (r0), [r1] "=&r" (r1),
+ [r2] "=&r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+ [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [cosptr] "r" (cosptr),
+ [sinptr] "r" (sinptr), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+ , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ );
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_tables.c b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
new file mode 100644
index 0000000..ee96b8e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains trigonometric functions look-up tables used in
+ * transform functions WebRtcIsacfix_Time2Spec and WebRtcIsacfix_Spec2Time.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "webrtc/typedefs.h"
+
+#if !(defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON)
+/* Cosine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
+ 0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
+ -2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
+ -4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
+ -6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
+ -8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
+ -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
+ -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
+ -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
+ -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
+ -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
+ -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
+ -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
+ -16333, -16349, -16362, -16371, -16378, -16383
+};
+
+/* Sine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
+ 0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
+ 2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
+ 4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
+ 6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
+ 8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
+ 9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
+ 11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
+ 12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
+ 14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
+ 15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
+ 15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
+ 16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
+};
+
+
+/* Sine table 2 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
+ 16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
+ 16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
+ 15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
+ 15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
+ 14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
+ 12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
+ 11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
+ 9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
+ 8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
+ 6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
+ 4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
+ 2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
+};
+#endif
+
+#if defined(MIPS32_LE)
+/* Cosine table 2 in Q14. Used only on MIPS platforms. */
+const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4] = {
+ 107, -322, 536, -750, 965, -1179, 1392, -1606, 1819, -2032,
+ 2245, -2457, 2669, -2880, 3091, -3301, 3511, -3720, 3929, -4137,
+ 4344, -4550, 4756, -4961, 5165, -5368, 5570, -5771, 5971, -6171,
+ 6369, -6566, 6762, -6957, 7150, -7342, 7534, -7723, 7912, -8099,
+ 8285, -8469, 8652, -8833, 9013, -9191, 9368, -9543, 9717, -9889,
+ 10059, -10227, 10394, -10559, 10722, -10883, 11042, -11200, 11356, -11509,
+ 11661, -11810, 11958, -12104, 12247, -12389, 12528, -12665, 12800, -12933,
+ 13063, -13192, 13318, -13441, 13563, -13682, 13799, -13913, 14025, -14135,
+ 14242, -14347, 14449, -14549, 14647, -14741, 14834, -14924, 15011, -15095,
+ 15178, -15257, 15334, -15408, 15480, -15549, 15615, -15679, 15739, -15798,
+ 15853, -15906, 15956, -16003, 16048, -16090, 16129, -16165, 16199, -16229,
+ 16257, -16283, 16305, -16325, 16342, -16356, 16367, -16375, 16381, -16384
+};
+#endif
diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc
index fb4cb04..ee027e8 100644
--- a/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -32,8 +32,7 @@
};
const int kOpusBlockDurationMs = 20;
-const int kOpusInputSamplingKhz = 48;
-const int kOpusOutputSamplingKhz = 32;
+const int kOpusSamplingKhz = 48;
class OpusFecTest : public TestWithParam<coding_param> {
protected:
@@ -47,14 +46,8 @@
virtual void DecodeABlock(bool lost_previous, bool lost_current);
int block_duration_ms_;
- int input_sampling_khz_;
- int output_sampling_khz_;
-
- // Number of samples-per-channel in a frame.
- int input_length_sample_;
-
- // Expected output number of samples-per-channel in a frame.
- int output_length_sample_;
+ int sampling_khz_;
+ int block_length_sample_;
int channels_;
int bit_rate_;
@@ -91,7 +84,7 @@
// Allocate memory to contain the whole file.
in_data_.reset(new int16_t[loop_length_samples_ +
- input_length_sample_ * channels_]);
+ block_length_sample_ * channels_]);
// Copy the file into the buffer.
ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
@@ -104,12 +97,12 @@
// beginning of the array. Audio frames cross the end of the excerpt always
// appear as a continuum of memory.
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
- input_length_sample_ * channels_ * sizeof(int16_t));
+ block_length_sample_ * channels_ * sizeof(int16_t));
// Maximum number of bytes in output bitstream.
- max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+ max_bytes_ = block_length_sample_ * channels_ * sizeof(int16_t);
- out_data_.reset(new int16_t[2 * output_length_sample_ * channels_]);
+ out_data_.reset(new int16_t[2 * block_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
// Create encoder memory.
@@ -127,10 +120,8 @@
OpusFecTest::OpusFecTest()
: block_duration_ms_(kOpusBlockDurationMs),
- input_sampling_khz_(kOpusInputSamplingKhz),
- output_sampling_khz_(kOpusOutputSamplingKhz),
- input_length_sample_(block_duration_ms_ * input_sampling_khz_),
- output_length_sample_(block_duration_ms_ * output_sampling_khz_),
+ sampling_khz_(kOpusSamplingKhz),
+ block_length_sample_(block_duration_ms_ * sampling_khz_),
data_pointer_(0),
max_bytes_(0),
encoded_bytes_(0),
@@ -141,7 +132,7 @@
void OpusFecTest::EncodeABlock() {
int16_t value = WebRtcOpus_Encode(opus_encoder_,
&in_data_[data_pointer_],
- input_length_sample_,
+ block_length_sample_,
max_bytes_, &bit_stream_[0]);
EXPECT_GT(value, 0);
@@ -162,7 +153,7 @@
} else {
value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
}
- EXPECT_EQ(output_length_sample_, value_1);
+ EXPECT_EQ(block_length_sample_, value_1);
}
if (!lost_current) {
@@ -171,7 +162,7 @@
encoded_bytes_,
&out_data_[value_1 * channels_],
&audio_type);
- EXPECT_EQ(output_length_sample_, value_2);
+ EXPECT_EQ(block_length_sample_, value_2);
}
}
@@ -224,7 +215,7 @@
// |data_pointer_| is incremented and wrapped across
// |loop_length_samples_|.
- data_pointer_ = (data_pointer_ + input_length_sample_ * channels_) %
+ data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
loop_length_samples_;
}
if (mode_set[i].fec) {
diff --git a/modules/audio_coding/codecs/opus/opus_interface.c b/modules/audio_coding/codecs/opus/opus_interface.c
index 24fc4fc..ea535ea 100644
--- a/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/modules/audio_coding/codecs/opus/opus_interface.c
@@ -15,9 +15,6 @@
#include "opus.h"
-#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h"
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-
enum {
/* Maximum supported frame size in WebRTC is 60 ms. */
kWebRtcOpusMaxEncodeFrameSizeMs = 60,
@@ -31,17 +28,6 @@
* milliseconds. */
kWebRtcOpusMaxFrameSizePerChannel = 48 * kWebRtcOpusMaxDecodeFrameSizeMs,
- /* Maximum sample count per frame is 48 kHz * maximum frame size in
- * milliseconds * maximum number of channels. */
- kWebRtcOpusMaxFrameSize = kWebRtcOpusMaxFrameSizePerChannel * 2,
-
- /* Maximum sample count per channel for output resampled to 32 kHz,
- * 32 kHz * maximum frame size in milliseconds. */
- kWebRtcOpusMaxFrameSizePerChannel32kHz = 32 * kWebRtcOpusMaxDecodeFrameSizeMs,
-
- /* Number of samples in resampler state. */
- kWebRtcOpusStateSize = 7,
-
/* Default frame size, 20 ms @ 48 kHz, in samples (for one channel). */
kWebRtcOpusDefaultFrameSize = 960,
};
@@ -143,8 +129,6 @@
}
struct WebRtcOpusDecInst {
- int16_t state_48_32_left[8];
- int16_t state_48_32_right[8];
OpusDecoder* decoder_left;
OpusDecoder* decoder_right;
int prev_decoded_samples;
@@ -205,8 +189,6 @@
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_left, 0, sizeof(inst->state_48_32_left));
- memset(inst->state_48_32_right, 0, sizeof(inst->state_48_32_right));
return 0;
}
return -1;
@@ -215,7 +197,6 @@
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_left, 0, sizeof(inst->state_48_32_left));
return 0;
}
return -1;
@@ -224,7 +205,6 @@
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_right, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_right, 0, sizeof(inst->state_48_32_right));
return 0;
}
return -1;
@@ -267,124 +247,29 @@
return -1;
}
-/* Resample from 48 to 32 kHz. Length of state is assumed to be
- * kWebRtcOpusStateSize (7).
- */
-static int WebRtcOpus_Resample48to32(const int16_t* samples_in, int length,
- int16_t* state, int16_t* samples_out) {
- int i;
- int blocks;
- int16_t output_samples;
- int32_t buffer32[kWebRtcOpusMaxFrameSizePerChannel + kWebRtcOpusStateSize];
-
- /* Resample from 48 kHz to 32 kHz. */
- for (i = 0; i < kWebRtcOpusStateSize; i++) {
- buffer32[i] = state[i];
- state[i] = samples_in[length - kWebRtcOpusStateSize + i];
- }
- for (i = 0; i < length; i++) {
- buffer32[kWebRtcOpusStateSize + i] = samples_in[i];
- }
- /* Resampling 3 samples to 2. Function divides the input in |blocks| number
- * of 3-sample groups, and output is |blocks| number of 2-sample groups.
- * When this is removed, the compensation in WebRtcOpus_DurationEst should be
- * removed too. */
- blocks = length / 3;
- WebRtcSpl_Resample48khzTo32khz(buffer32, buffer32, blocks);
- output_samples = (int16_t) (blocks * 2);
- WebRtcSpl_VectorBitShiftW32ToW16(samples_out, output_samples, buffer32, 15);
-
- return output_samples;
-}
-
-static int WebRtcOpus_DeInterleaveResample(OpusDecInst* inst, int16_t* input,
- int sample_pairs, int16_t* output) {
- int i;
- int16_t buffer_left[kWebRtcOpusMaxFrameSizePerChannel];
- int16_t buffer_right[kWebRtcOpusMaxFrameSizePerChannel];
- int16_t buffer_out[kWebRtcOpusMaxFrameSizePerChannel32kHz];
- int resampled_samples;
-
- /* De-interleave the signal in left and right channel. */
- for (i = 0; i < sample_pairs; i++) {
- /* Take every second sample, starting at the first sample. */
- buffer_left[i] = input[i * 2];
- buffer_right[i] = input[i * 2 + 1];
- }
-
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(
- buffer_left, sample_pairs, inst->state_48_32_left, buffer_out);
-
- /* Add samples interleaved to output vector. */
- for (i = 0; i < resampled_samples; i++) {
- output[i * 2] = buffer_out[i];
- }
-
- /* Resample from 48 kHz to 32 kHz for right channel. */
- resampled_samples = WebRtcOpus_Resample48to32(
- buffer_right, sample_pairs, inst->state_48_32_right, buffer_out);
-
- /* Add samples interleaved to output vector. */
- for (i = 0; i < resampled_samples; i++) {
- output[i * 2 + 1] = buffer_out[i];
- }
-
- return resampled_samples;
-}
-
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer| is big enough for 120 ms (the largest Opus packet size) of stereo
- * audio at 48 kHz. */
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t* coded = (int16_t*)encoded;
int decoded_samples;
- int resampled_samples;
- /* If mono case, just do a regular call to the decoder.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
-
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_left, coded, encoded_bytes,
kWebRtcOpusMaxFrameSizePerChannel,
- buffer, audio_type);
+ decoded, audio_type);
if (decoded_samples < 0) {
return -1;
}
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
/* Update decoded sample memory, to be used by the PLC in case of losses. */
inst->prev_decoded_samples = decoded_samples;
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer16| is big enough for 120 ms (the largestOpus packet size) of
- * stereo audio at 48 kHz. */
- int16_t buffer16[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int16_t output_samples;
int i;
/* If mono case, just do a regular call to the decoder.
@@ -393,120 +278,82 @@
* This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_left, encoded, encoded_bytes,
- kWebRtcOpusMaxFrameSizePerChannel, buffer16,
+ kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of samples pairs, in
- * case of stereo. Number of samples in |buffer16| equals |decoded_samples|
+ * case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
- buffer16[i] = buffer16[i * 2];
+ decoded[i] = decoded[i * 2];
}
}
- /* Resample from 48 kHz to 32 kHz. */
- output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
- inst->state_48_32_left, decoded);
-
/* Update decoded sample memory, to be used by the PLC in case of losses. */
inst->prev_decoded_samples = decoded_samples;
- return output_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer16| is big enough for 120 ms (the largestOpus packet size) of
- * stereo audio at 48 kHz. */
- int16_t buffer16[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int16_t output_samples;
int i;
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_right, encoded, encoded_bytes,
- kWebRtcOpusMaxFrameSizePerChannel, buffer16,
+ kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of samples pairs, in
- * case of stereo. Number of samples in |buffer16| equals |decoded_samples|
+ * case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
- buffer16[i] = buffer16[i * 2 + 1];
+ decoded[i] = decoded[i * 2 + 1];
}
} else {
/* Decode slave should never be called for mono packets. */
return -1;
}
- /* Resample from 48 kHz to 32 kHz. */
- output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
- inst->state_48_32_right, decoded);
- return output_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t audio_type = 0;
int decoded_samples;
- int resampled_samples;
int plc_samples;
- /* If mono case, just do a regular call to the plc function, before
- * resampling.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
-
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int resampled_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
@@ -517,42 +364,35 @@
* output. This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of sample pairs, in
- * case of stereo. The original number of samples in |buffer| equals
+ * case of stereo. The original number of samples in |decoded| equals
* |decoded_samples| times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
- buffer[i] = buffer[i * 2];
+ decoded[i] = decoded[i * 2];
}
}
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int resampled_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
@@ -563,44 +403,35 @@
return -1;
}
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel)
? plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_right, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
/* The parameter |decoded_samples| holds the number of sample pairs,
- * The original number of samples in |buffer| equals |decoded_samples|
+ * The original number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
- buffer[i] = buffer[i * 2 + 1];
+ decoded[i] = decoded[i * 2 + 1];
}
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_right,
- decoded);
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer| is big enough for 120 ms (the largest Opus packet size) of stereo
- * audio at 48 kHz. */
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t* coded = (int16_t*)encoded;
int decoded_samples;
- int resampled_samples;
int fec_samples;
if (WebRtcOpus_PacketHasFec(encoded, encoded_bytes) != 1) {
@@ -609,33 +440,13 @@
fec_samples = opus_packet_get_samples_per_frame(encoded, 48000);
- /* Decode to a temporary buffer. */
decoded_samples = DecodeFec(inst->decoder_left, coded, encoded_bytes,
- fec_samples, buffer, audio_type);
+ fec_samples, decoded, audio_type);
if (decoded_samples < 0) {
return -1;
}
- /* If mono case, just do a regular call to the decoder.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
- return resampled_samples;
+ return decoded_samples;
}
int WebRtcOpus_DurationEst(OpusDecInst* inst,
@@ -652,10 +463,6 @@
/* Invalid payload duration. */
return 0;
}
- /* Compensate for the down-sampling from 48 kHz to 32 kHz.
- * This should be removed when the resampling in WebRtcOpus_Decode is
- * removed. */
- samples = samples * 2 / 3;
return samples;
}
@@ -671,10 +478,6 @@
/* Invalid payload duration. */
return 0;
}
- /* Compensate for the down-sampling from 48 kHz to 32 kHz.
- * This should be removed when the resampling in WebRtcOpus_Decode is
- * removed. */
- samples = samples * 2 / 3;
return samples;
}
diff --git a/modules/audio_coding/codecs/opus/opus_speed_test.cc b/modules/audio_coding/codecs/opus/opus_speed_test.cc
index 16099c6..e2439cf 100644
--- a/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -18,8 +18,7 @@
namespace webrtc {
static const int kOpusBlockDurationMs = 20;
-static const int kOpusInputSamplingKhz = 48;
-static const int kOpustOutputSamplingKhz = 32;
+static const int kOpusSamplingKhz = 48;
class OpusSpeedTest : public AudioCodecSpeedTest {
protected:
@@ -36,8 +35,8 @@
OpusSpeedTest::OpusSpeedTest()
: AudioCodecSpeedTest(kOpusBlockDurationMs,
- kOpusInputSamplingKhz,
- kOpustOutputSamplingKhz),
+ kOpusSamplingKhz,
+ kOpusSamplingKhz),
opus_encoder_(NULL),
opus_decoder_(NULL) {
}
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
index ed876cd..2ec77a5 100644
--- a/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -19,9 +19,13 @@
namespace webrtc {
// Number of samples in a 60 ms stereo frame, sampled at 48 kHz.
-const int kOpusNumberOfSamples = 480 * 6 * 2;
+const int kOpusMaxFrameSamples = 48 * 60 * 2;
// Maximum number of bytes in output bitstream.
const size_t kMaxBytes = 1000;
+// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
+const int kOpus20msFrameSamples = 48 * 20;
+// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
+const int kOpus10msFrameSamples = 48 * 10;
class OpusTest : public ::testing::Test {
protected:
@@ -35,8 +39,8 @@
WebRtcOpusDecInst* opus_stereo_decoder_;
WebRtcOpusDecInst* opus_stereo_decoder_new_;
- int16_t speech_data_[kOpusNumberOfSamples];
- int16_t output_data_[kOpusNumberOfSamples];
+ int16_t speech_data_[kOpusMaxFrameSamples];
+ int16_t output_data_[kOpusMaxFrameSamples];
uint8_t bitstream_[kMaxBytes];
};
@@ -50,17 +54,14 @@
}
void OpusTest::SetUp() {
- // Read some samples from a speech file, to be used in the encode test.
- // In this test we do not care that the sampling frequency of the file is
- // really 32000 Hz. We pretend that it is 48000 Hz.
FILE* input_file;
const std::string file_name =
- webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
input_file = fopen(file_name.c_str(), "rb");
ASSERT_TRUE(input_file != NULL);
- ASSERT_EQ(kOpusNumberOfSamples,
+ ASSERT_EQ(kOpusMaxFrameSamples,
static_cast<int32_t>(fread(speech_data_, sizeof(int16_t),
- kOpusNumberOfSamples, input_file)));
+ kOpusMaxFrameSamples, input_file)));
fclose(input_file);
input_file = NULL;
}
@@ -114,21 +115,24 @@
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_mono_decoder_, coded,
- encoded_bytes, output_data_decode,
- &audio_type));
+ encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_mono_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode|.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i], output_data_decode[i]);
}
@@ -154,26 +158,30 @@
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -234,26 +242,30 @@
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -262,20 +274,23 @@
EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderInitSlave(opus_stereo_decoder_));
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -344,27 +359,31 @@
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_mono_decoder_, coded,
- encoded_bytes, output_data_decode,
- &audio_type));
+ encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_mono_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
// Call decoder PLC for both versions of the decoder.
- int16_t plc_buffer[kOpusNumberOfSamples];
- int16_t plc_buffer_new[kOpusNumberOfSamples];
- EXPECT_EQ(640, WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlc(opus_mono_decoder_new_,
- plc_buffer_new, 1));
+ int16_t plc_buffer[kOpusMaxFrameSamples];
+ int16_t plc_buffer_new[kOpusMaxFrameSamples];
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlc(opus_mono_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer| should be the same as in |plc_buffer_new|.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer[i], plc_buffer_new[i]);
}
@@ -391,36 +410,42 @@
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes,
+ output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes,
- output_data_decode_slave,
- &audio_type));
// Call decoder PLC for both versions of the decoder.
- int16_t plc_buffer_left[kOpusNumberOfSamples];
- int16_t plc_buffer_right[kOpusNumberOfSamples];
- int16_t plc_buffer_new[kOpusNumberOfSamples];
- EXPECT_EQ(640, WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
- plc_buffer_left, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
- plc_buffer_right, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new,
- 1));
+ int16_t plc_buffer_left[kOpusMaxFrameSamples];
+ int16_t plc_buffer_right[kOpusMaxFrameSamples];
+ int16_t plc_buffer_new[kOpusMaxFrameSamples];
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
+ plc_buffer_left, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
+ plc_buffer_right, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer_left| and |plc_buffer_right|should be the same as the
// interleaved samples in |plc_buffer_new|.
- for (int i = 0, j = 0; i < 640; i++) {
+ for (int i = 0, j = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer_left[i], plc_buffer_new[j++]);
EXPECT_EQ(plc_buffer_right[i], plc_buffer_new[j++]);
}
@@ -437,21 +462,23 @@
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
- // Encode with different packet sizes (input 48 kHz, output in 32 kHz).
int16_t encoded_bytes;
// 10 ms.
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 480,
- kMaxBytes, bitstream_);
- EXPECT_EQ(320, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
- encoded_bytes));
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus10msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus10msFrameSamples,
+ WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
+ encoded_bytes));
// 20 ms
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
- encoded_bytes));
-
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
+ encoded_bytes));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index f2410b7..26f5b54 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -1618,14 +1618,8 @@
int codec_id = receiver_.last_audio_codec_id();
- int sample_rate_hz;
- if (codec_id < 0)
- sample_rate_hz = receiver_.current_sample_rate_hz();
- else
- sample_rate_hz = ACMCodecDB::database_[codec_id].plfreq;
-
- // TODO(tlegrand): Remove this option when we have full 48 kHz support.
- return (sample_rate_hz > 32000) ? 32000 : sample_rate_hz;
+ return codec_id < 0 ? receiver_.current_sample_rate_hz() :
+ ACMCodecDB::database_[codec_id].plfreq;
}
// Get current playout frequency.
diff --git a/modules/audio_coding/main/test/opus_test.cc b/modules/audio_coding/main/test/opus_test.cc
index 261eb61..398d59d 100644
--- a/modules/audio_coding/main/test/opus_test.cc
+++ b/modules/audio_coding/main/test/opus_test.cc
@@ -218,6 +218,8 @@
int written_samples = 0;
int read_samples = 0;
int decoded_samples = 0;
+ bool first_packet = true;
+ uint32_t start_time_stamp = 0;
channel->reset_payload_size();
counter_ = 0;
@@ -324,6 +326,10 @@
// Send data to the channel. "channel" will handle the loss simulation.
channel->SendData(kAudioFrameSpeech, payload_type_, rtp_timestamp_,
bitstream, bitstream_len_byte, NULL);
+ if (first_packet) {
+ first_packet = false;
+ start_time_stamp = rtp_timestamp_;
+ }
rtp_timestamp_ += frame_length;
read_samples += frame_length * channels;
}
@@ -344,9 +350,11 @@
// Write stand-alone speech to file.
out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
- // Number of channels should be the same for both stand-alone and
- // ACM-decoding.
- EXPECT_EQ(audio_frame.num_channels_, channels);
+ if (audio_frame.timestamp_ > start_time_stamp) {
+ // Number of channels should be the same for both stand-alone and
+ // ACM-decoding.
+ EXPECT_EQ(audio_frame.num_channels_, channels);
+ }
decoded_samples = 0;
}
@@ -367,13 +375,13 @@
file_stream << webrtc::test::OutputPath() << "opustest_out_"
<< test_number << ".pcm";
file_name = file_stream.str();
- out_file_.Open(file_name, 32000, "wb");
+ out_file_.Open(file_name, 48000, "wb");
file_stream.str("");
file_name = file_stream.str();
file_stream << webrtc::test::OutputPath() << "opusstandalone_out_"
<< test_number << ".pcm";
file_name = file_stream.str();
- out_file_standalone_.Open(file_name, 32000, "wb");
+ out_file_standalone_.Open(file_name, 48000, "wb");
}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_decoder.cc b/modules/audio_coding/neteq/audio_decoder.cc
index f539bb2..0fdaa44 100644
--- a/modules/audio_coding/neteq/audio_decoder.cc
+++ b/modules/audio_coding/neteq/audio_decoder.cc
@@ -162,7 +162,7 @@
#ifdef WEBRTC_CODEC_OPUS
case kDecoderOpus:
case kDecoderOpus_2ch: {
- return 32000;
+ return 48000;
}
#endif
case kDecoderCNGswb48kHz: {
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 05684ac..687a733 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -602,7 +602,7 @@
class AudioDecoderOpusTest : public AudioDecoderTest {
protected:
AudioDecoderOpusTest() : AudioDecoderTest() {
- frame_size_ = 320;
+ frame_size_ = 480;
data_length_ = 10 * frame_size_;
decoder_ = new AudioDecoderOpus(kDecoderOpus);
assert(decoder_);
@@ -613,75 +613,69 @@
WebRtcOpus_EncoderFree(encoder_);
}
+ virtual void SetUp() OVERRIDE {
+ AudioDecoderTest::SetUp();
+ // Upsample from 32 to 48 kHz.
+ // Because Opus is 48 kHz codec but the input file is 32 kHz, so the data
+ // read in |AudioDecoderTest::SetUp| has to be upsampled.
+ // |AudioDecoderTest::SetUp| has read |data_length_| samples, which is more
+ // than necessary after upsampling, so the end of audio that has been read
+ // is unused and the end of the buffer is overwritten by the resampled data.
+ Resampler rs;
+ rs.Reset(32000, 48000, kResamplerSynchronous);
+ const int before_resamp_len_samples = static_cast<int>(data_length_) * 2
+ / 3;
+ int16_t* before_resamp_input = new int16_t[before_resamp_len_samples];
+ memcpy(before_resamp_input, input_,
+ sizeof(int16_t) * before_resamp_len_samples);
+ int resamp_len_samples;
+ EXPECT_EQ(0, rs.Push(before_resamp_input, before_resamp_len_samples,
+ input_, static_cast<int>(data_length_),
+ resamp_len_samples));
+ EXPECT_EQ(static_cast<int>(data_length_), resamp_len_samples);
+ delete[] before_resamp_input;
+ }
+
virtual void InitEncoder() {}
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
- // Upsample from 32 to 48 kHz.
- Resampler rs;
- rs.Reset(32000, 48000, kResamplerSynchronous);
- const int max_resamp_len_samples = static_cast<int>(input_len_samples) *
- 3 / 2;
- int16_t* resamp_input = new int16_t[max_resamp_len_samples];
- int resamp_len_samples;
- EXPECT_EQ(0, rs.Push(input, static_cast<int>(input_len_samples),
- resamp_input, max_resamp_len_samples,
- resamp_len_samples));
- EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
- int enc_len_bytes =
- WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples,
- static_cast<int>(data_length_), output);
+ uint8_t* output) OVERRIDE {
+ int enc_len_bytes = WebRtcOpus_Encode(encoder_, const_cast<int16_t*>(input),
+ static_cast<int16_t>(input_len_samples),
+ static_cast<int16_t>(data_length_), output);
EXPECT_GT(enc_len_bytes, 0);
- delete [] resamp_input;
return enc_len_bytes;
}
OpusEncInst* encoder_;
};
-class AudioDecoderOpusStereoTest : public AudioDecoderTest {
+class AudioDecoderOpusStereoTest : public AudioDecoderOpusTest {
protected:
- AudioDecoderOpusStereoTest() : AudioDecoderTest() {
+ AudioDecoderOpusStereoTest() : AudioDecoderOpusTest() {
channels_ = 2;
- frame_size_ = 320;
- data_length_ = 10 * frame_size_;
+ WebRtcOpus_EncoderFree(encoder_);
+ delete decoder_;
decoder_ = new AudioDecoderOpus(kDecoderOpus_2ch);
assert(decoder_);
WebRtcOpus_EncoderCreate(&encoder_, 2);
}
- ~AudioDecoderOpusStereoTest() {
- WebRtcOpus_EncoderFree(encoder_);
- }
-
- virtual void InitEncoder() {}
-
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
+ uint8_t* output) OVERRIDE {
// Create stereo by duplicating each sample in |input|.
const int input_stereo_samples = static_cast<int>(input_len_samples) * 2;
int16_t* input_stereo = new int16_t[input_stereo_samples];
for (size_t i = 0; i < input_len_samples; i++)
input_stereo[i * 2] = input_stereo[i * 2 + 1] = input[i];
- // Upsample from 32 to 48 kHz.
- Resampler rs;
- rs.Reset(32000, 48000, kResamplerSynchronousStereo);
- const int max_resamp_len_samples = input_stereo_samples * 3 / 2;
- int16_t* resamp_input = new int16_t[max_resamp_len_samples];
- int resamp_len_samples;
- EXPECT_EQ(0, rs.Push(input_stereo, input_stereo_samples, resamp_input,
- max_resamp_len_samples, resamp_len_samples));
- EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
- int enc_len_bytes =
- WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples / 2,
- static_cast<int16_t>(data_length_), output);
+
+ int enc_len_bytes = WebRtcOpus_Encode(
+ encoder_, input_stereo, static_cast<int16_t>(input_len_samples),
+ static_cast<int16_t>(data_length_), output);
EXPECT_GT(enc_len_bytes, 0);
- delete [] resamp_input;
- delete [] input_stereo;
+ delete[] input_stereo;
return enc_len_bytes;
}
-
- OpusEncInst* encoder_;
};
TEST_F(AudioDecoderPcmUTest, EncodeDecode) {
@@ -871,11 +865,11 @@
EXPECT_EQ(8000, AudioDecoder::CodecSampleRateHz(kDecoderCNGnb));
EXPECT_EQ(16000, AudioDecoder::CodecSampleRateHz(kDecoderCNGwb));
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCNGswb32kHz));
+ EXPECT_EQ(48000, AudioDecoder::CodecSampleRateHz(kDecoderOpus));
+ EXPECT_EQ(48000, AudioDecoder::CodecSampleRateHz(kDecoderOpus_2ch));
// TODO(tlegrand): Change 32000 to 48000 below once ACM has 48 kHz support.
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCNGswb48kHz));
EXPECT_EQ(-1, AudioDecoder::CodecSampleRateHz(kDecoderArbitrary));
- EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderOpus));
- EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderOpus_2ch));
#ifdef WEBRTC_CODEC_CELT
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCELT_32));
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCELT_32_2ch));
diff --git a/modules/audio_coding/neteq/payload_splitter_unittest.cc b/modules/audio_coding/neteq/payload_splitter_unittest.cc
index 5cde1bd..9d0aaa1 100644
--- a/modules/audio_coding/neteq/payload_splitter_unittest.cc
+++ b/modules/audio_coding/neteq/payload_splitter_unittest.cc
@@ -743,7 +743,7 @@
// Check first packet.
packet = packet_list.front();
EXPECT_EQ(0, packet->header.payloadType);
- EXPECT_EQ(kBaseTimestamp - 20 * 32, packet->header.timestamp);
+ EXPECT_EQ(kBaseTimestamp - 20 * 48, packet->header.timestamp);
EXPECT_EQ(10, packet->payload_length);
EXPECT_FALSE(packet->primary);
delete [] packet->payload;
diff --git a/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc b/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
index e8fd06a..dee99b8 100644
--- a/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
@@ -21,8 +21,7 @@
namespace test {
static const int kOpusBlockDurationMs = 20;
-static const int kOpusInputSamplingKhz = 48;
-static const int kOpusOutputSamplingKhz = 32;
+static const int kOpusSamplingKhz = 48;
// Define switch for input file name.
static bool ValidateInFilename(const char* flagname, const string& value) {
@@ -128,8 +127,8 @@
};
NetEqOpusFecQualityTest::NetEqOpusFecQualityTest()
- : NetEqQualityTest(kOpusBlockDurationMs, kOpusInputSamplingKhz,
- kOpusOutputSamplingKhz,
+ : NetEqQualityTest(kOpusBlockDurationMs, kOpusSamplingKhz,
+ kOpusSamplingKhz,
(FLAGS_channels == 1) ? kDecoderOpus : kDecoderOpus_2ch,
FLAGS_channels,
FLAGS_in_filename,
diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc
index 0189013..1809324 100644
--- a/modules/audio_coding/neteq/timestamp_scaler.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -48,8 +48,6 @@
denominator_ = 1;
break;
}
- case kDecoderOpus:
- case kDecoderOpus_2ch:
case kDecoderISACfb:
case kDecoderCNGswb48kHz: {
// Use timestamp scaling with factor 2/3 (32 kHz sample rate, but RTP
diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
index 8cbbfa3..1cbbf7f 100644
--- a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -252,10 +252,14 @@
EXPECT_CALL(db, Die()); // Called when database object is deleted.
}
+// TODO(minyue): This test becomes trivial since Opus does not need a timestamp
+// scaler. Therefore, this test may be removed in future. There is no harm to
+// keep it, since it can be taken as a test case for the situation of a trivial
+// timestamp scaler.
TEST(TimestampScaler, TestOpusLargeStep) {
MockDecoderDatabase db;
DecoderDatabase::DecoderInfo info;
- info.codec_type = kDecoderOpus; // Uses a factor 2/3 scaling.
+ info.codec_type = kDecoderOpus;
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -273,8 +277,7 @@
scaler.ToInternal(external_timestamp, kRtpPayloadType));
// Scale back.
EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
- // Internal timestamp should be incremented with twice the step.
- internal_timestamp += 2 * kStep / 3;
+ internal_timestamp += kStep;
}
EXPECT_CALL(db, Die()); // Called when database object is deleted.
@@ -283,7 +286,7 @@
TEST(TimestampScaler, TestIsacFbLargeStep) {
MockDecoderDatabase db;
DecoderDatabase::DecoderInfo info;
- info.codec_type = kDecoderISACfb; // Uses a factor 2/3 scaling.
+ info.codec_type = kDecoderISACfb;
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -301,7 +304,7 @@
scaler.ToInternal(external_timestamp, kRtpPayloadType));
// Scale back.
EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
- // Internal timestamp should be incremented with twice the step.
+ // Internal timestamp should be incremented with two-thirds the step.
internal_timestamp += 2 * kStep / 3;
}
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index a80b1f8..8328855 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -158,7 +158,7 @@
return false;
}
-UniformLoss::UniformLoss(int loss_rate)
+UniformLoss::UniformLoss(double loss_rate)
: loss_rate_(loss_rate) {
}
@@ -204,8 +204,6 @@
// a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
// (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
// 1 - packet_loss_rate.
- // |unit_loss_rate| is usually small. To increase its resolution, we
- // magnify it by |RAND_MAX|.
double unit_loss_rate = (1.0f - pow(1.0f - 0.01f * packet_loss_rate_,
1.0f / units));
loss_model_.reset(new UniformLoss(unit_loss_rate));
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 75d19ae..e0a43b6 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -38,9 +38,10 @@
class UniformLoss : public LossModel {
public:
- UniformLoss(int loss_rate);
+ UniformLoss(double loss_rate);
virtual bool Lost() OVERRIDE;
void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
+
private:
double loss_rate_;
};
@@ -49,6 +50,7 @@
public:
GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
virtual bool Lost() OVERRIDE;
+
private:
// Prob. of losing current packet, when previous packet is lost.
double prob_trans_11_;
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 26ef3e8..6ef6166 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -32,10 +32,13 @@
// stereo at most.
//
// TODO(andrew): consider not modifying |frame| here.
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame) {
+void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
assert(mixed_frame->num_channels_ >= frame->num_channels_);
- // Divide by two to avoid saturation in the mixing.
- *frame >>= 1;
+ if (use_limiter) {
+ // Divide by two to avoid saturation in the mixing.
+ // This is only meaningful if the limiter will be used.
+ *frame >>= 1;
+ }
if (mixed_frame->num_channels_ > frame->num_channels_) {
// We only support mono-to-stereo.
assert(mixed_frame->num_channels_ == 2 &&
@@ -131,6 +134,7 @@
_participantList(),
_additionalParticipantList(),
_numMixedParticipants(0),
+ use_limiter_(true),
_timeStamp(0),
_timeScheduler(kProcessPeriodicityInMs),
_mixedAudioLevel(),
@@ -308,6 +312,11 @@
_timeStamp += _sampleSize;
+ // We only use the limiter if it supports the output sample rate and
+ // we're actually mixing multiple streams.
+ use_limiter_ = _numMixedParticipants > 1 &&
+ _outputFrequency <= kAudioProcMaxNativeSampleRateHz;
+
MixFromList(*mixedAudio, &mixList);
MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
MixAnonomouslyFromList(*mixedAudio, &rampOutList);
@@ -946,16 +955,8 @@
if(audioFrameList->empty()) return 0;
uint32_t position = 0;
- if(_numMixedParticipants == 1) {
- // No mixing required here; skip the saturation protection.
- AudioFrame* audioFrame = audioFrameList->front();
- mixedAudio.CopyFrom(*audioFrame);
- SetParticipantStatistics(&_scratchMixedParticipants[position],
- *audioFrame);
- return 0;
- }
- if (audioFrameList->size() == 1) {
+ if (_numMixedParticipants == 1) {
mixedAudio.timestamp_ = audioFrameList->front()->timestamp_;
mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
} else {
@@ -979,7 +980,7 @@
assert(false);
position = 0;
}
- MixFrames(&mixedAudio, (*iter));
+ MixFrames(&mixedAudio, (*iter), use_limiter_);
SetParticipantStatistics(&_scratchMixedParticipants[position],
**iter);
@@ -999,24 +1000,17 @@
if(audioFrameList->empty()) return 0;
- if(_numMixedParticipants == 1) {
- // No mixing required here; skip the saturation protection.
- AudioFrame* audioFrame = audioFrameList->front();
- mixedAudio.CopyFrom(*audioFrame);
- return 0;
- }
-
for (AudioFrameList::const_iterator iter = audioFrameList->begin();
iter != audioFrameList->end();
++iter) {
- MixFrames(&mixedAudio, *iter);
+ MixFrames(&mixedAudio, *iter, use_limiter_);
}
return 0;
}
bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
- if(_numMixedParticipants == 1) {
- return true;
+ if (!use_limiter_) {
+ return true;
}
// Smoothly limit the mixed frame.
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index 31dc71e..44f4ff0 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -192,6 +192,9 @@
MixerParticipantList _additionalParticipantList;
size_t _numMixedParticipants;
+ // Determines if we will use a limiter for clipping protection during
+ // mixing.
+ bool use_limiter_;
uint32_t _timeStamp;
diff --git a/modules/audio_device/audio_device.gypi b/modules/audio_device/audio_device.gypi
index a64856b..23f417f 100644
--- a/modules/audio_device/audio_device.gypi
+++ b/modules/audio_device/audio_device.gypi
@@ -98,7 +98,7 @@
'linux/audio_mixer_manager_alsa_linux.h',
'linux/latebindingsymboltable_linux.cc',
'linux/latebindingsymboltable_linux.h',
- 'ios/audio_device_ios.cc',
+ 'ios/audio_device_ios.mm',
'ios/audio_device_ios.h',
'ios/audio_device_utility_ios.cc',
'ios/audio_device_utility_ios.h',
@@ -175,7 +175,7 @@
}],
],
}],
- ['OS=="mac" or OS=="ios"', {
+ ['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
@@ -183,6 +183,19 @@
],
},
}],
+ ['OS=="ios"', {
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ 'link_settings': {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-framework AudioToolbox',
+ '-framework AVFoundation',
+ ],
+ },
+ },
+ }],
['OS=="win"', {
'link_settings': {
'libraries': [
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 58411e3..a814560 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -349,15 +349,15 @@
#if defined(WEBRTC_IOS)
if (audioLayer == kPlatformDefaultAudio)
{
- // Create *iPhone Audio* implementation
- ptrAudioDevice = new AudioDeviceIPhone(Id());
+ // Create iOS Audio Device implementation.
+ ptrAudioDevice = new AudioDeviceIOS(Id());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "iPhone Audio APIs will be utilized");
}
if (ptrAudioDevice != NULL)
{
- // Create the Mac implementation of the Device Utility.
- ptrAudioDeviceUtility = new AudioDeviceUtilityIPhone(Id());
+ // Create iOS Device Utility implementation.
+ ptrAudioDeviceUtility = new AudioDeviceUtilityIOS(Id());
}
// END #if defined(WEBRTC_IOS)
diff --git a/modules/audio_device/audio_device_utility.h b/modules/audio_device/audio_device_utility.h
index eb3e623..ebe06d1 100644
--- a/modules/audio_device/audio_device_utility.h
+++ b/modules/audio_device/audio_device_utility.h
@@ -18,15 +18,15 @@
class AudioDeviceUtility
{
-public:
- static uint32_t GetTimeInMS();
- static void WaitForKey();
- static bool StringCompare(const char* str1,
- const char* str2,
- const uint32_t length);
- virtual int32_t Init() = 0;
+ public:
+ static uint32_t GetTimeInMS();
+ static void WaitForKey();
+ static bool StringCompare(const char* str1,
+ const char* str2,
+ const uint32_t length);
+ virtual int32_t Init() = 0;
- virtual ~AudioDeviceUtility() {}
+ virtual ~AudioDeviceUtility() {}
};
} // namespace webrtc
diff --git a/modules/audio_device/ios/audio_device_ios.h b/modules/audio_device/ios/audio_device_ios.h
index 011b6ac..2a48845 100644
--- a/modules/audio_device/ios/audio_device_ios.h
+++ b/modules/audio_device/ios/audio_device_ios.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IPHONE_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IPHONE_H
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
#include <AudioUnit/AudioUnit.h>
@@ -32,244 +32,241 @@
// Number of 10 ms recording blocks in recording buffer
const uint16_t N_REC_BUFFERS = 20;
-class AudioDeviceIPhone : public AudioDeviceGeneric {
-public:
- AudioDeviceIPhone(const int32_t id);
- ~AudioDeviceIPhone();
+class AudioDeviceIOS : public AudioDeviceGeneric {
+ public:
+ AudioDeviceIOS(const int32_t id);
+ ~AudioDeviceIOS();
- // Retrieve the currently utilized audio layer
- virtual int32_t
- ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
- // Main initializaton and termination
- virtual int32_t Init();
- virtual int32_t Terminate();
- virtual bool Initialized() const;
+ // Main initializaton and termination
+ virtual int32_t Init();
+ virtual int32_t Terminate();
+ virtual bool Initialized() const;
- // Device enumeration
- virtual int16_t PlayoutDevices();
- virtual int16_t RecordingDevices();
- virtual int32_t PlayoutDeviceName(uint16_t index,
+ // Device enumeration
+ virtual int16_t PlayoutDevices();
+ virtual int16_t RecordingDevices();
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]);
- virtual int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
- // Device selection
- virtual int32_t SetPlayoutDevice(uint16_t index);
- virtual int32_t
- SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
- virtual int32_t SetRecordingDevice(uint16_t index);
- virtual int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device);
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device);
- // Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available);
- virtual int32_t InitPlayout();
- virtual bool PlayoutIsInitialized() const;
- virtual int32_t RecordingIsAvailable(bool& available);
- virtual int32_t InitRecording();
- virtual bool RecordingIsInitialized() const;
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout();
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording();
+ virtual bool RecordingIsInitialized() const;
- // Audio transport control
- virtual int32_t StartPlayout();
- virtual int32_t StopPlayout();
- virtual bool Playing() const;
- virtual int32_t StartRecording();
- virtual int32_t StopRecording();
- virtual bool Recording() const;
+ // Audio transport control
+ virtual int32_t StartPlayout();
+ virtual int32_t StopPlayout();
+ virtual bool Playing() const;
+ virtual int32_t StartRecording();
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
- // Microphone Automatic Gain Control (AGC)
- virtual int32_t SetAGC(bool enable);
- virtual bool AGC() const;
+ // Microphone Automatic Gain Control (AGC)
+ virtual int32_t SetAGC(bool enable);
+ virtual bool AGC() const;
- // Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
- virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
- uint16_t& volumeRight) const;
+ // Volume control based on the Windows Wave API (Windows only)
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
+ virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
+ uint16_t& volumeRight) const;
- // Audio mixer initialization
- virtual int32_t InitSpeaker();
- virtual bool SpeakerIsInitialized() const;
- virtual int32_t InitMicrophone();
- virtual bool MicrophoneIsInitialized() const;
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker();
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone();
+ virtual bool MicrophoneIsInitialized() const;
- // Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(bool& available);
- virtual int32_t SetSpeakerVolume(uint32_t volume);
- virtual int32_t SpeakerVolume(uint32_t& volume) const;
- virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
- virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
- virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available);
+ virtual int32_t SetSpeakerVolume(uint32_t volume);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
- // Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
- virtual int32_t SetMicrophoneVolume(uint32_t volume);
- virtual int32_t MicrophoneVolume(uint32_t& volume) const;
- virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
- virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
- virtual int32_t
- MicrophoneVolumeStepSize(uint16_t& stepSize) const;
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
- // Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available);
- virtual int32_t SetMicrophoneMute(bool enable);
- virtual int32_t MicrophoneMute(bool& enabled) const;
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
- // Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available);
- virtual int32_t SetSpeakerMute(bool enable);
- virtual int32_t SpeakerMute(bool& enabled) const;
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available);
+ virtual int32_t SetSpeakerMute(bool enable);
+ virtual int32_t SpeakerMute(bool& enabled) const;
- // Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available);
- virtual int32_t SetMicrophoneBoost(bool enable);
- virtual int32_t MicrophoneBoost(bool& enabled) const;
+ // Microphone boost control
+ virtual int32_t MicrophoneBoostIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneBoost(bool enable);
+ virtual int32_t MicrophoneBoost(bool& enabled) const;
- // Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available);
- virtual int32_t SetStereoPlayout(bool enable);
- virtual int32_t StereoPlayout(bool& enabled) const;
- virtual int32_t StereoRecordingIsAvailable(bool& available);
- virtual int32_t SetStereoRecording(bool enable);
- virtual int32_t StereoRecording(bool& enabled) const;
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
+ virtual int32_t SetStereoPlayout(bool enable);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable);
+ virtual int32_t StereoRecording(bool& enabled) const;
- // Delay information and control
- virtual int32_t
- SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS);
- virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const;
- virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
- virtual int32_t RecordingDelay(uint16_t& delayMS) const;
+ // Delay information and control
+ virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS);
+ virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const;
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+ virtual int32_t RecordingDelay(uint16_t& delayMS) const;
- // CPU load
- virtual int32_t CPULoad(uint16_t& load) const;
+ // CPU load
+ virtual int32_t CPULoad(uint16_t& load) const;
-public:
- virtual bool PlayoutWarning() const;
- virtual bool PlayoutError() const;
- virtual bool RecordingWarning() const;
- virtual bool RecordingError() const;
- virtual void ClearPlayoutWarning();
- virtual void ClearPlayoutError();
- virtual void ClearRecordingWarning();
- virtual void ClearRecordingError();
+ public:
+ virtual bool PlayoutWarning() const;
+ virtual bool PlayoutError() const;
+ virtual bool RecordingWarning() const;
+ virtual bool RecordingError() const;
+ virtual void ClearPlayoutWarning();
+ virtual void ClearPlayoutError();
+ virtual void ClearRecordingWarning();
+ virtual void ClearRecordingError();
-public:
- virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+ public:
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
- // Reset Audio Deivce (for mobile devices only)
- virtual int32_t ResetAudioDevice();
+ // Reset Audio Device (for mobile devices only)
+ virtual int32_t ResetAudioDevice();
- // enable or disable loud speaker (for iphone only)
- virtual int32_t SetLoudspeakerStatus(bool enable);
- virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
+ // enable or disable loud speaker (for iphone only)
+ virtual int32_t SetLoudspeakerStatus(bool enable);
+ virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
-private:
- void Lock() {
- _critSect.Enter();
- }
+ private:
+ void Lock() {
+ _critSect.Enter();
+ }
- void UnLock() {
- _critSect.Leave();
- }
+ void UnLock() {
+ _critSect.Leave();
+ }
- int32_t Id() {
- return _id;
- }
+ int32_t Id() {
+ return _id;
+ }
- // Init and shutdown
- int32_t InitPlayOrRecord();
- int32_t ShutdownPlayOrRecord();
+ // Init and shutdown
+ int32_t InitPlayOrRecord();
+ int32_t ShutdownPlayOrRecord();
- void UpdateRecordingDelay();
- void UpdatePlayoutDelay();
+ void UpdateRecordingDelay();
+ void UpdatePlayoutDelay();
- static OSStatus RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- static OSStatus PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames);
-
- OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
+ static OSStatus RecordProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
AudioBufferList *ioData);
- static bool RunCapture(void* ptrThis);
- bool CaptureWorkerThread();
+ static OSStatus PlayoutProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData);
-private:
- AudioDeviceBuffer* _ptrAudioBuffer;
+ OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ uint32_t inBusNumber,
+ uint32_t inNumberFrames);
- CriticalSectionWrapper& _critSect;
+ OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
+ AudioBufferList *ioData);
- ThreadWrapper* _captureWorkerThread;
- uint32_t _captureWorkerThreadId;
+ static bool RunCapture(void* ptrThis);
+ bool CaptureWorkerThread();
- int32_t _id;
+ private:
+ AudioDeviceBuffer* _ptrAudioBuffer;
- AudioUnit _auVoiceProcessing;
+ CriticalSectionWrapper& _critSect;
-private:
- bool _initialized;
- bool _isShutDown;
- bool _recording;
- bool _playing;
- bool _recIsInitialized;
- bool _playIsInitialized;
+ ThreadWrapper* _captureWorkerThread;
+ uint32_t _captureWorkerThreadId;
- bool _recordingDeviceIsSpecified;
- bool _playoutDeviceIsSpecified;
- bool _micIsInitialized;
- bool _speakerIsInitialized;
+ int32_t _id;
- bool _AGC;
+ AudioUnit _auVoiceProcessing;
+ void* _audioInterruptionObserver;
- // The sampling rate to use with Audio Device Buffer
- uint32_t _adbSampFreq;
+ private:
+ bool _initialized;
+ bool _isShutDown;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
- // Delay calculation
- uint32_t _recordingDelay;
- uint32_t _playoutDelay;
- uint32_t _playoutDelayMeasurementCounter;
- uint32_t _recordingDelayHWAndOS;
- uint32_t _recordingDelayMeasurementCounter;
+ bool _recordingDeviceIsSpecified;
+ bool _playoutDeviceIsSpecified;
+ bool _micIsInitialized;
+ bool _speakerIsInitialized;
- // Errors and warnings count
- uint16_t _playWarning;
- uint16_t _playError;
- uint16_t _recWarning;
- uint16_t _recError;
+ bool _AGC;
- // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
- int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
- uint32_t _playoutBufferUsed; // How much is filled
+ // The sampling rate to use with Audio Device Buffer
+ uint32_t _adbSampFreq;
- // Recording buffers
- int16_t
- _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
- uint32_t _recordingLength[N_REC_BUFFERS];
- uint32_t _recordingSeqNumber[N_REC_BUFFERS];
- uint32_t _recordingCurrentSeq;
+ // Delay calculation
+ uint32_t _recordingDelay;
+ uint32_t _playoutDelay;
+ uint32_t _playoutDelayMeasurementCounter;
+ uint32_t _recordingDelayHWAndOS;
+ uint32_t _recordingDelayMeasurementCounter;
- // Current total size all data in buffers, used for delay estimate
- uint32_t _recordingBufferTotalSize;
+ // Errors and warnings count
+ uint16_t _playWarning;
+ uint16_t _playError;
+ uint16_t _recWarning;
+ uint16_t _recError;
+
+ // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
+ int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
+ uint32_t _playoutBufferUsed; // How much is filled
+
+ // Recording buffers
+ int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
+ uint32_t _recordingLength[N_REC_BUFFERS];
+ uint32_t _recordingSeqNumber[N_REC_BUFFERS];
+ uint32_t _recordingCurrentSeq;
+
+ // Current total size all data in buffers, used for delay estimate
+ uint32_t _recordingBufferTotalSize;
};
} // namespace webrtc
-#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_MAC_AUDIO_DEVICE_IPHONE_H_
+#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
diff --git a/modules/audio_device/ios/audio_device_ios.cc b/modules/audio_device/ios/audio_device_ios.mm
similarity index 79%
rename from modules/audio_device/ios/audio_device_ios.cc
rename to modules/audio_device/ios/audio_device_ios.mm
index 7a7189a..19dcfdf 100644
--- a/modules/audio_device/ios/audio_device_ios.cc
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -8,7 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <AudioToolbox/AudioServices.h> // AudioSession
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
@@ -16,7 +17,7 @@
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
+AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
:
_ptrAudioBuffer(NULL),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
@@ -24,6 +25,7 @@
_captureWorkerThreadId(0),
_id(id),
_auVoiceProcessing(NULL),
+ _audioInterruptionObserver(NULL),
_initialized(false),
_isShutDown(false),
_recording(false),
@@ -57,7 +59,7 @@
memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
}
-AudioDeviceIPhone::~AudioDeviceIPhone() {
+AudioDeviceIOS::~AudioDeviceIOS() {
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
@@ -71,7 +73,7 @@
// API
// ============================================================================
-void AudioDeviceIPhone::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -86,7 +88,7 @@
_ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
}
-int32_t AudioDeviceIPhone::ActiveAudioLayer(
+int32_t AudioDeviceIOS::ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -94,7 +96,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::Init() {
+int32_t AudioDeviceIOS::Init() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -137,7 +139,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::Terminate() {
+int32_t AudioDeviceIOS::Terminate() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -169,13 +171,13 @@
return 0;
}
-bool AudioDeviceIPhone::Initialized() const {
+bool AudioDeviceIOS::Initialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (_initialized);
}
-int32_t AudioDeviceIPhone::InitSpeaker() {
+int32_t AudioDeviceIOS::InitSpeaker() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -205,7 +207,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::InitMicrophone() {
+int32_t AudioDeviceIOS::InitMicrophone() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -236,19 +238,19 @@
return 0;
}
-bool AudioDeviceIPhone::SpeakerIsInitialized() const {
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _speakerIsInitialized;
}
-bool AudioDeviceIPhone::MicrophoneIsInitialized() const {
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _micIsInitialized;
}
-int32_t AudioDeviceIPhone::SpeakerVolumeIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -257,16 +259,16 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetSpeakerVolume(uint32_t volume) {
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetSpeakerVolume(volume=%u)", volume);
+ "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" API call not supported on this platform");
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerVolume(uint32_t& volume) const {
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -276,13 +278,13 @@
}
int32_t
- AudioDeviceIPhone::SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) {
+ AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight) {
WEBRTC_TRACE(
kTraceModuleCall,
kTraceAudioDevice,
_id,
- "AudioDeviceIPhone::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
+ "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
volumeLeft, volumeRight);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -292,7 +294,7 @@
}
int32_t
-AudioDeviceIPhone::WaveOutVolume(uint16_t& /*volumeLeft*/,
+AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
uint16_t& /*volumeRight*/) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -303,7 +305,7 @@
}
int32_t
- AudioDeviceIPhone::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -312,7 +314,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::MinSpeakerVolume(
+int32_t AudioDeviceIOS::MinSpeakerVolume(
uint32_t& minVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -323,7 +325,7 @@
}
int32_t
- AudioDeviceIPhone::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+ AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -332,7 +334,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerMuteIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -341,7 +343,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetSpeakerMute(bool enable) {
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -350,7 +352,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerMute(bool& enabled) const {
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -359,7 +361,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneMuteIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -368,7 +370,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneMute(bool enable) {
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -377,7 +379,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneMute(bool& enabled) const {
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -386,7 +388,7 @@
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneBoostIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -395,9 +397,9 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneBoost(bool enable) {
+int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetMicrophoneBoost(enable=%u)", enable);
+ "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
if (!_micIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -414,7 +416,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::MicrophoneBoost(bool& enabled) const {
+int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
if (!_micIsInitialized) {
@@ -428,7 +430,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StereoRecordingIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -437,9 +439,9 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetStereoRecording(bool enable) {
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetStereoRecording(enable=%u)", enable);
+ "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
if (enable) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -449,7 +451,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StereoRecording(bool& enabled) const {
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -457,7 +459,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StereoPlayoutIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -466,9 +468,9 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetStereoPlayout(bool enable) {
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetStereoPlayout(enable=%u)", enable);
+ "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
if (enable) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -478,7 +480,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StereoPlayout(bool& enabled) const {
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -486,23 +488,23 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetAGC(bool enable) {
+int32_t AudioDeviceIOS::SetAGC(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetAGC(enable=%d)", enable);
+ "AudioDeviceIOS::SetAGC(enable=%d)", enable);
_AGC = enable;
return 0;
}
-bool AudioDeviceIPhone::AGC() const {
+bool AudioDeviceIOS::AGC() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _AGC;
}
-int32_t AudioDeviceIPhone::MicrophoneVolumeIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -511,9 +513,9 @@
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneVolume(uint32_t volume) {
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetMicrophoneVolume(volume=%u)", volume);
+ "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" API call not supported on this platform");
@@ -521,7 +523,7 @@
}
int32_t
- AudioDeviceIPhone::MicrophoneVolume(uint32_t& volume) const {
+ AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -531,7 +533,7 @@
}
int32_t
- AudioDeviceIPhone::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -541,7 +543,7 @@
}
int32_t
- AudioDeviceIPhone::MinMicrophoneVolume(uint32_t& minVolume) const {
+ AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -551,7 +553,7 @@
}
int32_t
- AudioDeviceIPhone::MicrophoneVolumeStepSize(
+ AudioDeviceIOS::MicrophoneVolumeStepSize(
uint16_t& stepSize) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -561,16 +563,16 @@
return -1;
}
-int16_t AudioDeviceIPhone::PlayoutDevices() {
+int16_t AudioDeviceIOS::PlayoutDevices() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (int16_t)1;
}
-int32_t AudioDeviceIPhone::SetPlayoutDevice(uint16_t index) {
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetPlayoutDevice(index=%u)", index);
+ "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
if (_playIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -589,18 +591,18 @@
}
int32_t
- AudioDeviceIPhone::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+ AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
"WindowsDeviceType not supported");
return -1;
}
int32_t
- AudioDeviceIPhone::PlayoutDeviceName(uint16_t index,
+ AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::PlayoutDeviceName(index=%u)", index);
+ "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
if (index != 0) {
return -1;
@@ -615,11 +617,11 @@
}
int32_t
- AudioDeviceIPhone::RecordingDeviceName(uint16_t index,
+ AudioDeviceIOS::RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::RecordingDeviceName(index=%u)", index);
+ "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
if (index != 0) {
return -1;
@@ -633,15 +635,15 @@
return 0;
}
-int16_t AudioDeviceIPhone::RecordingDevices() {
+int16_t AudioDeviceIOS::RecordingDevices() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (int16_t)1;
}
-int32_t AudioDeviceIPhone::SetRecordingDevice(uint16_t index) {
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetRecordingDevice(index=%u)", index);
+ "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
if (_recIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -661,7 +663,7 @@
}
int32_t
- AudioDeviceIPhone::SetRecordingDevice(
+ AudioDeviceIOS::SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"WindowsDeviceType not supported");
@@ -671,52 +673,55 @@
// ----------------------------------------------------------------------------
// SetLoudspeakerStatus
//
-// Overrides the receiver playout route to speaker instead. See
-// kAudioSessionProperty_OverrideCategoryDefaultToSpeaker in CoreAudio
-// documentation.
+// Change the default receiver playout route to speaker.
+//
// ----------------------------------------------------------------------------
-int32_t AudioDeviceIPhone::SetLoudspeakerStatus(bool enable) {
+int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetLoudspeakerStatus(enable=%d)", enable);
+ "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
- UInt32 doChangeDefaultRoute = enable ? 1 : 0;
- OSStatus err = AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ NSString* category = session.category;
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ // Respect old category options if category is
+ // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
+ // might not be valid for this category.
+ if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
+ if (enable) {
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+ } else {
+ options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
+ } else {
+ options = AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
- if (err != noErr) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route " \
- "(only available on iOS 3.1 or later)");
- return -1;
+ NSError* error = nil;
+ [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ withOptions:options
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Error changing default output route ");
+ return -1;
}
return 0;
}
-int32_t AudioDeviceIPhone::GetLoudspeakerStatus(bool &enabled) const {
+int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetLoudspeakerStatus(enabled=?)");
+ "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
- UInt32 route(0);
- UInt32 size = sizeof(route);
- OSStatus err = AudioSessionGetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- &size, &route);
- if (err != noErr) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route " \
- "(only available on iOS 3.1 or later)");
- return -1;
- }
-
- enabled = route == 1 ? true: false;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
return 0;
}
-int32_t AudioDeviceIPhone::PlayoutIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
available = false;
@@ -734,7 +739,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::RecordingIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
available = false;
@@ -752,7 +757,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::InitPlayout() {
+int32_t AudioDeviceIOS::InitPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -803,12 +808,12 @@
return 0;
}
-bool AudioDeviceIPhone::PlayoutIsInitialized() const {
+bool AudioDeviceIOS::PlayoutIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_playIsInitialized);
}
-int32_t AudioDeviceIPhone::InitRecording() {
+int32_t AudioDeviceIOS::InitRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -861,12 +866,12 @@
return 0;
}
-bool AudioDeviceIPhone::RecordingIsInitialized() const {
+bool AudioDeviceIOS::RecordingIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_recIsInitialized);
}
-int32_t AudioDeviceIPhone::StartRecording() {
+int32_t AudioDeviceIOS::StartRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -913,7 +918,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StopRecording() {
+int32_t AudioDeviceIOS::StopRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -937,12 +942,12 @@
return 0;
}
-bool AudioDeviceIPhone::Recording() const {
+bool AudioDeviceIOS::Recording() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_recording);
}
-int32_t AudioDeviceIPhone::StartPlayout() {
+int32_t AudioDeviceIOS::StartPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
// This lock is (among other things) needed to avoid concurrency issues
@@ -988,7 +993,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::StopPlayout() {
+int32_t AudioDeviceIOS::StopPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -1012,7 +1017,7 @@
return 0;
}
-bool AudioDeviceIPhone::Playing() const {
+bool AudioDeviceIOS::Playing() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (_playing);
@@ -1025,7 +1030,7 @@
// and set enable states after shutdown to same as current.
// In capture thread audio device will be shutdown, then started again.
// ----------------------------------------------------------------------------
-int32_t AudioDeviceIPhone::ResetAudioDevice() {
+int32_t AudioDeviceIOS::ResetAudioDevice() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -1067,22 +1072,21 @@
return 0;
}
-int32_t AudioDeviceIPhone::PlayoutDelay(uint16_t& delayMS) const {
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
delayMS = _playoutDelay;
return 0;
}
-int32_t AudioDeviceIPhone::RecordingDelay(uint16_t& delayMS) const {
+int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
delayMS = _recordingDelay;
return 0;
}
int32_t
- AudioDeviceIPhone::SetPlayoutBuffer(
- const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) {
+ AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetPlayoutBuffer(type=%u, sizeMS=%u)",
+ "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
type, sizeMS);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -1091,7 +1095,7 @@
}
int32_t
- AudioDeviceIPhone::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
@@ -1102,7 +1106,7 @@
return 0;
}
-int32_t AudioDeviceIPhone::CPULoad(uint16_t& /*load*/) const {
+int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -1110,35 +1114,35 @@
return -1;
}
-bool AudioDeviceIPhone::PlayoutWarning() const {
+bool AudioDeviceIOS::PlayoutWarning() const {
return (_playWarning > 0);
}
-bool AudioDeviceIPhone::PlayoutError() const {
+bool AudioDeviceIOS::PlayoutError() const {
return (_playError > 0);
}
-bool AudioDeviceIPhone::RecordingWarning() const {
+bool AudioDeviceIOS::RecordingWarning() const {
return (_recWarning > 0);
}
-bool AudioDeviceIPhone::RecordingError() const {
+bool AudioDeviceIOS::RecordingError() const {
return (_recError > 0);
}
-void AudioDeviceIPhone::ClearPlayoutWarning() {
+void AudioDeviceIOS::ClearPlayoutWarning() {
_playWarning = 0;
}
-void AudioDeviceIPhone::ClearPlayoutError() {
+void AudioDeviceIOS::ClearPlayoutError() {
_playError = 0;
}
-void AudioDeviceIPhone::ClearRecordingWarning() {
+void AudioDeviceIOS::ClearRecordingWarning() {
_recWarning = 0;
}
-void AudioDeviceIPhone::ClearRecordingError() {
+void AudioDeviceIOS::ClearRecordingError() {
_recError = 0;
}
@@ -1146,7 +1150,7 @@
// Private Methods
// ============================================================================
-int32_t AudioDeviceIPhone::InitPlayOrRecord() {
+int32_t AudioDeviceIOS::InitPlayOrRecord() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
OSStatus result = -1;
@@ -1187,18 +1191,32 @@
}
// Set preferred hardware sample rate to 16 kHz
- Float64 sampleRate(16000.0);
- result = AudioSessionSetProperty(
- kAudioSessionProperty_PreferredHardwareSampleRate,
- sizeof(sampleRate), &sampleRate);
- if (0 != result) {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ Float64 preferredSampleRate(16000.0);
+ [session setPreferredSampleRate:preferredSampleRate
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set preferred sample rate (result=%d)", result);
+ "Could not set preferred sample rate: %s", errorString);
}
-
- uint32_t voiceChat = kAudioSessionMode_VoiceChat;
- AudioSessionSetProperty(kAudioSessionProperty_Mode,
- sizeof(voiceChat), &voiceChat);
+ error = nil;
+ [session setMode:AVAudioSessionModeVoiceChat
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Could not set mode: %s", errorString);
+ }
+ error = nil;
+ [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Could not set category: %s", errorString);
+ }
//////////////////////
// Setup Voice Processing Audio Unit
@@ -1293,7 +1311,7 @@
" Audio Unit playout opened in sampling rate %f",
playoutDesc.mSampleRate);
- playoutDesc.mSampleRate = sampleRate;
+ playoutDesc.mSampleRate = preferredSampleRate;
// Store the sampling frequency to use towards the Audio Device Buffer
// todo: Add 48 kHz (increase buffer sizes). Other fs?
@@ -1362,7 +1380,7 @@
" Audio Unit recording opened in sampling rate %f",
recordingDesc.mSampleRate);
- recordingDesc.mSampleRate = sampleRate;
+ recordingDesc.mSampleRate = preferredSampleRate;
// Set stream format for out/1 (use same sampling frequency as for in/1)
recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
@@ -1392,25 +1410,73 @@
}
// Get hardware sample rate for logging (see if we get what we asked for)
- Float64 hardwareSampleRate = 0.0;
- size = sizeof(hardwareSampleRate);
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareSampleRate, &size,
- &hardwareSampleRate);
- if (0 != result) {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Could not get current HW sample rate (result=%d)", result);
- }
+ double sampleRate = session.sampleRate;
WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
" Current HW sample rate is %f, ADB sample rate is %d",
- hardwareSampleRate, _adbSampFreq);
+ sampleRate, _adbSampFreq);
+
+ // Listen to audio interruptions.
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ id observer =
+ [center addObserverForName:AVAudioSessionInterruptionNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* notification) {
+ NSNumber* typeNumber =
+ [notification userInfo][AVAudioSessionInterruptionTypeKey];
+ AVAudioSessionInterruptionType type =
+ (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
+ switch (type) {
+ case AVAudioSessionInterruptionTypeBegan:
+ // At this point our audio session has been deactivated and the
+ // audio unit render callbacks no longer occur. Nothing to do.
+ break;
+ case AVAudioSessionInterruptionTypeEnded: {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ [session setActive:YES
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Error activating audio session");
+ }
+ // Post interruption the audio unit render callbacks don't
+ // automatically continue, so we restart the unit manually here.
+ AudioOutputUnitStop(_auVoiceProcessing);
+ AudioOutputUnitStart(_auVoiceProcessing);
+ break;
+ }
+ }
+ }];
+ // Increment refcount on observer using ARC bridge. Instance variable is a
+ // void* instead of an id because header is included in other pure C++
+ // files.
+ _audioInterruptionObserver = (__bridge_retained void*)observer;
+
+ // Activate audio session.
+ error = nil;
+ [session setActive:YES
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Error activating audio session");
+ }
return 0;
}
-int32_t AudioDeviceIPhone::ShutdownPlayOrRecord() {
+int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+ if (_audioInterruptionObserver != NULL) {
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ // Transfer ownership of observer back to ARC, which will dealloc the
+ // observer once it exits this scope.
+ id observer = (__bridge_transfer id)_audioInterruptionObserver;
+ [center removeObserver:observer];
+ _audioInterruptionObserver = NULL;
+ }
+
// Close and delete AU
OSStatus result = -1;
if (NULL != _auVoiceProcessing) {
@@ -1435,13 +1501,13 @@
// ============================================================================
OSStatus
- AudioDeviceIPhone::RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
+ AudioDeviceIOS::RecordProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
return ptrThis->RecordProcessImpl(ioActionFlags,
inTimeStamp,
@@ -1451,11 +1517,10 @@
OSStatus
- AudioDeviceIPhone::RecordProcessImpl(
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames) {
+ AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ uint32_t inBusNumber,
+ uint32_t inNumberFrames) {
// Setup some basic stuff
// Use temp buffer not to lock up recording buffer more than necessary
// todo: Make dataTmp a member variable with static size that holds
@@ -1561,20 +1626,20 @@
}
OSStatus
- AudioDeviceIPhone::PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
+ AudioDeviceIOS::PlayoutProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
}
OSStatus
- AudioDeviceIPhone::PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData) {
+ AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
+ AudioBufferList *ioData) {
// Setup some basic stuff
// assert(sizeof(short) == 2); // Assumption for implementation
@@ -1677,7 +1742,7 @@
return 0;
}
-void AudioDeviceIPhone::UpdatePlayoutDelay() {
+void AudioDeviceIOS::UpdatePlayoutDelay() {
++_playoutDelayMeasurementCounter;
if (_playoutDelayMeasurementCounter >= 100) {
@@ -1686,36 +1751,25 @@
// Since this is eventually rounded to integral ms, add 0.5ms
// here to get round-to-nearest-int behavior instead of
// truncation.
- float totalDelaySeconds = 0.0005;
+ double totalDelaySeconds = 0.0005;
// HW output latency
- Float32 f32(0);
- UInt32 size = sizeof(f32);
- OSStatus result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareOutputLatency, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW latency (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ double latency = session.outputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
// HW buffer duration
- f32 = 0;
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW buffer duration (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
// AU latency
Float64 f64(0);
- size = sizeof(f64);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &f64, &size);
+ UInt32 size = sizeof(f64);
+ OSStatus result = AudioUnitGetProperty(
+ _auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
if (0 != result) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"error AU latency (result=%d)", result);
@@ -1733,7 +1787,7 @@
// todo: Add playout buffer?
}
-void AudioDeviceIPhone::UpdateRecordingDelay() {
+void AudioDeviceIOS::UpdateRecordingDelay() {
++_recordingDelayMeasurementCounter;
if (_recordingDelayMeasurementCounter >= 100) {
@@ -1742,37 +1796,25 @@
// Since this is eventually rounded to integral ms, add 0.5ms
// here to get round-to-nearest-int behavior instead of
// truncation.
- float totalDelaySeconds = 0.0005;
+ double totalDelaySeconds = 0.0005;
// HW input latency
- Float32 f32(0);
- UInt32 size = sizeof(f32);
- OSStatus result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareInputLatency, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW latency (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ double latency = session.inputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
// HW buffer duration
- f32 = 0;
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW buffer duration (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
// AU latency
Float64 f64(0);
- size = sizeof(f64);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
+ UInt32 size = sizeof(f64);
+ OSStatus result = AudioUnitGetProperty(
+ _auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
if (0 != result) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"error AU latency (result=%d)", result);
@@ -1799,11 +1841,11 @@
}
}
-bool AudioDeviceIPhone::RunCapture(void* ptrThis) {
- return static_cast<AudioDeviceIPhone*>(ptrThis)->CaptureWorkerThread();
+bool AudioDeviceIOS::RunCapture(void* ptrThis) {
+ return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
}
-bool AudioDeviceIPhone::CaptureWorkerThread() {
+bool AudioDeviceIOS::CaptureWorkerThread() {
if (_recording) {
int bufPos = 0;
unsigned int lowestSeq = 0;
diff --git a/modules/audio_device/ios/audio_device_utility_ios.cc b/modules/audio_device/ios/audio_device_utility_ios.cc
index 6bbceb9..3362817 100644
--- a/modules/audio_device/ios/audio_device_utility_ios.cc
+++ b/modules/audio_device/ios/audio_device_utility_ios.cc
@@ -15,7 +15,7 @@
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-AudioDeviceUtilityIPhone::AudioDeviceUtilityIPhone(const int32_t id)
+AudioDeviceUtilityIOS::AudioDeviceUtilityIOS(const int32_t id)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_id(id),
@@ -24,15 +24,16 @@
"%s created", __FUNCTION__);
}
-AudioDeviceUtilityIPhone::~AudioDeviceUtilityIPhone() {
+AudioDeviceUtilityIOS::~AudioDeviceUtilityIOS() {
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
-
+ {
+ CriticalSectionScoped lock(&_critSect);
+ }
delete &_critSect;
}
-int32_t AudioDeviceUtilityIPhone::Init() {
+int32_t AudioDeviceUtilityIOS::Init() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
diff --git a/modules/audio_device/ios/audio_device_utility_ios.h b/modules/audio_device/ios/audio_device_utility_ios.h
index 081ab82..1694868 100644
--- a/modules/audio_device/ios/audio_device_utility_ios.h
+++ b/modules/audio_device/ios/audio_device_utility_ios.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
#include "webrtc/modules/audio_device/audio_device_utility.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -17,15 +17,15 @@
namespace webrtc {
class CriticalSectionWrapper;
-class AudioDeviceUtilityIPhone: public AudioDeviceUtility {
-public:
- AudioDeviceUtilityIPhone(const int32_t id);
- AudioDeviceUtilityIPhone();
- virtual ~AudioDeviceUtilityIPhone();
+class AudioDeviceUtilityIOS: public AudioDeviceUtility {
+ public:
+ AudioDeviceUtilityIOS(const int32_t id);
+ AudioDeviceUtilityIOS();
+ virtual ~AudioDeviceUtilityIOS();
virtual int32_t Init();
-private:
+ private:
CriticalSectionWrapper& _critSect;
int32_t _id;
AudioDeviceModule::ErrorCode _lastError;
@@ -33,4 +33,4 @@
} // namespace webrtc
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
+#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
diff --git a/modules/audio_processing/aecm/aecm_core_mips.c b/modules/audio_processing/aecm/aecm_core_mips.c
index 31f232b..4c925ca 100644
--- a/modules/audio_processing/aecm/aecm_core_mips.c
+++ b/modules/audio_processing/aecm/aecm_core_mips.c
@@ -1485,7 +1485,7 @@
"mul %[tmp321], %[tmp321], %[tmp161] \n\t"
"sra %[nrsh1], %[tmp32], 14 \n\t"
"sra %[nrsh2], %[tmp321], 14 \n\t"
- : [nrsh1] "=r" (nrsh1), [nrsh2] "=r" (nrsh2)
+ : [nrsh1] "=&r" (nrsh1), [nrsh2] "=r" (nrsh2)
: [tmp16] "r" (tmp16), [tmp161] "r" (tmp161), [tmp32] "r" (tmp32),
[tmp321] "r" (tmp321)
: "memory", "hi", "lo"
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index 35e1eb7..fb2c200 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -51,7 +51,6 @@
return -1;
}
-
void StereoToMono(const float* left, const float* right, float* out,
int samples_per_channel) {
for (int i = 0; i < samples_per_channel; ++i) {
@@ -71,8 +70,9 @@
// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
// broken when someone requests write access to either ChannelBuffer, and
// reestablished when someone requests the outdated ChannelBuffer. It is
-// therefore safe to use the return value of ibuf() and fbuf() until the next
-// call to the other method.
+// therefore safe to use the return value of ibuf_const() and fbuf_const()
+// until the next call to ibuf() or fbuf(), and the return value of ibuf() and
+// fbuf() until the next call to any of the other functions.
class IFChannelBuffer {
public:
IFChannelBuffer(int samples_per_channel, int num_channels)
@@ -81,19 +81,24 @@
fvalid_(true),
fbuf_(samples_per_channel, num_channels) {}
- ChannelBuffer<int16_t>* ibuf() {
+ ChannelBuffer<int16_t>* ibuf() { return ibuf(false); }
+ ChannelBuffer<float>* fbuf() { return fbuf(false); }
+ const ChannelBuffer<int16_t>* ibuf_const() { return ibuf(true); }
+ const ChannelBuffer<float>* fbuf_const() { return fbuf(true); }
+
+ private:
+ ChannelBuffer<int16_t>* ibuf(bool readonly) {
RefreshI();
- fvalid_ = false;
+ fvalid_ = readonly;
return &ibuf_;
}
- ChannelBuffer<float>* fbuf() {
+ ChannelBuffer<float>* fbuf(bool readonly) {
RefreshF();
- ivalid_ = false;
+ ivalid_ = readonly;
return &fbuf_;
}
- private:
void RefreshF() {
if (!fvalid_) {
assert(ivalid_);
@@ -126,24 +131,6 @@
ChannelBuffer<float> fbuf_;
};
-class SplitChannelBuffer {
- public:
- SplitChannelBuffer(int samples_per_split_channel, int num_channels)
- : low_(samples_per_split_channel, num_channels),
- high_(samples_per_split_channel, num_channels) {
- }
- ~SplitChannelBuffer() {}
-
- int16_t* low_channel(int i) { return low_.ibuf()->channel(i); }
- int16_t* high_channel(int i) { return high_.ibuf()->channel(i); }
- float* low_channel_f(int i) { return low_.fbuf()->channel(i); }
- float* high_channel_f(int i) { return high_.fbuf()->channel(i); }
-
- private:
- IFChannelBuffer low_;
- IFChannelBuffer high_;
-};
-
AudioBuffer::AudioBuffer(int input_samples_per_channel,
int num_input_channels,
int process_samples_per_channel,
@@ -155,8 +142,7 @@
num_proc_channels_(num_process_channels),
output_samples_per_channel_(output_samples_per_channel),
samples_per_split_channel_(proc_samples_per_channel_),
- num_mixed_channels_(0),
- num_mixed_low_pass_channels_(0),
+ mixed_low_pass_valid_(false),
reference_copied_(false),
activity_(AudioFrame::kVadUnknown),
keyboard_data_(NULL),
@@ -200,8 +186,10 @@
if (proc_samples_per_channel_ == kSamplesPer32kHzChannel) {
samples_per_split_channel_ = kSamplesPer16kHzChannel;
- split_channels_.reset(new SplitChannelBuffer(samples_per_split_channel_,
- num_proc_channels_));
+ split_channels_low_.reset(new IFChannelBuffer(samples_per_split_channel_,
+ num_proc_channels_));
+ split_channels_high_.reset(new IFChannelBuffer(samples_per_split_channel_,
+ num_proc_channels_));
filter_states_.reset(new SplitFilterStates[num_proc_channels_]);
}
}
@@ -278,89 +266,102 @@
void AudioBuffer::InitForNewData() {
keyboard_data_ = NULL;
- num_mixed_channels_ = 0;
- num_mixed_low_pass_channels_ = 0;
+ mixed_low_pass_valid_ = false;
reference_copied_ = false;
activity_ = AudioFrame::kVadUnknown;
}
const int16_t* AudioBuffer::data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return channels_->ibuf()->channel(channel);
+ return channels_->ibuf_const()->channel(channel);
}
int16_t* AudioBuffer::data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->data(channel));
+ mixed_low_pass_valid_ = false;
+ return channels_->ibuf()->channel(channel);
}
const float* AudioBuffer::data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return channels_->fbuf()->channel(channel);
+ return channels_->fbuf_const()->channel(channel);
}
float* AudioBuffer::data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->data_f(channel));
+ mixed_low_pass_valid_ = false;
+ return channels_->fbuf()->channel(channel);
}
const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->low_channel(channel)
- : data(channel);
+ return split_channels_low_.get()
+ ? split_channels_low_->ibuf_const()->channel(channel)
+ : data(channel);
}
int16_t* AudioBuffer::low_pass_split_data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->low_pass_split_data(channel));
+ mixed_low_pass_valid_ = false;
+ return split_channels_low_.get()
+ ? split_channels_low_->ibuf()->channel(channel)
+ : data(channel);
}
const float* AudioBuffer::low_pass_split_data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->low_channel_f(channel)
- : data_f(channel);
+ return split_channels_low_.get()
+ ? split_channels_low_->fbuf_const()->channel(channel)
+ : data_f(channel);
}
float* AudioBuffer::low_pass_split_data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->low_pass_split_data_f(channel));
+ mixed_low_pass_valid_ = false;
+ return split_channels_low_.get()
+ ? split_channels_low_->fbuf()->channel(channel)
+ : data_f(channel);
}
const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->high_channel(channel) : NULL;
+ return split_channels_high_.get()
+ ? split_channels_high_->ibuf_const()->channel(channel)
+ : NULL;
}
int16_t* AudioBuffer::high_pass_split_data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->high_pass_split_data(channel));
+ return split_channels_high_.get()
+ ? split_channels_high_->ibuf()->channel(channel)
+ : NULL;
}
const float* AudioBuffer::high_pass_split_data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->high_channel_f(channel)
- : NULL;
+ return split_channels_high_.get()
+ ? split_channels_high_->fbuf_const()->channel(channel)
+ : NULL;
}
float* AudioBuffer::high_pass_split_data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->high_pass_split_data_f(channel));
+ return split_channels_high_.get()
+ ? split_channels_high_->fbuf()->channel(channel)
+ : NULL;
}
-const int16_t* AudioBuffer::mixed_data(int channel) const {
- assert(channel >= 0 && channel < num_mixed_channels_);
+const int16_t* AudioBuffer::mixed_low_pass_data() {
+ // Currently only mixing stereo to mono is supported.
+ assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
- return mixed_channels_->channel(channel);
-}
+ if (num_proc_channels_ == 1) {
+ return low_pass_split_data(0);
+ }
-const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
- assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
-
- return mixed_low_pass_channels_->channel(channel);
+ if (!mixed_low_pass_valid_) {
+ if (!mixed_low_pass_channels_.get()) {
+ mixed_low_pass_channels_.reset(
+ new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
+ }
+ StereoToMono(low_pass_split_data(0),
+ low_pass_split_data(1),
+ mixed_low_pass_channels_->data(),
+ samples_per_split_channel_);
+ mixed_low_pass_valid_ = true;
+ }
+ return mixed_low_pass_channels_->data();
}
const int16_t* AudioBuffer::low_pass_reference(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
if (!reference_copied_) {
return NULL;
}
@@ -444,42 +445,6 @@
}
}
-void AudioBuffer::CopyAndMix(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_channels_.get()) {
- mixed_channels_.reset(
- new ChannelBuffer<int16_t>(proc_samples_per_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(channels_->ibuf()->channel(0),
- channels_->ibuf()->channel(1),
- mixed_channels_->channel(0),
- proc_samples_per_channel_);
-
- num_mixed_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_low_pass_channels_.get()) {
- mixed_low_pass_channels_.reset(
- new ChannelBuffer<int16_t>(samples_per_split_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(low_pass_split_data(0),
- low_pass_split_data(1),
- mixed_low_pass_channels_->channel(0),
- samples_per_split_channel_);
-
- num_mixed_low_pass_channels_ = num_mixed_channels;
-}
-
void AudioBuffer::CopyLowPassToReference() {
reference_copied_ = true;
if (!low_pass_reference_channels_.get()) {
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index db24e95..acf5753 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -23,7 +23,6 @@
namespace webrtc {
class PushSincResampler;
-class SplitChannelBuffer;
class IFChannelBuffer;
struct SplitFilterStates {
@@ -56,15 +55,18 @@
int samples_per_split_channel() const;
int samples_per_keyboard_channel() const;
- // It can be assumed that channels are stored contiguously.
+ // Sample array accessors. Channels are guaranteed to be stored contiguously
+ // in memory. Prefer to use the const variants of each accessor when
+ // possible, since they incur less float<->int16 conversion overhead.
int16_t* data(int channel);
const int16_t* data(int channel) const;
int16_t* low_pass_split_data(int channel);
const int16_t* low_pass_split_data(int channel) const;
int16_t* high_pass_split_data(int channel);
const int16_t* high_pass_split_data(int channel) const;
- const int16_t* mixed_data(int channel) const;
- const int16_t* mixed_low_pass_data(int channel) const;
+ // Returns a pointer to the low-pass data downmixed to mono. If this data
+ // isn't already available it re-calculates it.
+ const int16_t* mixed_low_pass_data();
const int16_t* low_pass_reference(int channel) const;
// Float versions of the accessors, with automatic conversion back and forth
@@ -85,7 +87,6 @@
// Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
- void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
@@ -97,9 +98,6 @@
void CopyTo(int samples_per_channel,
AudioProcessing::ChannelLayout layout,
float* const* data);
-
- void CopyAndMix(int num_mixed_channels);
- void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
@@ -112,16 +110,15 @@
const int num_proc_channels_;
const int output_samples_per_channel_;
int samples_per_split_channel_;
- int num_mixed_channels_;
- int num_mixed_low_pass_channels_;
+ bool mixed_low_pass_valid_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
const float* keyboard_data_;
scoped_ptr<IFChannelBuffer> channels_;
- scoped_ptr<SplitChannelBuffer> split_channels_;
+ scoped_ptr<IFChannelBuffer> split_channels_low_;
+ scoped_ptr<IFChannelBuffer> split_channels_high_;
scoped_ptr<SplitFilterStates[]> filter_states_;
- scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
scoped_ptr<ChannelBuffer<float> > input_buffer_;
diff --git a/modules/audio_processing/common.h b/modules/audio_processing/common.h
index 42454df..98e36cb 100644
--- a/modules/audio_processing/common.h
+++ b/modules/audio_processing/common.h
@@ -54,10 +54,14 @@
}
T* data() { return data_.get(); }
- T* channel(int i) {
- assert(i < num_channels_);
+ const T* channel(int i) const {
+ assert(i >= 0 && i < num_channels_);
return channels_[i];
}
+ T* channel(int i) {
+ const ChannelBuffer<T>* t = this;
+ return const_cast<T*>(t->channel(i));
+ }
T** channels() { return channels_.get(); }
int samples_per_channel() { return samples_per_channel_; }
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
index a67b67e..cf7df16 100644
--- a/modules/audio_processing/gain_control_impl.cc
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -59,17 +59,11 @@
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
int err = WebRtcAgc_AddFarend(
my_handle,
- mixed_data,
+ audio->mixed_low_pass_data(),
static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) {
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 6f4cc9e..30f0d9c 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -209,9 +209,6 @@
// ensures the options are applied immediately.
virtual void SetExtraOptions(const Config& config) = 0;
- virtual int EnableExperimentalNs(bool enable) { return kNoError; }
- virtual bool experimental_ns_enabled() const { return false; }
-
// DEPRECATED.
// TODO(ajm): Remove after Chromium has upgraded to using Initialize().
virtual int set_sample_rate_hz(int rate) = 0;
diff --git a/modules/audio_processing/utility/ring_buffer_unittest.cc b/modules/audio_processing/utility/ring_buffer_unittest.cc
index 5dacf0b..f5c36c2 100644
--- a/modules/audio_processing/utility/ring_buffer_unittest.cc
+++ b/modules/audio_processing/utility/ring_buffer_unittest.cc
@@ -52,8 +52,8 @@
// We use ASSERTs in this test to avoid obscuring the seed in the case of a
// failure.
static void RandomStressTest(int** data_ptr) {
- const int kNumTests = 100;
- const int kNumOps = 10000;
+ const int kNumTests = 10;
+ const int kNumOps = 1000;
const int kMaxBufferSize = 1000;
unsigned int seed = time(NULL);
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
index c6e497f..31336b4 100644
--- a/modules/audio_processing/voice_detection_impl.cc
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -61,17 +61,11 @@
}
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
// TODO(ajm): concatenate data in frame buffer here.
int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
apm_->proc_split_sample_rate_hz(),
- mixed_data,
+ audio->mixed_low_pass_data(),
frame_size_samples_);
if (vad_ret == 0) {
stream_has_voice_ = false;
diff --git a/modules/desktop_capture/win/cursor.cc b/modules/desktop_capture/win/cursor.cc
index 00055c4..e3c272c 100644
--- a/modules/desktop_capture/win/cursor.cc
+++ b/modules/desktop_capture/win/cursor.cc
@@ -197,7 +197,7 @@
// The XOR mask becomes the color bitmap.
memcpy(
- image->data(), mask_plane + (width * height), image->stride() * width);
+ image->data(), mask_plane + (width * height), image->stride() * height);
}
// Reconstruct transparency from the mask if the color image does not has
diff --git a/modules/media_file/source/media_file_unittest.cc b/modules/media_file/source/media_file_unittest.cc
index d658dc2..56d3544 100644
--- a/modules/media_file/source/media_file_unittest.cc
+++ b/modules/media_file/source/media_file_unittest.cc
@@ -10,6 +10,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/media_file/interface/media_file.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
@@ -45,3 +46,50 @@
ASSERT_EQ(0, media_file_->StopPlaying());
}
+
+TEST_F(MediaFileTest, WriteWavFile) {
+ // Write file.
+ static const int kHeaderSize = 44;
+ static const int kPayloadSize = 320;
+ webrtc::CodecInst codec = {0, "L16", 16000, kPayloadSize, 1};
+ std::string outfile = webrtc::test::OutputPath() + "wavtest.wav";
+ ASSERT_EQ(0,
+ media_file_->StartRecordingAudioFile(
+ outfile.c_str(), webrtc::kFileFormatWavFile, codec));
+ static const int8_t kFakeData[kPayloadSize] = {0};
+ ASSERT_EQ(0, media_file_->IncomingAudioData(kFakeData, kPayloadSize));
+ ASSERT_EQ(0, media_file_->StopRecording());
+
+ // Check the file we just wrote.
+ static const uint8_t kExpectedHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x64, 0x1, 0, 0, // size of whole file - 8: 320 + 44 - 8
+ 'W', 'A', 'V', 'E',
+ 'f', 'm', 't', ' ',
+ 0x10, 0, 0, 0, // size of fmt block - 8: 24 - 8
+ 0x1, 0, // format: PCM (1)
+ 0x1, 0, // channels: 1
+ 0x80, 0x3e, 0, 0, // sample rate: 16000
+ 0, 0x7d, 0, 0, // byte rate: 2 * 16000
+ 0x2, 0, // block align: NumChannels * BytesPerSample
+ 0x10, 0, // bits per sample: 2 * 8
+ 'd', 'a', 't', 'a',
+ 0x40, 0x1, 0, 0, // size of payload: 320
+ };
+ COMPILE_ASSERT(sizeof(kExpectedHeader) == kHeaderSize, header_size);
+
+ EXPECT_EQ(size_t(kHeaderSize + kPayloadSize),
+ webrtc::test::GetFileSize(outfile));
+ FILE* f = fopen(outfile.c_str(), "rb");
+ ASSERT_TRUE(f);
+
+ uint8_t header[kHeaderSize];
+ ASSERT_EQ(1u, fread(header, kHeaderSize, 1, f));
+ EXPECT_EQ(0, memcmp(kExpectedHeader, header, kHeaderSize));
+
+ uint8_t payload[kPayloadSize];
+ ASSERT_EQ(1u, fread(payload, kPayloadSize, 1, f));
+ EXPECT_EQ(0, memcmp(kFakeData, payload, kPayloadSize));
+
+ EXPECT_EQ(0, fclose(f));
+}
diff --git a/modules/modules.gyp b/modules/modules.gyp
index d054fe9..2a3ba74 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -249,7 +249,6 @@
'video_processing/main/test/unit_test/color_enhancement_test.cc',
'video_processing/main/test/unit_test/content_metrics_test.cc',
'video_processing/main/test/unit_test/deflickering_test.cc',
- 'video_processing/main/test/unit_test/denoising_test.cc',
'video_processing/main/test/unit_test/video_processing_unittest.cc',
'video_processing/main/test/unit_test/video_processing_unittest.h',
],
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index b9151a5..ddd8e53 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -146,8 +146,8 @@
scoped_ptr<paced_sender::IntervalBudget> padding_budget_
GUARDED_BY(critsect_);
- int64_t time_last_update_ GUARDED_BY(critsect_);
- int64_t time_last_send_ GUARDED_BY(critsect_);
+ int64_t time_last_update_us_ GUARDED_BY(critsect_);
+ int64_t time_last_send_us_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_queued_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_sent_ GUARDED_BY(critsect_);
diff --git a/modules/pacing/paced_sender.cc b/modules/pacing/paced_sender.cc
index 52e9cfb..6204a9a 100644
--- a/modules/pacing/paced_sender.cc
+++ b/modules/pacing/paced_sender.cc
@@ -31,12 +31,11 @@
// Max time that the first packet in the queue can sit in the queue if no
// packets are sent, regardless of buffer state. In practice only in effect at
// low bitrates (less than 320 kbits/s).
-const int kMaxQueueTimeWithoutSendingMs = 30;
+const int kMaxQueueTimeWithoutSendingUs = 30000;
} // namespace
namespace webrtc {
-
namespace paced_sender {
struct Packet {
Packet(uint32_t ssrc,
@@ -142,7 +141,7 @@
max_queue_length_ms_(kDefaultMaxQueueLengthMs),
media_budget_(new paced_sender::IntervalBudget(max_bitrate_kbps)),
padding_budget_(new paced_sender::IntervalBudget(min_bitrate_kbps)),
- time_last_update_(clock->TimeInMilliseconds()),
+ time_last_update_us_(clock->TimeInMicroseconds()),
capture_time_ms_last_queued_(0),
capture_time_ms_last_sent_(0),
high_priority_packets_(new paced_sender::PacketList),
@@ -151,8 +150,7 @@
UpdateBytesPerInterval(kMinPacketLimitMs);
}
-PacedSender::~PacedSender() {
-}
+PacedSender::~PacedSender() {}
void PacedSender::Pause() {
CriticalSectionScoped cs(critsect_.get());
@@ -248,7 +246,8 @@
int32_t PacedSender::TimeUntilNextProcess() {
CriticalSectionScoped cs(critsect_.get());
- int64_t elapsed_time_ms = clock_->TimeInMilliseconds() - time_last_update_;
+ int64_t elapsed_time_ms = (clock_->TimeInMicroseconds() -
+ time_last_update_us_ + 500) / 1000;
if (elapsed_time_ms <= 0) {
return kMinPacketLimitMs;
}
@@ -259,10 +258,10 @@
}
int32_t PacedSender::Process() {
- int64_t now = clock_->TimeInMilliseconds();
+ int64_t now_us = clock_->TimeInMicroseconds();
CriticalSectionScoped cs(critsect_.get());
- int elapsed_time_ms = now - time_last_update_;
- time_last_update_ = now;
+ int elapsed_time_ms = (now_us - time_last_update_us_ + 500) / 1000;
+ time_last_update_us_ = now_us;
if (!enabled_) {
return 0;
}
@@ -291,7 +290,6 @@
return 0;
}
-// MUST have critsect_ when calling.
bool PacedSender::SendPacketFromList(paced_sender::PacketList* packet_list)
EXCLUSIVE_LOCKS_REQUIRED(critsect_.get()) {
paced_sender::Packet packet = GetNextPacketFromList(packet_list);
@@ -322,20 +320,18 @@
return true;
}
-// MUST have critsect_ when calling.
void PacedSender::UpdateBytesPerInterval(uint32_t delta_time_ms) {
media_budget_->IncreaseBudget(delta_time_ms);
padding_budget_->IncreaseBudget(delta_time_ms);
}
-// MUST have critsect_ when calling.
bool PacedSender::ShouldSendNextPacket(paced_sender::PacketList** packet_list) {
*packet_list = NULL;
if (media_budget_->bytes_remaining() <= 0) {
// All bytes consumed for this interval.
// Check if we have not sent in a too long time.
- if (clock_->TimeInMilliseconds() - time_last_send_ >
- kMaxQueueTimeWithoutSendingMs) {
+ if (clock_->TimeInMicroseconds() - time_last_send_us_ >
+ kMaxQueueTimeWithoutSendingUs) {
if (!high_priority_packets_->empty()) {
*packet_list = high_priority_packets_.get();
return true;
@@ -386,9 +382,8 @@
return packet;
}
-// MUST have critsect_ when calling.
void PacedSender::UpdateMediaBytesSent(int num_bytes) {
- time_last_send_ = clock_->TimeInMilliseconds();
+ time_last_send_us_ = clock_->TimeInMicroseconds();
media_budget_->UseBudget(num_bytes);
padding_budget_->UseBudget(num_bytes);
}
diff --git a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
index e1bec5f..9980843 100644
--- a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
+++ b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
@@ -198,12 +198,14 @@
start_timestamp(0),
timestamp(0),
capture_time_ms(-1),
- last_timestamp_time_ms(-1) {}
+ last_timestamp_time_ms(-1),
+ media_has_been_sent(false) {}
uint16_t sequence_number;
uint32_t start_timestamp;
uint32_t timestamp;
int64_t capture_time_ms;
int64_t last_timestamp_time_ms;
+ bool media_has_been_sent;
};
class RtpData
diff --git a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
index 0514277..5e580a3 100644
--- a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
@@ -121,7 +121,8 @@
uint32_t SSRC = 456789;
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpNonCompound));
EXPECT_EQ(0, rtcp_sender_->SetREMBData(1234, 1, &SSRC));
- RTCPSender::FeedbackState feedback_state(dummy_rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state =
+ dummy_rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRemb));
}
@@ -129,7 +130,8 @@
uint32_t SSRCs[2] = {456789, 98765};
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
EXPECT_EQ(0, rtcp_sender_->SetREMBData(1234, 2, SSRCs));
- RTCPSender::FeedbackState feedback_state(dummy_rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state =
+ dummy_rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRemb));
}
} // namespace
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc
index b38ae1f..54b991b 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -224,12 +224,12 @@
return true;
}
-int32_t
-RTCPReceiver::NTP(uint32_t *ReceivedNTPsecs,
- uint32_t *ReceivedNTPfrac,
- uint32_t *RTCPArrivalTimeSecs,
- uint32_t *RTCPArrivalTimeFrac,
- uint32_t *rtcp_timestamp) const
+// TODO(pbos): Make this fail when we haven't received NTP.
+bool RTCPReceiver::NTP(uint32_t* ReceivedNTPsecs,
+ uint32_t* ReceivedNTPfrac,
+ uint32_t* RTCPArrivalTimeSecs,
+ uint32_t* RTCPArrivalTimeFrac,
+ uint32_t* rtcp_timestamp) const
{
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
if(ReceivedNTPsecs)
@@ -251,7 +251,7 @@
if (rtcp_timestamp) {
*rtcp_timestamp = _remoteSenderInfo.RTPtimeStamp;
}
- return 0;
+ return true;
}
bool RTCPReceiver::LastReceivedXrReferenceTimeInfo(
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.h b/modules/rtp_rtcp/source/rtcp_receiver.h
index ebffb7c..84eb24c 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -63,11 +63,11 @@
char cName[RTCP_CNAME_SIZE]) const;
// get received NTP
- int32_t NTP(uint32_t *ReceivedNTPsecs,
- uint32_t *ReceivedNTPfrac,
- uint32_t *RTCPArrivalTimeSecs,
- uint32_t *RTCPArrivalTimeFrac,
- uint32_t *rtcp_timestamp) const;
+ bool NTP(uint32_t* ReceivedNTPsecs,
+ uint32_t* ReceivedNTPfrac,
+ uint32_t* RTCPArrivalTimeSecs,
+ uint32_t* RTCPArrivalTimeFrac,
+ uint32_t* rtcp_timestamp) const;
bool LastReceivedXrReferenceTimeInfo(RtcpReceiveTimeInfo* info) const;
diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc
index 2cf7e1c..1edbee4 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -65,30 +65,11 @@
return _stream.str();
}
-RTCPSender::FeedbackState::FeedbackState(ModuleRtpRtcpImpl* module)
- : send_payload_type(module->SendPayloadType()),
- frequency_hz(module->CurrentSendFrequencyHz()),
- packet_count_sent(module->PacketCountSent()),
- byte_count_sent(module->ByteCountSent()),
- module(module) {
- uint32_t last_ntp_secs = 0, last_ntp_frac = 0, last_remote_sr = 0;
- module->LastReceivedNTP(last_ntp_secs, last_ntp_frac, last_remote_sr);
- last_rr_ntp_secs = last_ntp_secs;
- last_rr_ntp_frac = last_ntp_frac;
- remote_sr = last_remote_sr;
-
- has_last_xr_rr = module->LastReceivedXrReferenceTimeInfo(&last_xr_rr);
-
- uint32_t send_bitrate = 0, tmp;
- module->BitrateSent(&send_bitrate, &tmp, &tmp, &tmp);
- this->send_bitrate = send_bitrate;
-}
-
RTCPSender::FeedbackState::FeedbackState()
: send_payload_type(0),
frequency_hz(0),
- packet_count_sent(0),
- byte_count_sent(0),
+ packets_sent(0),
+ media_bytes_sent(0),
send_bitrate(0),
last_rr_ntp_secs(0),
last_rr_ntp_frac(0),
@@ -654,12 +635,12 @@
//sender's packet count
RtpUtility::AssignUWord32ToBuffer(rtcpbuffer + pos,
- feedback_state.packet_count_sent);
+ feedback_state.packets_sent);
pos += 4;
//sender's octet count
RtpUtility::AssignUWord32ToBuffer(rtcpbuffer + pos,
- feedback_state.byte_count_sent);
+ feedback_state.media_bytes_sent);
pos += 4;
uint8_t numberOfReportBlocks = 0;
diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h
index fad3b5e..26c44b0 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/modules/rtp_rtcp/source/rtcp_sender.h
@@ -51,13 +51,12 @@
{
public:
struct FeedbackState {
- explicit FeedbackState(ModuleRtpRtcpImpl* module);
FeedbackState();
uint8_t send_payload_type;
uint32_t frequency_hz;
- uint32_t packet_count_sent;
- uint32_t byte_count_sent;
+ uint32_t packets_sent;
+ uint32_t media_bytes_sent;
uint32_t send_bitrate;
uint32_t last_rr_ntp_secs;
diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
index cba1c34..b8d5395 100644
--- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -337,7 +337,7 @@
TEST_F(RtcpSenderTest, RtcpOff) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpOff));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
}
@@ -381,7 +381,7 @@
EXPECT_EQ(0, rtcp_sender_->SetIJStatus(true));
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
// Transmission time offset packet should be received.
@@ -392,7 +392,7 @@
TEST_F(RtcpSenderTest, TestCompound_NoRtpReceived) {
EXPECT_EQ(0, rtcp_sender_->SetIJStatus(true));
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
// Transmission time offset packet should not be received.
@@ -402,7 +402,7 @@
TEST_F(RtcpSenderTest, TestXrReceiverReferenceTime) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -413,7 +413,7 @@
TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfSending) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, true));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -424,7 +424,7 @@
TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfNotEnabled) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(false);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -435,7 +435,7 @@
TEST_F(RtcpSenderTest, TestSendTimeOfXrRrReport) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
uint32_t ntp_sec;
@@ -475,7 +475,7 @@
TMMBRSet bounding_set;
EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state,kRtcpSr));
// We now expect the packet to show up in the rtcp_packet_info_ of
// test_transport_.
@@ -498,7 +498,7 @@
EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
// We now expect the packet to show up in the rtcp_packet_info_ of
// test_transport_.
diff --git a/modules/rtp_rtcp/source/rtp_header_extension.cc b/modules/rtp_rtcp/source/rtp_header_extension.cc
index 2e72d75..9a1836e 100644
--- a/modules/rtp_rtcp/source/rtp_header_extension.cc
+++ b/modules/rtp_rtcp/source/rtp_header_extension.cc
@@ -65,6 +65,16 @@
return 0;
}
+bool RtpHeaderExtensionMap::IsRegistered(RTPExtensionType type) const {
+ std::map<uint8_t, HeaderExtension*>::const_iterator it =
+ extensionMap_.begin();
+ for (; it != extensionMap_.end(); ++it) {
+ if (it->second->type == type)
+ return true;
+ }
+ return false;
+}
+
int32_t RtpHeaderExtensionMap::GetType(const uint8_t id,
RTPExtensionType* type) const {
assert(type);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 349340f..1a3b79c 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -230,8 +230,7 @@
}
if (rtcp_sender_.TimeToSendRTCPReport()) {
- RTCPSender::FeedbackState feedback_state(this);
- rtcp_sender_.SendRTCP(feedback_state, kRtcpReport);
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
}
@@ -418,12 +417,29 @@
return 0; // TODO(pwestin): change to void.
}
-uint32_t ModuleRtpRtcpImpl::PacketCountSent() const {
- return rtp_sender_.Packets();
-}
+// TODO(pbos): Handle media and RTX streams separately (separate RTCP
+// feedbacks).
+RTCPSender::FeedbackState ModuleRtpRtcpImpl::GetFeedbackState() {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_.GetDataCounters(&rtp_stats, &rtx_stats);
-uint32_t ModuleRtpRtcpImpl::ByteCountSent() const {
- return rtp_sender_.Bytes();
+ RTCPSender::FeedbackState state;
+ state.send_payload_type = SendPayloadType();
+ state.frequency_hz = CurrentSendFrequencyHz();
+ state.packets_sent = rtp_stats.packets + rtx_stats.packets;
+ state.media_bytes_sent = rtp_stats.bytes + rtx_stats.bytes;
+ state.module = this;
+
+ LastReceivedNTP(&state.last_rr_ntp_secs,
+ &state.last_rr_ntp_frac,
+ &state.remote_sr);
+
+ state.has_last_xr_rr = LastReceivedXrReferenceTimeInfo(&state.last_xr_rr);
+
+ uint32_t tmp;
+ BitrateSent(&state.send_bitrate, &tmp, &tmp, &tmp);
+ return state;
}
int ModuleRtpRtcpImpl::CurrentSendFrequencyHz() const {
@@ -433,8 +449,7 @@
int32_t ModuleRtpRtcpImpl::SetSendingStatus(const bool sending) {
if (rtcp_sender_.Sending() != sending) {
// Sends RTCP BYE when going from true to false
- RTCPSender::FeedbackState feedback_state(this);
- if (rtcp_sender_.SetSendingStatus(feedback_state, sending) != 0) {
+ if (rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending) != 0) {
LOG(LS_WARNING) << "Failed to send RTCP BYE";
}
@@ -499,8 +514,7 @@
if (!IsDefaultModule()) {
// Don't send RTCP from default module.
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
- RTCPSender::FeedbackState feedback_state(this);
- rtcp_sender_.SendRTCP(feedback_state, kRtcpReport);
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
return rtp_sender_.SendOutgoingData(frame_type,
payload_type,
@@ -599,14 +613,10 @@
}
} else {
CriticalSectionScoped lock(critical_section_module_ptrs_.get());
- // Decide what media stream to pad on based on a round-robin scheme.
for (size_t i = 0; i < child_modules_.size(); ++i) {
- padding_index_ = (padding_index_ + 1) % child_modules_.size();
// Send padding on one of the modules sending media.
- if (child_modules_[padding_index_]->SendingMedia() &&
- child_modules_[padding_index_]->rtp_sender_.GetTargetBitrate() > 0) {
- return child_modules_[padding_index_]->rtp_sender_.TimeToSendPadding(
- bytes);
+ if (child_modules_[i]->SendingMedia()) {
+ return child_modules_[i]->rtp_sender_.TimeToSendPadding(bytes);
}
}
}
@@ -751,7 +761,9 @@
received_ntpfrac,
rtcp_arrival_time_secs,
rtcp_arrival_time_frac,
- rtcp_timestamp);
+ rtcp_timestamp)
+ ? 0
+ : -1;
}
// Get RoundTripTime.
@@ -782,8 +794,7 @@
// Force a send of an RTCP packet.
// Normal SR and RR are triggered via the process function.
int32_t ModuleRtpRtcpImpl::SendRTCP(uint32_t rtcp_packet_type) {
- RTCPSender::FeedbackState feedback_state(this);
- return rtcp_sender_.SendRTCP(feedback_state, rtcp_packet_type);
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), rtcp_packet_type);
}
int32_t ModuleRtpRtcpImpl::SetRTCPApplicationSpecificData(
@@ -811,11 +822,17 @@
int32_t ModuleRtpRtcpImpl::DataCountersRTP(
uint32_t* bytes_sent,
uint32_t* packets_sent) const {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_.GetDataCounters(&rtp_stats, &rtx_stats);
+
if (bytes_sent) {
- *bytes_sent = rtp_sender_.Bytes();
+ *bytes_sent = rtp_stats.bytes + rtp_stats.padding_bytes +
+ rtp_stats.header_bytes + rtx_stats.bytes +
+ rtx_stats.padding_bytes + rtx_stats.header_bytes;
}
if (packets_sent) {
- *packets_sent = rtp_sender_.Packets();
+ *packets_sent = rtp_stats.packets + rtx_stats.packets;
}
return 0;
}
@@ -955,9 +972,8 @@
}
nack_last_seq_number_sent_ = nack_list[start_id + nackLength - 1];
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpNack, nackLength, &nack_list[start_id]);
+ GetFeedbackState(), kRtcpNack, nackLength, &nack_list[start_id]);
}
// Store the sent packets, needed to answer to a Negative acknowledgment
@@ -1074,9 +1090,8 @@
int32_t ModuleRtpRtcpImpl::SendRTCPSliceLossIndication(
const uint8_t picture_id) {
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpSli, 0, 0, false, picture_id);
+ GetFeedbackState(), kRtcpSli, 0, 0, false, picture_id);
}
int32_t ModuleRtpRtcpImpl::SetCameraDelay(const int32_t delay_ms) {
@@ -1245,9 +1260,8 @@
int32_t ModuleRtpRtcpImpl::SendRTCPReferencePictureSelection(
const uint64_t picture_id) {
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpRpsi, 0, 0, false, picture_id);
+ GetFeedbackState(), kRtcpRpsi, 0, 0, false, picture_id);
}
uint32_t ModuleRtpRtcpImpl::SendTimeOfSendReport(
@@ -1274,23 +1288,24 @@
rtp_sender_.OnReceivedNACK(nack_sequence_numbers, rtt);
}
-int32_t ModuleRtpRtcpImpl::LastReceivedNTP(
- uint32_t& rtcp_arrival_time_secs, // When we got the last report.
- uint32_t& rtcp_arrival_time_frac,
- uint32_t& remote_sr) {
+bool ModuleRtpRtcpImpl::LastReceivedNTP(
+ uint32_t* rtcp_arrival_time_secs, // When we got the last report.
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* remote_sr) const {
// Remote SR: NTP inside the last received (mid 16 bits from sec and frac).
uint32_t ntp_secs = 0;
uint32_t ntp_frac = 0;
- if (-1 == rtcp_receiver_.NTP(&ntp_secs,
- &ntp_frac,
- &rtcp_arrival_time_secs,
- &rtcp_arrival_time_frac,
- NULL)) {
- return -1;
+ if (!rtcp_receiver_.NTP(&ntp_secs,
+ &ntp_frac,
+ rtcp_arrival_time_secs,
+ rtcp_arrival_time_frac,
+ NULL)) {
+ return false;
}
- remote_sr = ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16);
- return 0;
+ *remote_sr =
+ ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16);
+ return true;
}
bool ModuleRtpRtcpImpl::LastReceivedXrReferenceTimeInfo(
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index 7e7ea02..4a23dd4 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -89,12 +89,10 @@
virtual int32_t SetCSRCStatus(const bool include) OVERRIDE;
- virtual uint32_t PacketCountSent() const;
+ virtual RTCPSender::FeedbackState GetFeedbackState();
virtual int CurrentSendFrequencyHz() const;
- virtual uint32_t ByteCountSent() const;
-
virtual void SetRTXSendStatus(const int mode) OVERRIDE;
virtual void RTXSendStatus(int* mode, uint32_t* ssrc,
@@ -328,9 +326,9 @@
const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) OVERRIDE;
- virtual int32_t LastReceivedNTP(uint32_t& NTPsecs,
- uint32_t& NTPfrac,
- uint32_t& remote_sr);
+ virtual bool LastReceivedNTP(uint32_t* NTPsecs,
+ uint32_t* NTPfrac,
+ uint32_t* remote_sr) const;
virtual bool LastReceivedXrReferenceTimeInfo(RtcpReceiveTimeInfo* info) const;
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 930778c..3101a1f 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -388,7 +388,7 @@
VideoCodec codec_;
};
-TEST_F(RtpSendingTest, RoundRobinPadding) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPadding) {
// We have to send on an SSRC to be allowed to pad, since a marker bit must
// be sent prior to padding packets.
const uint8_t payload[200] = {0};
@@ -410,9 +410,13 @@
ExpectPadding(expected_padding);
}
-TEST_F(RtpSendingTest, RoundRobinPaddingRtx) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPaddingRtx) {
// Enable RTX to allow padding to be sent prior to media.
for (int i = 1; i < codec_.numberOfSimulcastStreams + 1; ++i) {
+ // Abs-send-time is needed to be allowed to send padding prior to media,
+ // as otherwise the timestamp used for BWE will be broken.
+ senders_[i]->RegisterSendRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ 1);
senders_[i]->SetRtxSendPayloadType(96);
senders_[i]->SetRtxSsrc(kSenderRtxSsrc + i);
senders_[i]->SetRTXSendStatus(kRtxRetransmitted);
@@ -436,7 +440,7 @@
ExpectPadding(expected_padding);
}
-TEST_F(RtpSendingTest, RoundRobinPaddingRtxRedundantPayloads) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPaddingRtxRedundantPayloads) {
for (int i = 1; i < codec_.numberOfSimulcastStreams + 1; ++i) {
senders_[i]->SetRtxSendPayloadType(96);
senders_[i]->SetRtxSsrc(kSenderRtxSsrc + i);
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index c24b15a..eb88be7 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -87,6 +87,7 @@
timestamp_(0),
capture_time_ms_(0),
last_timestamp_time_ms_(0),
+ media_has_been_sent_(false),
last_packet_marker_bit_(false),
num_csrcs_(0),
csrcs_(),
@@ -430,14 +431,9 @@
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
- if (frame_type == kFrameEmpty) {
- if (paced_sender_->Enabled()) {
- // Padding is driven by the pacer and not by the encoder.
- return 0;
- }
- return SendPaddingAccordingToBitrate(payload_type, capture_timestamp,
- capture_time_ms) ? 0 : -1;
- }
+ if (frame_type == kFrameEmpty)
+ return 0;
+
ret_val = video_->SendVideo(video_type, frame_type, payload_type,
capture_timestamp, capture_time_ms,
payload_data, payload_size,
@@ -475,45 +471,6 @@
return bytes_to_send - bytes_left;
}
-bool RTPSender::SendPaddingAccordingToBitrate(
- int8_t payload_type, uint32_t capture_timestamp,
- int64_t capture_time_ms) {
- // Current bitrate since last estimate(1 second) averaged with the
- // estimate since then, to get the most up to date bitrate.
- uint32_t current_bitrate = bitrate_sent_.BitrateNow();
- uint32_t target_bitrate = GetTargetBitrate();
- int bitrate_diff = target_bitrate - current_bitrate;
- if (bitrate_diff <= 0) {
- return true;
- }
- int bytes = 0;
- if (current_bitrate == 0) {
- // Start up phase. Send one 33.3 ms batch to start with.
- bytes = (bitrate_diff / 8) / 30;
- } else {
- bytes = (bitrate_diff / 8);
- // Cap at 200 ms of target send data.
- int bytes_cap = target_bitrate / 1000 * 25; // 1000 / 8 / 5.
- if (bytes > bytes_cap) {
- bytes = bytes_cap;
- }
- }
- uint32_t timestamp;
- {
- CriticalSectionScoped cs(send_critsect_);
- // Add the random RTP timestamp offset and store the capture time for
- // later calculation of the send time offset.
- timestamp = start_timestamp_ + capture_timestamp;
- timestamp_ = timestamp;
- capture_time_ms_ = capture_time_ms;
- last_timestamp_time_ms_ = clock_->TimeInMilliseconds();
- }
- int bytes_sent = SendPadData(payload_type, timestamp, capture_time_ms,
- bytes, false, false);
- // We did not manage to send all bytes. Comparing with 31 due to modulus 32.
- return bytes - bytes_sent < 31;
-}
-
int RTPSender::BuildPaddingPacket(uint8_t* packet, int header_length,
int32_t bytes) {
int padding_bytes_in_packet = kMaxPaddingLength;
@@ -536,9 +493,7 @@
int RTPSender::SendPadData(int payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
- int32_t bytes,
- bool force_full_size_packets,
- bool over_rtx) {
+ int32_t bytes) {
// Drop this packet if we're not sending media packets.
if (!SendingMedia()) {
return bytes;
@@ -547,36 +502,34 @@
int bytes_sent = 0;
for (; bytes > 0; bytes -= padding_bytes_in_packet) {
// Always send full padding packets.
- if (force_full_size_packets && bytes < kMaxPaddingLength)
+ if (bytes < kMaxPaddingLength)
bytes = kMaxPaddingLength;
- if (bytes < kMaxPaddingLength) {
- if (force_full_size_packets) {
- bytes = kMaxPaddingLength;
- } else {
- // Round to the nearest multiple of 32.
- bytes = (bytes + 16) & 0xffe0;
- }
- }
- if (bytes < 32) {
- // Sanity don't send empty packets.
- break;
- }
+
uint32_t ssrc;
uint16_t sequence_number;
+ bool over_rtx;
{
CriticalSectionScoped cs(send_critsect_);
// Only send padding packets following the last packet of a frame,
// indicated by the marker bit.
- if (!over_rtx && !last_packet_marker_bit_)
- return bytes_sent;
if (rtx_ == kRtxOff) {
+ // Without RTX we can't send padding in the middle of frames.
+ if (!last_packet_marker_bit_)
+ return bytes_sent;
ssrc = ssrc_;
sequence_number = sequence_number_;
++sequence_number_;
+ over_rtx = false;
} else {
+ // Without abs-send-time a media packet must be sent before padding so
+ // that the timestamps used for estimation are correct.
+ if (!media_has_been_sent_ && !rtp_header_extension_map_.IsRegistered(
+ kRtpExtensionAbsoluteSendTime))
+ return bytes_sent;
ssrc = ssrc_rtx_;
sequence_number = sequence_number_rtx_;
++sequence_number_rtx_;
+ over_rtx = true;
}
}
@@ -656,10 +609,13 @@
return length;
}
}
-
- CriticalSectionScoped lock(send_critsect_);
+ int rtx = kRtxOff;
+ {
+ CriticalSectionScoped lock(send_critsect_);
+ rtx = rtx_;
+ }
return PrepareAndSendPacket(data_buffer, length, capture_time_ms,
- (rtx_ & kRtxRetransmitted) > 0, true) ?
+ (rtx & kRtxRetransmitted) > 0, true) ?
length : -1;
}
@@ -852,6 +808,10 @@
diff_ms);
UpdateAbsoluteSendTime(buffer_to_send_ptr, length, rtp_header, now_ms);
bool ret = SendPacketToNetwork(buffer_to_send_ptr, length);
+ if (ret) {
+ CriticalSectionScoped lock(send_critsect_);
+ media_has_been_sent_ = true;
+ }
UpdateRtpStats(buffer_to_send_ptr, length, rtp_header, send_over_rtx,
is_retransmit);
return ret;
@@ -907,6 +867,7 @@
}
int RTPSender::TimeToSendPadding(int bytes) {
+ assert(bytes > 0);
int payload_type;
int64_t capture_time_ms;
uint32_t timestamp;
@@ -933,12 +894,8 @@
bytes_sent = SendRedundantPayloads(payload_type, bytes);
bytes -= bytes_sent;
if (bytes > 0) {
- int padding_sent = SendPadData(payload_type,
- timestamp,
- capture_time_ms,
- bytes,
- true,
- rtx != kRtxOff);
+ int padding_sent =
+ SendPadData(payload_type, timestamp, capture_time_ms, bytes);
bytes_sent += padding_sent;
}
return bytes_sent;
@@ -992,6 +949,11 @@
uint32_t length = payload_length + rtp_header_length;
if (!SendPacketToNetwork(buffer, length))
return -1;
+ assert(payload_length - rtp_header.paddingLength > 0);
+ {
+ CriticalSectionScoped lock(send_critsect_);
+ media_has_been_sent_ = true;
+ }
UpdateRtpStats(buffer, length, rtp_header, false, false);
return 0;
}
@@ -1061,17 +1023,11 @@
}
}
-uint32_t RTPSender::Packets() const {
+void RTPSender::GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const {
CriticalSectionScoped lock(statistics_crit_.get());
- return rtp_stats_.packets + rtx_rtp_stats_.packets;
-}
-
-// Number of sent RTP bytes.
-uint32_t RTPSender::Bytes() const {
- CriticalSectionScoped lock(statistics_crit_.get());
- return rtp_stats_.bytes + rtp_stats_.header_bytes + rtp_stats_.padding_bytes +
- rtx_rtp_stats_.bytes + rtx_rtp_stats_.header_bytes +
- rtx_rtp_stats_.padding_bytes;
+ *rtp_stats = rtp_stats_;
+ *rtx_stats = rtx_rtp_stats_;
}
int RTPSender::CreateRTPHeader(
@@ -1732,6 +1688,7 @@
timestamp_ = rtp_state.timestamp;
capture_time_ms_ = rtp_state.capture_time_ms;
last_timestamp_time_ms_ = rtp_state.last_timestamp_time_ms;
+ media_has_been_sent_ = rtp_state.media_has_been_sent;
}
RtpState RTPSender::GetRtpState() const {
@@ -1743,6 +1700,7 @@
state.timestamp = timestamp_;
state.capture_time_ms = capture_time_ms_;
state.last_timestamp_time_ms = last_timestamp_time_ms_;
+ state.media_has_been_sent = media_has_been_sent_;
return state;
}
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index 4a9e10e..f3a2bdc 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -109,11 +109,8 @@
void SetSendingMediaStatus(const bool enabled);
bool SendingMedia() const;
- // Number of sent RTP packets.
- uint32_t Packets() const;
-
- // Number of sent RTP bytes.
- uint32_t Bytes() const;
+ void GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const;
void ResetDataCounters();
@@ -270,9 +267,7 @@
int SendPadData(int payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
- int32_t bytes,
- bool force_full_size_packets,
- bool only_pad_after_markerbit);
+ int32_t bytes);
// Called on update of RTP statistics.
void RegisterRtpStatisticsCallback(StreamDataCountersCallback* callback);
@@ -312,9 +307,6 @@
int SendRedundantPayloads(int payload_type, int bytes);
- bool SendPaddingAccordingToBitrate(int8_t payload_type,
- uint32_t capture_timestamp,
- int64_t capture_time_ms);
int BuildPaddingPacket(uint8_t* packet, int header_length, int32_t bytes);
void BuildRtxPacket(uint8_t* buffer, uint16_t* length,
@@ -395,6 +387,7 @@
uint32_t timestamp_ GUARDED_BY(send_critsect_);
int64_t capture_time_ms_ GUARDED_BY(send_critsect_);
int64_t last_timestamp_time_ms_ GUARDED_BY(send_critsect_);
+ bool media_has_been_sent_ GUARDED_BY(send_critsect_);
bool last_packet_marker_bit_ GUARDED_BY(send_critsect_);
uint8_t num_csrcs_ GUARDED_BY(send_critsect_);
uint32_t csrcs_[kRtpCsrcSize] GUARDED_BY(send_critsect_);
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 40b1054..e9b01de 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -39,6 +39,7 @@
const uint8_t kAudioLevelExtensionId = 9;
const int kAudioPayload = 103;
const uint64_t kStartTime = 123456789;
+const size_t kMaxPaddingSize = 224u;
} // namespace
using testing::_;
@@ -700,7 +701,7 @@
kAbsoluteSendTimeExtensionId);
rtp_sender_->SetTargetBitrate(300000);
const size_t kNumPayloadSizes = 10;
- const int kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700, 750,
+ const size_t kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700, 750,
800, 850, 900, 950};
// Send 10 packets of increasing size.
for (size_t i = 0; i < kNumPayloadSizes; ++i) {
@@ -711,25 +712,27 @@
rtp_sender_->TimeToSendPacket(seq_num++, capture_time_ms, false);
fake_clock_.AdvanceTimeMilliseconds(33);
}
- const int kPaddingPayloadSize = 224;
// The amount of padding to send it too small to send a payload packet.
- EXPECT_CALL(transport, SendPacket(_, _, kPaddingPayloadSize + rtp_header_len))
+ EXPECT_CALL(transport,
+ SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPaddingPayloadSize, rtp_sender_->TimeToSendPadding(49));
+ EXPECT_EQ(kMaxPaddingSize,
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(49)));
const int kRtxHeaderSize = 2;
EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[0] +
rtp_header_len + kRtxHeaderSize))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPayloadSizes[0], rtp_sender_->TimeToSendPadding(500));
+ EXPECT_EQ(kPayloadSizes[0],
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(500)));
EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[kNumPayloadSizes - 1] +
rtp_header_len + kRtxHeaderSize))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_CALL(transport, SendPacket(_, _, kPaddingPayloadSize + rtp_header_len))
+ EXPECT_CALL(transport, SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPayloadSizes[kNumPayloadSizes - 1] + kPaddingPayloadSize,
- rtp_sender_->TimeToSendPadding(999));
+ EXPECT_EQ(kPayloadSizes[kNumPayloadSizes - 1] + kMaxPaddingSize,
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(999)));
}
TEST_F(RtpSenderTest, SendGenericVideo) {
@@ -959,7 +962,6 @@
const uint8_t kRedPayloadType = 96;
const uint8_t kUlpfecPayloadType = 97;
- const uint32_t kMaxPaddingSize = 224;
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "GENERIC";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 90000,
@@ -988,7 +990,7 @@
// Send padding.
rtp_sender_->TimeToSendPadding(kMaxPaddingSize);
// {bytes = 6, header = 24, padding = 224, packets = 3, retrans = 1, fec = 0}
- EXPECT_TRUE(callback.Matches(ssrc, 6, 24, 224, 3, 1, 0));
+ EXPECT_TRUE(callback.Matches(ssrc, 6, 24, kMaxPaddingSize, 3, 1, 0));
// Send FEC.
rtp_sender_->SetGenericFECStatus(true, kRedPayloadType, kUlpfecPayloadType);
@@ -1003,7 +1005,7 @@
sizeof(payload), NULL));
// {bytes = 34, header = 48, padding = 224, packets = 5, retrans = 1, fec = 1}
- EXPECT_TRUE(callback.Matches(ssrc, 34, 48, 224, 5, 1, 1));
+ EXPECT_TRUE(callback.Matches(ssrc, 34, 48, kMaxPaddingSize, 5, 1, 1));
rtp_sender_->RegisterRtpStatisticsCallback(NULL);
}
@@ -1093,13 +1095,25 @@
sizeof(payload),
0));
- EXPECT_GT(transport_.total_bytes_sent_, 0u);
- EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
- size_t last_bytes_sent = transport_.total_bytes_sent_;
+ // Will send 2 full-size padding packets.
+ rtp_sender_->TimeToSendPadding(1);
+ rtp_sender_->TimeToSendPadding(1);
- rtp_sender_->TimeToSendPadding(42);
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_->GetDataCounters(&rtp_stats, &rtx_stats);
- EXPECT_GT(transport_.total_bytes_sent_, last_bytes_sent);
- EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
+ // Payload + 1-byte generic header.
+ EXPECT_EQ(rtp_stats.bytes, sizeof(payload) + 1);
+ EXPECT_EQ(rtp_stats.header_bytes, 12u);
+ EXPECT_EQ(rtp_stats.padding_bytes, 0u);
+ EXPECT_EQ(rtx_stats.bytes, 0u);
+ EXPECT_EQ(rtx_stats.header_bytes, 24u);
+ EXPECT_EQ(rtx_stats.padding_bytes, 2 * kMaxPaddingSize);
+
+ EXPECT_EQ(transport_.total_bytes_sent_,
+ rtp_stats.bytes + rtp_stats.header_bytes + rtp_stats.padding_bytes +
+ rtx_stats.bytes + rtx_stats.header_bytes +
+ rtx_stats.padding_bytes);
}
} // namespace webrtc
diff --git a/modules/rtp_rtcp/source/ssrc_database.cc b/modules/rtp_rtcp/source/ssrc_database.cc
index df09b01..4e23083 100644
--- a/modules/rtp_rtcp/source/ssrc_database.cc
+++ b/modules/rtp_rtcp/source/ssrc_database.cc
@@ -57,45 +57,12 @@
uint32_t ssrc = GenerateRandom();
-#ifndef WEBRTC_NO_STL
-
while(_ssrcMap.find(ssrc) != _ssrcMap.end())
{
ssrc = GenerateRandom();
}
_ssrcMap[ssrc] = 0;
-#else
- if(_sizeOfSSRC <= _numberOfSSRC)
- {
- // allocate more space
- const int newSize = _sizeOfSSRC + 10;
- uint32_t* tempSSRCVector = new uint32_t[newSize];
- memcpy(tempSSRCVector, _ssrcVector, _sizeOfSSRC*sizeof(uint32_t));
- delete [] _ssrcVector;
-
- _ssrcVector = tempSSRCVector;
- _sizeOfSSRC = newSize;
- }
-
- // check if in DB
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- i = 0; // start over with a new ssrc
- ssrc = GenerateRandom();
- }
-
- }
- // add to database
- _ssrcVector[_numberOfSSRC] = ssrc;
- _numberOfSSRC++;
- }
-#endif
return ssrc;
}
@@ -103,39 +70,7 @@
SSRCDatabase::RegisterSSRC(const uint32_t ssrc)
{
CriticalSectionScoped lock(_critSect);
-
-#ifndef WEBRTC_NO_STL
-
_ssrcMap[ssrc] = 0;
-
-#else
- if(_sizeOfSSRC <= _numberOfSSRC)
- {
- // allocate more space
- const int newSize = _sizeOfSSRC + 10;
- uint32_t* tempSSRCVector = new uint32_t[newSize];
- memcpy(tempSSRCVector, _ssrcVector, _sizeOfSSRC*sizeof(uint32_t));
- delete [] _ssrcVector;
-
- _ssrcVector = tempSSRCVector;
- _sizeOfSSRC = newSize;
- }
- // check if in DB
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- return -1;
- }
- }
- // add to database
- _ssrcVector[_numberOfSSRC] = ssrc;
- _numberOfSSRC++;
- }
-#endif
return 0;
}
@@ -143,26 +78,7 @@
SSRCDatabase::ReturnSSRC(const uint32_t ssrc)
{
CriticalSectionScoped lock(_critSect);
-
-#ifndef WEBRTC_NO_STL
_ssrcMap.erase(ssrc);
-
-#else
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- // remove from database
- _ssrcVector[i] = _ssrcVector[_numberOfSSRC-1];
- _numberOfSSRC--;
- break;
- }
- }
- }
-#endif
return 0;
}
@@ -178,21 +94,12 @@
srand(tv.tv_usec);
#endif
-#ifdef WEBRTC_NO_STL
- _sizeOfSSRC = 10;
- _numberOfSSRC = 0;
- _ssrcVector = new uint32_t[10];
-#endif
_critSect = CriticalSectionWrapper::CreateCriticalSection();
}
SSRCDatabase::~SSRCDatabase()
{
-#ifdef WEBRTC_NO_STL
- delete [] _ssrcVector;
-#else
_ssrcMap.clear();
-#endif
delete _critSect;
}
diff --git a/modules/rtp_rtcp/source/ssrc_database.h b/modules/rtp_rtcp/source/ssrc_database.h
index e1f90e7..2d4932a 100644
--- a/modules/rtp_rtcp/source/ssrc_database.h
+++ b/modules/rtp_rtcp/source/ssrc_database.h
@@ -11,9 +11,7 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
-#ifndef WEBRTC_NO_STL
#include <map>
-#endif
#include "webrtc/system_wrappers/interface/static_instance.h"
#include "webrtc/typedefs.h"
@@ -46,14 +44,7 @@
uint32_t GenerateRandom();
-#ifdef WEBRTC_NO_STL
- int _numberOfSSRC;
- int _sizeOfSSRC;
-
- uint32_t* _ssrcVector;
-#else
std::map<uint32_t, uint32_t> _ssrcMap;
-#endif
CriticalSectionWrapper* _critSect;
};
diff --git a/modules/video_capture/ios/rtc_video_capture_ios_objc.mm b/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
index 641ca24..ac90b72 100644
--- a/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
+++ b/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
@@ -43,6 +43,12 @@
_owner = owner;
_captureId = captureId;
_captureSession = [[AVCaptureSession alloc] init];
+#if defined(__IPHONE_7_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_7_0
+ NSString* version = [[UIDevice currentDevice] systemVersion];
+ if ([version integerValue] >= 7) {
+ _captureSession.usesApplicationAudioSession = NO;
+ }
+#endif
_captureChanging = NO;
_captureChangingCondition = [[NSCondition alloc] init];
@@ -217,6 +223,9 @@
return;
switch ([UIApplication sharedApplication].statusBarOrientation) {
case UIInterfaceOrientationPortrait:
+#if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
+ case UIInterfaceOrientationUnknown:
+#endif
_connection.videoOrientation = AVCaptureVideoOrientationPortrait;
break;
case UIInterfaceOrientationPortraitUpsideDown:
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index 0dc6721..cbd1293 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -75,8 +75,6 @@
source_set("video_coding_utility") {
sources = [
- "utility/exp_filter.cc",
- "utility/include/exp_filter.h",
"utility/include/frame_dropper.h",
"utility/frame_dropper.cc",
]
diff --git a/modules/video_coding/main/source/media_opt_util.cc b/modules/video_coding/main/source/media_opt_util.cc
index ba86575..b506a5b 100644
--- a/modules/video_coding/main/source/media_opt_util.cc
+++ b/modules/video_coding/main/source/media_opt_util.cc
@@ -837,7 +837,7 @@
case kNoFilter:
break;
case kAvgFilter:
- filtered_loss = static_cast<uint8_t> (_lossPr255.Value() + 0.5);
+ filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
break;
case kMaxFilter:
filtered_loss = MaxFilteredLossPr(nowMs);
@@ -907,8 +907,8 @@
_currentParameters.keyFrameSize = _keyFrameSize;
_currentParameters.fecRateDelta = _fecRateDelta;
_currentParameters.fecRateKey = _fecRateKey;
- _currentParameters.packetsPerFrame = _packetsPerFrame.Value();
- _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.Value();
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
_currentParameters.residualPacketLossFec = _residualPacketLossFec;
_currentParameters.codecWidth = _codecWidth;
_currentParameters.codecHeight = _codecHeight;
diff --git a/modules/video_coding/main/source/media_opt_util.h b/modules/video_coding/main/source/media_opt_util.h
index f39a578..d421d9e 100644
--- a/modules/video_coding/main/source/media_opt_util.h
+++ b/modules/video_coding/main/source/media_opt_util.h
@@ -14,9 +14,9 @@
#include <math.h>
#include <stdlib.h>
+#include "webrtc/base/exp_filter.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/typedefs.h"
@@ -367,27 +367,27 @@
// Sets the available loss protection methods.
void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
uint8_t MaxFilteredLossPr(int64_t nowMs) const;
- VCMProtectionMethod* _selectedMethod;
- VCMProtectionParameters _currentParameters;
- uint32_t _rtt;
- float _lossPr;
- float _bitRate;
- float _frameRate;
- float _keyFrameSize;
- uint8_t _fecRateKey;
- uint8_t _fecRateDelta;
- int64_t _lastPrUpdateT;
- int64_t _lastPacketPerFrameUpdateT;
- int64_t _lastPacketPerFrameUpdateTKey;
- VCMExpFilter _lossPr255;
- VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
- uint8_t _shortMaxLossPr255;
- VCMExpFilter _packetsPerFrame;
- VCMExpFilter _packetsPerFrameKey;
- float _residualPacketLossFec;
- uint16_t _codecWidth;
- uint16_t _codecHeight;
- int _numLayers;
+ VCMProtectionMethod* _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ uint32_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ float _residualPacketLossFec;
+ uint16_t _codecWidth;
+ uint16_t _codecHeight;
+ int _numLayers;
};
} // namespace media_optimization
diff --git a/modules/video_coding/utility/exp_filter.cc b/modules/video_coding/utility/exp_filter.cc
deleted file mode 100644
index 44f280b..0000000
--- a/modules/video_coding/utility/exp_filter.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
-
-#include <math.h>
-
-namespace webrtc {
-
-void
-VCMExpFilter::Reset(float alpha)
-{
- _alpha = alpha;
- _filtered = -1.0;
-}
-
-float
-VCMExpFilter::Apply(float exp, float sample)
-{
- if (_filtered == -1.0)
- {
- // Initialize filtered bit rates
- _filtered = sample;
- }
- else if (exp == 1.0)
- {
- _filtered = _alpha * _filtered + (1 - _alpha) * sample;
- }
- else
- {
- float alpha = pow(_alpha, exp);
- _filtered = alpha * _filtered + (1 - alpha) * sample;
- }
- if (_max != -1 && _filtered > _max)
- {
- _filtered = _max;
- }
- return _filtered;
-}
-
-void
-VCMExpFilter::UpdateBase(float alpha)
-{
- _alpha = alpha;
-}
-
-float
-VCMExpFilter::Value() const
-{
- return _filtered;
-}
-
-}
diff --git a/modules/video_coding/utility/frame_dropper.cc b/modules/video_coding/utility/frame_dropper.cc
index d3c25fb..54c8cb8 100644
--- a/modules/video_coding/utility/frame_dropper.cc
+++ b/modules/video_coding/utility/frame_dropper.cc
@@ -86,25 +86,27 @@
{
_keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
_keyFrameRatio.Apply(1.0, 1.0);
- if (frameSizeKbits > _keyFrameSizeAvgKbits.Value())
+ if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered())
{
// Remove the average key frame size since we
// compensate for key frames when adding delta
// frames.
- frameSizeKbits -= _keyFrameSizeAvgKbits.Value();
+ frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
}
else
{
// Shouldn't be negative, so zero is the lower bound.
frameSizeKbits = 0;
}
- if (_keyFrameRatio.Value() > 1e-5 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
+ if (_keyFrameRatio.filtered() > 1e-5 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
{
// We are sending key frames more often than our upper bound for
// how much we allow the key frame compensation to be spread
// out in time. Therefor we must use the key frame ratio rather
// than keyFrameSpreadFrames.
- _keyFrameCount = static_cast<int32_t>(1 / _keyFrameRatio.Value() + 0.5);
+ _keyFrameCount =
+ static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
}
else
{
@@ -145,13 +147,14 @@
if (_keyFrameCount > 0)
{
// Perform the key frame compensation
- if (_keyFrameRatio.Value() > 0 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
+ if (_keyFrameRatio.filtered() > 0 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
{
- T -= _keyFrameSizeAvgKbits.Value() * _keyFrameRatio.Value();
+ T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
}
else
{
- T -= _keyFrameSizeAvgKbits.Value() / _keyFrameSpreadFrames;
+ T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
}
_keyFrameCount--;
}
@@ -232,11 +235,11 @@
_dropCount = 0;
}
- if (_dropRatio.Value() >= 0.5f) // Drops per keep
+ if (_dropRatio.filtered() >= 0.5f) // Drops per keep
{
// limit is the number of frames we should drop between each kept frame
// to keep our drop ratio. limit is positive in this case.
- float denom = 1.0f - _dropRatio.Value();
+ float denom = 1.0f - _dropRatio.filtered();
if (denom < 1e-5)
{
denom = (float)1e-5;
@@ -252,7 +255,7 @@
if (_dropCount < 0)
{
// Reset the _dropCount since it was negative and should be positive.
- if (_dropRatio.Value() > 0.4f)
+ if (_dropRatio.filtered() > 0.4f)
{
_dropCount = -_dropCount;
}
@@ -274,12 +277,13 @@
return false;
}
}
- else if (_dropRatio.Value() > 0.0f && _dropRatio.Value() < 0.5f) // Keeps per drop
+ else if (_dropRatio.filtered() > 0.0f &&
+ _dropRatio.filtered() < 0.5f) // Keeps per drop
{
// limit is the number of frames we should keep between each drop
// in order to keep the drop ratio. limit is negative in this case,
// and the _dropCount is also negative.
- float denom = _dropRatio.Value();
+ float denom = _dropRatio.filtered();
if (denom < 1e-5)
{
denom = (float)1e-5;
@@ -289,7 +293,7 @@
{
// Reset the _dropCount since we have a positive
// _dropCount, and it should be negative.
- if (_dropRatio.Value() < 0.6f)
+ if (_dropRatio.filtered() < 0.6f)
{
_dropCount = -_dropCount;
}
@@ -350,7 +354,7 @@
{
return static_cast<float>(inputFrameRate);
}
- return inputFrameRate * (1.0f - _dropRatio.Value());
+ return inputFrameRate * (1.0f - _dropRatio.filtered());
}
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
diff --git a/modules/video_coding/utility/include/exp_filter.h b/modules/video_coding/utility/include/exp_filter.h
deleted file mode 100644
index d8c37a3..0000000
--- a/modules/video_coding/utility/include/exp_filter.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
-
-namespace webrtc
-{
-
-/**********************/
-/* ExpFilter class */
-/**********************/
-
-class VCMExpFilter
-{
-public:
- VCMExpFilter(float alpha, float max = -1.0) : _alpha(alpha), _filtered(-1.0), _max(max) {}
-
- // Resets the filter to its initial state, and resets alpha to the given value
- //
- // Input:
- // - alpha : the new value of the filter factor base.
- void Reset(float alpha);
-
- // Applies the filter with the given exponent on the provided sample
- //
- // Input:
- // - exp : Exponent T in y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k)
- // - sample : x(k) in the above filter equation
- float Apply(float exp, float sample);
-
- // Return current filtered value: y(k)
- //
- // Return value : The current filter output
- float Value() const;
-
- // Change the filter factor base
- //
- // Input:
- // - alpha : The new filter factor base.
- void UpdateBase(float alpha);
-
-private:
- float _alpha; // Filter factor base
- float _filtered; // Current filter output
- const float _max;
-}; // end of ExpFilter class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
diff --git a/modules/video_coding/utility/include/frame_dropper.h b/modules/video_coding/utility/include/frame_dropper.h
index 4c1c168..8eebd78 100644
--- a/modules/video_coding/utility/include/frame_dropper.h
+++ b/modules/video_coding/utility/include/frame_dropper.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
+#include "webrtc/base/exp_filter.h"
#include "webrtc/typedefs.h"
namespace webrtc
@@ -72,23 +72,23 @@
void UpdateRatio();
void CapAccumulator();
- VCMExpFilter _keyFrameSizeAvgKbits;
- VCMExpFilter _keyFrameRatio;
- float _keyFrameSpreadFrames;
- int32_t _keyFrameCount;
- float _accumulator;
- float _accumulatorMax;
- float _targetBitRate;
- bool _dropNext;
- VCMExpFilter _dropRatio;
- int32_t _dropCount;
- float _windowSize;
- float _incoming_frame_rate;
- bool _wasBelowMax;
- bool _enabled;
- bool _fastMode;
- float _cap_buffer_size;
- float _max_time_drops;
+ rtc::ExpFilter _keyFrameSizeAvgKbits;
+ rtc::ExpFilter _keyFrameRatio;
+ float _keyFrameSpreadFrames;
+ int32_t _keyFrameCount;
+ float _accumulator;
+ float _accumulatorMax;
+ float _targetBitRate;
+ bool _dropNext;
+ rtc::ExpFilter _dropRatio;
+ int32_t _dropCount;
+ float _windowSize;
+ float _incoming_frame_rate;
+ bool _wasBelowMax;
+ bool _enabled;
+ bool _fastMode;
+ float _cap_buffer_size;
+ float _max_time_drops;
}; // end of VCMFrameDropper class
} // namespace webrtc
diff --git a/modules/video_coding/utility/video_coding_utility.gyp b/modules/video_coding/utility/video_coding_utility.gyp
index 24f8880..2f0202b 100644
--- a/modules/video_coding/utility/video_coding_utility.gyp
+++ b/modules/video_coding/utility/video_coding_utility.gyp
@@ -18,9 +18,7 @@
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'include/exp_filter.h',
'include/frame_dropper.h',
- 'exp_filter.cc',
'frame_dropper.cc',
],
},
diff --git a/modules/video_processing/BUILD.gn b/modules/video_processing/BUILD.gn
index 724a9dc..f9412f9 100644
--- a/modules/video_processing/BUILD.gn
+++ b/modules/video_processing/BUILD.gn
@@ -25,8 +25,6 @@
"main/source/content_analysis.h",
"main/source/deflickering.cc",
"main/source/deflickering.h",
- "main/source/denoising.cc",
- "main/source/denoising.h",
"main/source/frame_preprocessor.cc",
"main/source/frame_preprocessor.h",
"main/source/spatial_resampler.cc",
diff --git a/modules/video_processing/OWNERS b/modules/video_processing/OWNERS
index d5ae847..037de93 100644
--- a/modules/video_processing/OWNERS
+++ b/modules/video_processing/OWNERS
@@ -1 +1,6 @@
+stefan@webrtc.org
+mikhal@webrtc.org
+marpan@webrtc.org
+henrik.lundin@webrtc.org
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/modules/video_processing/main/OWNERS b/modules/video_processing/main/OWNERS
deleted file mode 100644
index 7183cf2..0000000
--- a/modules/video_processing/main/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-stefan@webrtc.org
-mikhal@webrtc.org
-marpan@webrtc.org
-henrik.lundin@webrtc.org
diff --git a/modules/video_processing/main/interface/video_processing.h b/modules/video_processing/main/interface/video_processing.h
index 817d43d..4df8812 100644
--- a/modules/video_processing/main/interface/video_processing.h
+++ b/modules/video_processing/main/interface/video_processing.h
@@ -177,17 +177,6 @@
virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats) = 0;
/**
- Denoises a video frame. Every frame from the stream should be passed in.
- Has a fixed-point implementation.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \return The number of modified pixels on success, -1 on failure.
- */
- virtual int32_t Denoising(I420VideoFrame* frame) = 0;
-
- /**
Detects if a video frame is excessively bright or dark. Returns a
warning if this is the case. Multiple frames should be passed in before
expecting a warning. Has a floating-point implementation.
diff --git a/modules/video_processing/main/source/Android.mk b/modules/video_processing/main/source/Android.mk
index 829fa96..62eb387 100644
--- a/modules/video_processing/main/source/Android.mk
+++ b/modules/video_processing/main/source/Android.mk
@@ -23,7 +23,6 @@
color_enhancement.cc \
content_analysis.cc \
deflickering.cc \
- denoising.cc \
frame_preprocessor.cc \
spatial_resampler.cc \
video_decimator.cc \
diff --git a/modules/video_processing/main/source/denoising.cc b/modules/video_processing/main/source/denoising.cc
deleted file mode 100644
index 4c8dcb4..0000000
--- a/modules/video_processing/main/source/denoising.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_processing/main/source/denoising.h"
-
-#include <string.h>
-
-namespace webrtc {
-// Down-sampling in time (unit: number of frames)
-enum { kSubsamplingTime = 0 };
-// Sub-sampling in width (unit: power of 2.
-enum { kSubsamplingWidth = 0 };
-// Sub-sampling in height (unit: power of 2)
-enum { kSubsamplingHeight = 0 };
-// (Q8) De-noising filter parameter
-enum { kDenoiseFiltParam = 179 };
-// (Q8) 1 - filter parameter
-enum { kDenoiseFiltParamRec = 77 };
-// (Q8) De-noising threshold level
-enum { kDenoiseThreshold = 19200 };
-
-VPMDenoising::VPMDenoising()
- : id_(0),
- moment1_(NULL),
- moment2_(NULL) {
- Reset();
-}
-
-VPMDenoising::~VPMDenoising() {
- if (moment1_) {
- delete [] moment1_;
- moment1_ = NULL;
-}
-
- if (moment2_) {
- delete [] moment2_;
- moment2_ = NULL;
- }
-}
-
-int32_t VPMDenoising::ChangeUniqueId(const int32_t id) {
- id_ = id;
- return VPM_OK;
-}
-
-void VPMDenoising::Reset() {
- frame_size_ = 0;
- denoise_frame_cnt_ = 0;
-
- if (moment1_) {
- delete [] moment1_;
- moment1_ = NULL;
- }
-
- if (moment2_) {
- delete [] moment2_;
- moment2_ = NULL;
- }
-}
-
-int32_t VPMDenoising::ProcessFrame(I420VideoFrame* frame) {
- assert(frame);
- int32_t thevar;
- int k;
- int jsub, ksub;
- int32_t diff0;
- uint32_t tmp_moment1;
- uint32_t tmp_moment2;
- uint32_t tmp;
- int32_t num_pixels_changed = 0;
-
- if (frame->IsZeroSize()) {
- return VPM_GENERAL_ERROR;
- }
-
- int width = frame->width();
- int height = frame->height();
-
- /* Size of luminance component */
- const uint32_t y_size = height * width;
-
- /* Initialization */
- if (y_size != frame_size_) {
- delete [] moment1_;
- moment1_ = NULL;
-
- delete [] moment2_;
- moment2_ = NULL;
- }
- frame_size_ = y_size;
-
- if (!moment1_) {
- moment1_ = new uint32_t[y_size];
- memset(moment1_, 0, sizeof(uint32_t)*y_size);
- }
-
- if (!moment2_) {
- moment2_ = new uint32_t[y_size];
- memset(moment2_, 0, sizeof(uint32_t)*y_size);
- }
-
- /* Apply de-noising on each pixel, but update variance sub-sampled */
- uint8_t* buffer = frame->buffer(kYPlane);
- for (int i = 0; i < height; i++) { // Collect over height
- k = i * width;
- ksub = ((i >> kSubsamplingHeight) << kSubsamplingHeight) * width;
- for (int j = 0; j < width; j++) { // Collect over width
- jsub = ((j >> kSubsamplingWidth) << kSubsamplingWidth);
- /* Update mean value for every pixel and every frame */
- tmp_moment1 = moment1_[k + j];
- tmp_moment1 *= kDenoiseFiltParam; // Q16
- tmp_moment1 += ((kDenoiseFiltParamRec * ((uint32_t)buffer[k + j])) << 8);
- tmp_moment1 >>= 8; // Q8
- moment1_[k + j] = tmp_moment1;
-
- tmp_moment2 = moment2_[ksub + jsub];
- if ((ksub == k) && (jsub == j) && (denoise_frame_cnt_ == 0)) {
- tmp = ((uint32_t)buffer[k + j] *
- (uint32_t)buffer[k + j]);
- tmp_moment2 *= kDenoiseFiltParam; // Q16
- tmp_moment2 += ((kDenoiseFiltParamRec * tmp) << 8);
- tmp_moment2 >>= 8; // Q8
- }
- moment2_[k + j] = tmp_moment2;
- /* Current event = deviation from mean value */
- diff0 = ((int32_t)buffer[k + j] << 8) - moment1_[k + j];
- /* Recent events = variance (variations over time) */
- thevar = moment2_[k + j];
- thevar -= ((moment1_[k + j] * moment1_[k + j]) >> 8);
- // De-noising criteria, i.e., when should we replace a pixel by its mean.
- // 1) recent events are minor.
- // 2) current events are minor.
- if ((thevar < kDenoiseThreshold)
- && ((diff0 * diff0 >> 8) < kDenoiseThreshold)) {
- // Replace with mean.
- buffer[k + j] = (uint8_t)(moment1_[k + j] >> 8);
- num_pixels_changed++;
- }
- }
- }
-
- denoise_frame_cnt_++;
- if (denoise_frame_cnt_ > kSubsamplingTime)
- denoise_frame_cnt_ = 0;
-
- return num_pixels_changed;
-}
-
-} // namespace
diff --git a/modules/video_processing/main/source/denoising.h b/modules/video_processing/main/source/denoising.h
deleted file mode 100644
index 60645fb..0000000
--- a/modules/video_processing/main/source/denoising.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class VPMDenoising {
- public:
- VPMDenoising();
- ~VPMDenoising();
-
- int32_t ChangeUniqueId(int32_t id);
-
- void Reset();
-
- int32_t ProcessFrame(I420VideoFrame* frame);
-
- private:
- int32_t id_;
-
- uint32_t* moment1_; // (Q8) First order moment (mean).
- uint32_t* moment2_; // (Q8) Second order moment.
- uint32_t frame_size_; // Size (# of pixels) of frame.
- int denoise_frame_cnt_; // Counter for subsampling in time.
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-
diff --git a/modules/video_processing/main/source/video_processing.gypi b/modules/video_processing/main/source/video_processing.gypi
index 7a0279e..f62aa18 100644
--- a/modules/video_processing/main/source/video_processing.gypi
+++ b/modules/video_processing/main/source/video_processing.gypi
@@ -31,8 +31,6 @@
'content_analysis.h',
'deflickering.cc',
'deflickering.h',
- 'denoising.cc',
- 'denoising.h',
'frame_preprocessor.cc',
'frame_preprocessor.h',
'spatial_resampler.cc',
diff --git a/modules/video_processing/main/source/video_processing_impl.cc b/modules/video_processing/main/source/video_processing_impl.cc
index 3560030..8bc5bf0 100644
--- a/modules/video_processing/main/source/video_processing_impl.cc
+++ b/modules/video_processing/main/source/video_processing_impl.cc
@@ -51,7 +51,6 @@
id_ = id;
brightness_detection_.ChangeUniqueId(id);
deflickering_.ChangeUniqueId(id);
- denoising_.ChangeUniqueId(id);
frame_pre_processor_.ChangeUniqueId(id);
return VPM_OK;
}
@@ -66,7 +65,6 @@
mutex_(*CriticalSectionWrapper::CreateCriticalSection()) {
brightness_detection_.ChangeUniqueId(id);
deflickering_.ChangeUniqueId(id);
- denoising_.ChangeUniqueId(id);
frame_pre_processor_.ChangeUniqueId(id);
}
@@ -77,7 +75,6 @@
void VideoProcessingModuleImpl::Reset() {
CriticalSectionScoped mutex(&mutex_);
deflickering_.Reset();
- denoising_.Reset();
brightness_detection_.Reset();
frame_pre_processor_.Reset();
}
@@ -146,11 +143,6 @@
return deflickering_.ProcessFrame(frame, stats);
}
-int32_t VideoProcessingModuleImpl::Denoising(I420VideoFrame* frame) {
- CriticalSectionScoped mutex(&mutex_);
- return denoising_.ProcessFrame(frame);
-}
-
int32_t VideoProcessingModuleImpl::BrightnessDetection(
const I420VideoFrame& frame,
const FrameStats& stats) {
diff --git a/modules/video_processing/main/source/video_processing_impl.h b/modules/video_processing/main/source/video_processing_impl.h
index deae6ff..6fe617d 100644
--- a/modules/video_processing/main/source/video_processing_impl.h
+++ b/modules/video_processing/main/source/video_processing_impl.h
@@ -16,7 +16,6 @@
#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
#include "webrtc/modules/video_processing/main/source/color_enhancement.h"
#include "webrtc/modules/video_processing/main/source/deflickering.h"
-#include "webrtc/modules/video_processing/main/source/denoising.h"
#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
namespace webrtc {
@@ -36,8 +35,6 @@
virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats);
- virtual int32_t Denoising(I420VideoFrame* frame);
-
virtual int32_t BrightnessDetection(const I420VideoFrame& frame,
const FrameStats& stats);
@@ -74,7 +71,6 @@
int32_t id_;
CriticalSectionWrapper& mutex_;
VPMDeflickering deflickering_;
- VPMDenoising denoising_;
VPMBrightnessDetection brightness_detection_;
VPMFramePreprocessor frame_pre_processor_;
};
diff --git a/modules/video_processing/main/test/unit_test/denoising_test.cc b/modules/video_processing/main/test/unit_test/denoising_test.cc
deleted file mode 100644
index c00db6a..0000000
--- a/modules/video_processing/main/test/unit_test/denoising_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/system_wrappers/interface/tick_util.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-namespace webrtc {
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_ANDROID(Denoising))
-{
- enum { NumRuns = 10 };
- uint32_t frameNum = 0;
-
- int64_t min_runtime = 0;
- int64_t avg_runtime = 0;
-
- const std::string denoise_filename =
- webrtc::test::OutputPath() + "denoise_testfile.yuv";
- FILE* denoiseFile = fopen(denoise_filename.c_str(), "wb");
- ASSERT_TRUE(denoiseFile != NULL) <<
- "Could not open output file: " << denoise_filename << "\n";
-
- const std::string noise_filename =
- webrtc::test::OutputPath() + "noise_testfile.yuv";
- FILE* noiseFile = fopen(noise_filename.c_str(), "wb");
- ASSERT_TRUE(noiseFile != NULL) <<
- "Could not open noisy file: " << noise_filename << "\n";
-
- printf("\nRun time [us / frame]:\n");
- for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++)
- {
- TickTime t0;
- TickTime t1;
- TickInterval acc_ticks;
- int32_t modifiedPixels = 0;
-
- frameNum = 0;
- scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
- width_, height_,
- 0, kRotateNone, &video_frame_));
- frameNum++;
- uint8_t* sourceBuffer = video_frame_.buffer(kYPlane);
-
- // Add noise to a part in video stream
- // Random noise
- // TODO: investigate the effectiveness of this test.
-
- for (int ir = 0; ir < height_; ir++)
- {
- uint32_t ik = ir * width_;
- for (int ic = 0; ic < width_; ic++)
- {
- uint8_t r = rand() % 16;
- r -= 8;
- if (ir < height_ / 4)
- r = 0;
- if (ir >= 3 * height_ / 4)
- r = 0;
- if (ic < width_ / 4)
- r = 0;
- if (ic >= 3 * width_ / 4)
- r = 0;
-
- /*uint8_t pixelValue = 0;
- if (ir >= height_ / 2)
- { // Region 3 or 4
- pixelValue = 170;
- }
- if (ic >= width_ / 2)
- { // Region 2 or 4
- pixelValue += 85;
- }
- pixelValue += r;
- sourceBuffer[ik + ic] = pixelValue;
- */
- sourceBuffer[ik + ic] += r;
- }
- }
-
- if (run_idx == 0)
- {
- if (PrintI420VideoFrame(video_frame_, noiseFile) < 0) {
- return;
- }
- }
-
- t0 = TickTime::Now();
- ASSERT_GE(modifiedPixels = vpm_->Denoising(&video_frame_), 0);
- t1 = TickTime::Now();
- acc_ticks += (t1 - t0);
-
- if (run_idx == 0)
- {
- if (PrintI420VideoFrame(video_frame_, noiseFile) < 0) {
- return;
- }
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
- if (acc_ticks.Microseconds() < min_runtime || run_idx == 0)
- {
- min_runtime = acc_ticks.Microseconds();
- }
- avg_runtime += acc_ticks.Microseconds();
-
- rewind(source_file_);
- }
- ASSERT_EQ(0, fclose(denoiseFile));
- ASSERT_EQ(0, fclose(noiseFile));
- printf("\nAverage run time = %d us / frame\n",
- static_cast<int>(avg_runtime / frameNum / NumRuns));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime / frameNum));
-}
-
-} // namespace webrtc
diff --git a/modules/video_processing/main/test/unit_test/video_processing_unittest.cc b/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
index 973552c..b124503 100644
--- a/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
+++ b/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
@@ -82,8 +82,6 @@
EXPECT_EQ(-1, vpm_->Deflickering(&videoFrame, &stats));
- EXPECT_EQ(-1, vpm_->Denoising(&videoFrame));
-
EXPECT_EQ(-3, vpm_->BrightnessDetection(videoFrame, stats));
}
@@ -113,8 +111,6 @@
EXPECT_EQ(-1, vpm_->Deflickering(&video_frame_, &stats));
- EXPECT_EQ(-1, vpm_->Denoising(&video_frame_));
-
EXPECT_EQ(-3, vpm_->BrightnessDetection(video_frame_, stats));
EXPECT_EQ(VPM_PARAMETER_ERROR, vpm_->SetTargetResolution(0,0,0));
@@ -145,19 +141,6 @@
ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
source_file_));
- // Using ConvertToI420 to add stride to the image.
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
- width_, height_,
- 0, kRotateNone, &video_frame_));
- video_frame2.CopyFrame(video_frame_);
- EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
- ASSERT_GE(vpm_->Denoising(&video_frame_), 0);
- vpm_->Reset();
- ASSERT_GE(vpm_->Denoising(&video_frame2), 0);
- EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
-
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
width_, height_,
0, kRotateNone, &video_frame_));
diff --git a/system_wrappers/source/thread_unittest.cc b/system_wrappers/source/thread_unittest.cc
index 25095e6..f54d065 100644
--- a/system_wrappers/source/thread_unittest.cc
+++ b/system_wrappers/source/thread_unittest.cc
@@ -12,11 +12,13 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/sleep.h"
namespace webrtc {
// Function that does nothing, and reports success.
bool NullRunFunction(void* obj) {
+ SleepMs(0); // Hand over timeslice, prevents busy looping.
return true;
}
@@ -32,6 +34,7 @@
bool SetFlagRunFunction(void* obj) {
bool* obj_as_bool = static_cast<bool*>(obj);
*obj_as_bool = true;
+ SleepMs(0); // Hand over timeslice, prevents busy looping.
return true;
}
diff --git a/video/call.cc b/video/call.cc
index 95e1c7b..bcce6f0 100644
--- a/video/call.cc
+++ b/video/call.cc
@@ -33,6 +33,12 @@
const char* RtpExtension::kTOffset = "urn:ietf:params:rtp-hdrext:toffset";
const char* RtpExtension::kAbsSendTime =
"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
+
+bool RtpExtension::IsSupported(const std::string& name) {
+ return name == webrtc::RtpExtension::kTOffset ||
+ name == webrtc::RtpExtension::kAbsSendTime;
+}
+
namespace internal {
class CpuOveruseObserverProxy : public webrtc::CpuOveruseObserver {
diff --git a/video/end_to_end_tests.cc b/video/end_to_end_tests.cc
index 3cb9624..3b5012d 100644
--- a/video/end_to_end_tests.cc
+++ b/video/end_to_end_tests.cc
@@ -1539,7 +1539,7 @@
TestSendsSetSsrcs(kNumSsrcs, true);
}
-TEST_F(EndToEndTest, RedundantPayloadsTransmittedOnAllSsrcs) {
+TEST_F(EndToEndTest, DISABLED_RedundantPayloadsTransmittedOnAllSsrcs) {
class ObserveRedundantPayloads: public test::EndToEndTest {
public:
ObserveRedundantPayloads()
diff --git a/video/rampup_tests.cc b/video/rampup_tests.cc
index af3be86..e1dd95a 100644
--- a/video/rampup_tests.cc
+++ b/video/rampup_tests.cc
@@ -99,7 +99,6 @@
// start bitrate, but due to the BWE implementation we can't guarantee the
// first estimate really is as high as the start bitrate.
EXPECT_GT(bitrate, 0.9 * start_bitrate_bps_);
- EXPECT_LT(bitrate, expected_bitrate_bps_);
start_bitrate_bps_ = 0;
}
if (bitrate >= expected_bitrate_bps_) {
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index a11a54c..76739a3 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -1436,4 +1436,48 @@
RunBaseTest(&test);
}
+TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
+ class RtcpByeTest : public test::SendTest {
+ public:
+ RtcpByeTest() : SendTest(kDefaultTimeoutMs), media_bytes_sent_(0) {}
+
+ private:
+ virtual Action OnSendRtp(const uint8_t* packet, size_t length) OVERRIDE {
+ RTPHeader header;
+ EXPECT_TRUE(parser_->Parse(packet, length, &header));
+ media_bytes_sent_ += length - header.headerLength - header.paddingLength;
+ return SEND_PACKET;
+ }
+
+ virtual Action OnSendRtcp(const uint8_t* packet, size_t length) OVERRIDE {
+ RTCPUtility::RTCPParserV2 parser(packet, length, true);
+ EXPECT_TRUE(parser.IsValid());
+
+ RTCPUtility::RTCPPacketTypes packet_type = parser.Begin();
+ uint32_t sender_octet_count = 0;
+ while (packet_type != RTCPUtility::kRtcpNotValidCode) {
+ if (packet_type == RTCPUtility::kRtcpSrCode) {
+ sender_octet_count = parser.Packet().SR.SenderOctetCount;
+ EXPECT_EQ(sender_octet_count, media_bytes_sent_);
+ if (sender_octet_count > 0)
+ observation_complete_->Set();
+ }
+
+ packet_type = parser.Iterate();
+ }
+
+ return SEND_PACKET;
+ }
+
+ virtual void PerformTest() OVERRIDE {
+ EXPECT_EQ(kEventSignaled, Wait())
+ << "Timed out while waiting for RTCP sender report.";
+ }
+
+ size_t media_bytes_sent_;
+ } test;
+
+ RunBaseTest(&test);
+}
+
} // namespace webrtc
diff --git a/video_engine/include/vie_errors.h b/video_engine/include/vie_errors.h
index 1e9be1d..24aa098 100644
--- a/video_engine/include/vie_errors.h
+++ b/video_engine/include/vie_errors.h
@@ -103,8 +103,8 @@
kViEImageProcessInvalidCaptureId, // No capture device exist with the provided capture id.
kViEImageProcessFilterExists, // RegisterCaptureEffectFilter,RegisterSendEffectFilter,RegisterRenderEffectFilter - Effect filter already registered.
kViEImageProcessFilterDoesNotExist, // DeRegisterCaptureEffectFilter,DeRegisterSendEffectFilter,DeRegisterRenderEffectFilter - Effect filter not registered.
- kViEImageProcessAlreadyEnabled, // EnableDeflickering,EnableDenoising,EnableColorEnhancement- Function already enabled.
- kViEImageProcessAlreadyDisabled, // EnableDeflickering,EnableDenoising,EnableColorEnhancement- Function already disabled.
+ kViEImageProcessAlreadyEnabled, // EnableDeflickering,EnableColorEnhancement- Function already enabled.
+ kViEImageProcessAlreadyDisabled, // EnableDeflickering,EnableColorEnhancement- Function already disabled.
kViEImageProcessUnknownError // An unknown error has occurred. Check the log file.
};
diff --git a/video_engine/include/vie_image_process.h b/video_engine/include/vie_image_process.h
index e24e98f..8bb895f 100644
--- a/video_engine/include/vie_image_process.h
+++ b/video_engine/include/vie_image_process.h
@@ -11,7 +11,6 @@
// This sub-API supports the following functionalities:
// - Effect filters
// - Deflickering
-// - Denoising
// - Color enhancement
#ifndef WEBRTC_VIDEO_ENGINE_INCLUDE_VIE_IMAGE_PROCESS_H_
@@ -85,9 +84,10 @@
// not all of them succeed. Enabling this function will remove the flicker.
virtual int EnableDeflickering(const int capture_id, const bool enable) = 0;
- // Some cameras produce very noisy captured images, especially in low‐light
- // conditions. This functionality will reduce the camera noise.
- virtual int EnableDenoising(const int capture_id, const bool enable) = 0;
+ // TODO(pbos): Remove this function when removed from fakewebrtcvideoengine.h.
+ virtual int EnableDenoising(const int capture_id, const bool enable) {
+ return -1;
+ }
// This function enhances the colors on the decoded video stream, enabled by
// default.
diff --git a/video_engine/overuse_frame_detector.cc b/video_engine/overuse_frame_detector.cc
index 764c258..6efb4be 100644
--- a/video_engine/overuse_frame_detector.cc
+++ b/video_engine/overuse_frame_detector.cc
@@ -17,7 +17,7 @@
#include <list>
#include <map>
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
+#include "webrtc/base/exp_filter.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -54,8 +54,8 @@
Statistics::Statistics() :
sum_(0.0),
count_(0),
- filtered_samples_(new VCMExpFilter(kWeightFactorMean)),
- filtered_variance_(new VCMExpFilter(kWeightFactor)) {
+ filtered_samples_(new rtc::ExpFilter(kWeightFactorMean)),
+ filtered_variance_(new rtc::ExpFilter(kWeightFactor)) {
Reset();
}
@@ -84,8 +84,8 @@
float exp = sample_ms / kSampleDiffMs;
exp = std::min(exp, kMaxExp);
filtered_samples_->Apply(exp, sample_ms);
- filtered_variance_->Apply(exp, (sample_ms - filtered_samples_->Value()) *
- (sample_ms - filtered_samples_->Value()));
+ filtered_variance_->Apply(exp, (sample_ms - filtered_samples_->filtered()) *
+ (sample_ms - filtered_samples_->filtered()));
}
float Statistics::InitialMean() const {
@@ -101,10 +101,10 @@
return average_stddev * average_stddev;
}
-float Statistics::Mean() const { return filtered_samples_->Value(); }
+float Statistics::Mean() const { return filtered_samples_->filtered(); }
float Statistics::StdDev() const {
- return sqrt(std::max(filtered_variance_->Value(), 0.0f));
+ return sqrt(std::max(filtered_variance_->filtered(), 0.0f));
}
uint64_t Statistics::Count() const { return count_; }
@@ -116,7 +116,7 @@
EncodeTimeAvg()
: kWeightFactor(0.5f),
kInitialAvgEncodeTimeMs(5.0f),
- filtered_encode_time_ms_(new VCMExpFilter(kWeightFactor)) {
+ filtered_encode_time_ms_(new rtc::ExpFilter(kWeightFactor)) {
filtered_encode_time_ms_->Apply(1.0f, kInitialAvgEncodeTimeMs);
}
~EncodeTimeAvg() {}
@@ -128,13 +128,13 @@
}
int Value() const {
- return static_cast<int>(filtered_encode_time_ms_->Value() + 0.5);
+ return static_cast<int>(filtered_encode_time_ms_->filtered() + 0.5);
}
private:
const float kWeightFactor;
const float kInitialAvgEncodeTimeMs;
- scoped_ptr<VCMExpFilter> filtered_encode_time_ms_;
+ scoped_ptr<rtc::ExpFilter> filtered_encode_time_ms_;
};
// Class for calculating the encode usage.
@@ -146,8 +146,8 @@
kInitialSampleDiffMs(40.0f),
kMaxSampleDiffMs(45.0f),
count_(0),
- filtered_encode_time_ms_(new VCMExpFilter(kWeightFactorEncodeTime)),
- filtered_frame_diff_ms_(new VCMExpFilter(kWeightFactorFrameDiff)) {
+ filtered_encode_time_ms_(new rtc::ExpFilter(kWeightFactorEncodeTime)),
+ filtered_frame_diff_ms_(new rtc::ExpFilter(kWeightFactorFrameDiff)) {
Reset();
}
~EncodeUsage() {}
@@ -181,10 +181,10 @@
if (count_ < static_cast<uint32_t>(options_.min_frame_samples)) {
return static_cast<int>(InitialUsageInPercent() + 0.5f);
}
- float frame_diff_ms = std::max(filtered_frame_diff_ms_->Value(), 1.0f);
+ float frame_diff_ms = std::max(filtered_frame_diff_ms_->filtered(), 1.0f);
frame_diff_ms = std::min(frame_diff_ms, kMaxSampleDiffMs);
float encode_usage_percent =
- 100.0f * filtered_encode_time_ms_->Value() / frame_diff_ms;
+ 100.0f * filtered_encode_time_ms_->filtered() / frame_diff_ms;
return static_cast<int>(encode_usage_percent + 0.5);
}
@@ -205,8 +205,8 @@
const float kMaxSampleDiffMs;
uint64_t count_;
CpuOveruseOptions options_;
- scoped_ptr<VCMExpFilter> filtered_encode_time_ms_;
- scoped_ptr<VCMExpFilter> filtered_frame_diff_ms_;
+ scoped_ptr<rtc::ExpFilter> filtered_encode_time_ms_;
+ scoped_ptr<rtc::ExpFilter> filtered_frame_diff_ms_;
};
// Class for calculating the relative standard deviation of encode times.
@@ -215,7 +215,7 @@
EncodeTimeRsd(Clock* clock)
: kWeightFactor(0.6f),
count_(0),
- filtered_rsd_(new VCMExpFilter(kWeightFactor)),
+ filtered_rsd_(new rtc::ExpFilter(kWeightFactor)),
hist_samples_(0),
hist_sum_(0.0f),
last_process_time_ms_(clock->TimeInMilliseconds()) {
@@ -294,7 +294,7 @@
}
int Value() const {
- return static_cast<int>(filtered_rsd_->Value() + 0.5);
+ return static_cast<int>(filtered_rsd_->filtered() + 0.5);
}
private:
@@ -307,7 +307,7 @@
const float kWeightFactor;
uint32_t count_; // Number of encode samples since last reset.
CpuOveruseOptions options_;
- scoped_ptr<VCMExpFilter> filtered_rsd_;
+ scoped_ptr<rtc::ExpFilter> filtered_rsd_;
int hist_samples_;
float hist_sum_;
std::map<int,int> hist_; // Histogram of encode time of frames.
@@ -320,7 +320,7 @@
CaptureQueueDelay()
: kWeightFactor(0.5f),
delay_ms_(0),
- filtered_delay_ms_per_s_(new VCMExpFilter(kWeightFactor)) {
+ filtered_delay_ms_per_s_(new rtc::ExpFilter(kWeightFactor)) {
filtered_delay_ms_per_s_->Apply(1.0f, 0.0f);
}
~CaptureQueueDelay() {}
@@ -361,14 +361,14 @@
}
int Value() const {
- return static_cast<int>(filtered_delay_ms_per_s_->Value() + 0.5);
+ return static_cast<int>(filtered_delay_ms_per_s_->filtered() + 0.5);
}
private:
const float kWeightFactor;
std::list<int64_t> frames_;
int delay_ms_;
- scoped_ptr<VCMExpFilter> filtered_delay_ms_per_s_;
+ scoped_ptr<rtc::ExpFilter> filtered_delay_ms_per_s_;
};
OveruseFrameDetector::OveruseFrameDetector(Clock* clock)
diff --git a/video_engine/overuse_frame_detector.h b/video_engine/overuse_frame_detector.h
index efd23dc..df3c1a0 100644
--- a/video_engine/overuse_frame_detector.h
+++ b/video_engine/overuse_frame_detector.h
@@ -12,6 +12,7 @@
#define WEBRTC_VIDEO_ENGINE_OVERUSE_FRAME_DETECTOR_H_
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/exp_filter.h"
#include "webrtc/modules/interface/module.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/video_engine/include/vie_base.h"
@@ -21,7 +22,6 @@
class Clock;
class CpuOveruseObserver;
class CriticalSectionWrapper;
-class VCMExpFilter;
// TODO(pbos): Move this somewhere appropriate.
class Statistics {
@@ -43,8 +43,8 @@
float sum_;
uint64_t count_;
CpuOveruseOptions options_;
- scoped_ptr<VCMExpFilter> filtered_samples_;
- scoped_ptr<VCMExpFilter> filtered_variance_;
+ scoped_ptr<rtc::ExpFilter> filtered_samples_;
+ scoped_ptr<rtc::ExpFilter> filtered_variance_;
};
// Use to detect system overuse based on jitter in incoming frames.
diff --git a/video_engine/test/auto_test/source/vie_autotest_image_process.cc b/video_engine/test/auto_test/source/vie_autotest_image_process.cc
index 5424e92..c6f0a05 100644
--- a/video_engine/test/auto_test/source/vie_autotest_image_process.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_image_process.cc
@@ -198,20 +198,6 @@
tbCapture.captureId, effectFilter));
//
- // Denoising
- //
- EXPECT_EQ(0, ViE.image_process->EnableDenoising(tbCapture.captureId, true));
- // If the denoising is already enabled, it will just reuturn 0.
- EXPECT_EQ(0, ViE.image_process->EnableDenoising(tbCapture.captureId, true));
- EXPECT_EQ(0, ViE.image_process->EnableDenoising(
- tbCapture.captureId, false));
- // If the denoising is already disabled, it will just reuturn 0.
- EXPECT_EQ(0, ViE.image_process->EnableDenoising(
- tbCapture.captureId, false));
- EXPECT_NE(0, ViE.image_process->EnableDenoising(
- tbChannel.videoChannel, true));
-
- //
// Deflickering
//
EXPECT_EQ(0, ViE.image_process->EnableDeflickering(
diff --git a/video_engine/vie_capturer.cc b/video_engine/vie_capturer.cc
index 30d6633..231dcfb 100644
--- a/video_engine/vie_capturer.cc
+++ b/video_engine/vie_capturer.cc
@@ -56,7 +56,6 @@
brightness_frame_stats_(NULL),
current_brightness_level_(Normal),
reported_brightness_level_(Normal),
- denoising_enabled_(false),
observer_cs_(CriticalSectionWrapper::CreateCriticalSection()),
observer_(NULL),
overuse_detector_(new OveruseFrameDetector(Clock::GetRealTimeClock())) {
@@ -404,28 +403,6 @@
return 0;
}
-int32_t ViECapturer::EnableDenoising(bool enable) {
- CriticalSectionScoped cs(deliver_cs_.get());
- if (enable) {
- if (denoising_enabled_) {
- // Already enabled, nothing need to be done.
- return 0;
- }
- denoising_enabled_ = true;
- if (IncImageProcRefCount() != 0) {
- return -1;
- }
- } else {
- if (denoising_enabled_ == false) {
- // Already disabled, nothing need to be done.
- return 0;
- }
- denoising_enabled_ = false;
- DecImageProcRefCount();
- }
- return 0;
-}
-
int32_t ViECapturer::EnableDeflickering(bool enable) {
CriticalSectionScoped cs(deliver_cs_.get());
if (enable) {
@@ -516,9 +493,6 @@
LOG_F(LS_ERROR) << "Could not get frame stats.";
}
}
- if (denoising_enabled_) {
- image_proc_module_->Denoising(video_frame);
- }
if (brightness_frame_stats_) {
if (image_proc_module_->GetFrameStats(brightness_frame_stats_,
*video_frame) == 0) {
diff --git a/video_engine/vie_capturer.h b/video_engine/vie_capturer.h
index 8e89357..2464ca0 100644
--- a/video_engine/vie_capturer.h
+++ b/video_engine/vie_capturer.h
@@ -94,7 +94,6 @@
// Effect filter.
int32_t RegisterEffectFilter(ViEEffectFilter* effect_filter);
- int32_t EnableDenoising(bool enable);
int32_t EnableDeflickering(bool enable);
int32_t EnableBrightnessAlarm(bool enable);
@@ -180,7 +179,6 @@
VideoProcessingModule::FrameStats* brightness_frame_stats_;
Brightness current_brightness_level_;
Brightness reported_brightness_level_;
- bool denoising_enabled_;
// Statistics observer.
scoped_ptr<CriticalSectionWrapper> observer_cs_;
diff --git a/video_engine/vie_encoder.cc b/video_engine/vie_encoder.cc
index a628188..ba7862a 100644
--- a/video_engine/vie_encoder.cc
+++ b/video_engine/vie_encoder.cc
@@ -322,7 +322,10 @@
current_send_codec.extra_options = NULL;
if (vcm_.RegisterSendCodec(&current_send_codec, number_of_cores_,
max_data_payload_length) != VCM_OK) {
- return -1;
+ LOG(LS_INFO) << "De-registered the currently used external encoder ("
+ << static_cast<int>(pl_type) << ") and therefore tried to "
+ << "register the corresponding internal encoder, but none "
+ << "was supported.";
}
}
return 0;
diff --git a/video_engine/vie_image_process_impl.cc b/video_engine/vie_image_process_impl.cc
index d089c04..13e520a 100644
--- a/video_engine/vie_image_process_impl.cc
+++ b/video_engine/vie_image_process_impl.cc
@@ -182,29 +182,6 @@
return 0;
}
-int ViEImageProcessImpl::EnableDenoising(const int capture_id,
- const bool enable) {
- LOG_F(LS_INFO) << "capture_id: " << capture_id
- << " enable: " << (enable ? "on" : "off");
-
- ViEInputManagerScoped is(*(shared_data_->input_manager()));
- ViECapturer* vie_capture = is.Capture(capture_id);
- if (!vie_capture) {
- shared_data_->SetLastError(kViEImageProcessInvalidCaptureId);
- return -1;
- }
-
- if (vie_capture->EnableDenoising(enable) != 0) {
- if (enable) {
- shared_data_->SetLastError(kViEImageProcessAlreadyEnabled);
- } else {
- shared_data_->SetLastError(kViEImageProcessAlreadyDisabled);
- }
- return -1;
- }
- return 0;
-}
-
int ViEImageProcessImpl::EnableColorEnhancement(const int video_channel,
const bool enable) {
LOG_F(LS_INFO) << "video_channel: " << video_channel
diff --git a/video_engine/vie_image_process_impl.h b/video_engine/vie_image_process_impl.h
index 74a7ff0..38a6a08 100644
--- a/video_engine/vie_image_process_impl.h
+++ b/video_engine/vie_image_process_impl.h
@@ -35,7 +35,6 @@
ViEEffectFilter& render_filter);
virtual int DeregisterRenderEffectFilter(const int video_channel);
virtual int EnableDeflickering(const int capture_id, const bool enable);
- virtual int EnableDenoising(const int capture_id, const bool enable);
virtual int EnableColorEnhancement(const int video_channel,
const bool enable);
virtual void RegisterPreEncodeCallback(
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 2d4ba57..af773e7 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -934,7 +934,8 @@
true)),
rtcp_bandwidth_observer_(
bitrate_controller_->CreateRtcpBandwidthObserver()),
- send_bitrate_observer_(new VoEBitrateObserver(this))
+ send_bitrate_observer_(new VoEBitrateObserver(this)),
+ network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock()))
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::Channel() - ctor");
@@ -1537,8 +1538,13 @@
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::OnNetworkChanged(bitrate_bps=%d, fration_lost=%d, rtt=%d)",
bitrate_bps, fraction_lost, rtt);
+ // |fraction_lost| from BitrateObserver is a short-term observation of the
+ // packet loss rate from the recent past. Use the network predictor to derive
+ // a smoother, more reasonable loss rate estimate.
+ network_predictor_->UpdatePacketLossRate(fraction_lost);
+ uint8_t loss_rate = network_predictor_->GetLossRate();
// Normalizes rate to 0 - 100.
- if (audio_coding_->SetPacketLossRate(100 * fraction_lost / 255) != 0) {
+ if (audio_coding_->SetPacketLossRate(100 * loss_rate / 255) != 0) {
_engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
kTraceError, "OnNetworkChanged() failed to set packet loss rate");
assert(false); // This should not happen.
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
index 5cb2b9c..8385ccc 100644
--- a/voice_engine/channel.h
+++ b/voice_engine/channel.h
@@ -27,6 +27,7 @@
#include "webrtc/voice_engine/include/voe_audio_processing.h"
#include "webrtc/voice_engine/include/voe_network.h"
#include "webrtc/voice_engine/level_indicator.h"
+#include "webrtc/voice_engine/network_predictor.h"
#include "webrtc/voice_engine/shared_data.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
@@ -621,6 +622,7 @@
scoped_ptr<BitrateController> bitrate_controller_;
scoped_ptr<RtcpBandwidthObserver> rtcp_bandwidth_observer_;
scoped_ptr<BitrateObserver> send_bitrate_observer_;
+ scoped_ptr<NetworkPredictor> network_predictor_;
};
} // namespace voe
diff --git a/voice_engine/network_predictor.cc b/voice_engine/network_predictor.cc
new file mode 100644
index 0000000..4093877
--- /dev/null
+++ b/voice_engine/network_predictor.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/voice_engine/network_predictor.h"
+
+namespace webrtc {
+namespace voe {
+
+NetworkPredictor::NetworkPredictor(Clock* clock)
+ : clock_(clock),
+ last_loss_rate_update_time_ms_(clock_->TimeInMilliseconds()),
+ loss_rate_filter_(new rtc::ExpFilter(0.9999f)) {
+}
+
+void NetworkPredictor::UpdatePacketLossRate(uint8_t loss_rate) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ // Update the recursive average filter.
+ loss_rate_filter_->Apply(
+ static_cast<float>(now_ms - last_loss_rate_update_time_ms_),
+ static_cast<float>(loss_rate));
+ last_loss_rate_update_time_ms_ = now_ms;
+}
+
+uint8_t NetworkPredictor::GetLossRate() {
+ float value = loss_rate_filter_->filtered();
+ return (value == rtc::ExpFilter::kValueUndefined) ? 0 :
+ static_cast<uint8_t>(value + 0.5);
+}
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/network_predictor.h b/voice_engine/network_predictor.h
new file mode 100644
index 0000000..d9f0b7b
--- /dev/null
+++ b/voice_engine/network_predictor.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_NETWORK_PREDICTOR_H_
+#define WEBRTC_VOICE_ENGINE_NETWORK_PREDICTOR_H_
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+namespace webrtc {
+
+namespace voe {
+
+// NetworkPredictor predicts network conditions, e.g., packet loss rate, so that
+// the sender and/or receiver can adapt to changes in the network condition.
+class NetworkPredictor {
+ public:
+ explicit NetworkPredictor(Clock* clock);
+ ~NetworkPredictor() {}
+
+ // Gets the predicted packet loss rate.
+ uint8_t GetLossRate();
+
+ // Updates the packet loss rate predictor upon receiving a new observation of
+ // the packet loss rate. The input loss rate must be in the
+ // interval [0, 255].
+ void UpdatePacketLossRate(uint8_t loss_rate);
+
+ private:
+ Clock* clock_;
+ int64_t last_loss_rate_update_time_ms_;
+
+ // An exponential filter is used to predict packet loss rate.
+ scoped_ptr<rtc::ExpFilter> loss_rate_filter_;
+};
+
+} // namespace voe
+} // namespace webrtc
+#endif // WEBRTC_VOICE_ENGINE_NETWORK_PREDICTOR_H_
diff --git a/voice_engine/network_predictor_unittest.cc b/voice_engine/network_predictor_unittest.cc
new file mode 100644
index 0000000..e399f68
--- /dev/null
+++ b/voice_engine/network_predictor_unittest.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/voice_engine/network_predictor.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+namespace webrtc {
+namespace voe {
+
+class TestNetworkPredictor : public ::testing::Test {
+ protected:
+ TestNetworkPredictor()
+ : clock_(0),
+ network_predictor_(new NetworkPredictor(&clock_)) {}
+ SimulatedClock clock_;
+ scoped_ptr<NetworkPredictor> network_predictor_;
+};
+
+TEST_F(TestNetworkPredictor, TestPacketLossRateFilter) {
+ // Test initial packet loss rate estimate is 0.
+ EXPECT_EQ(0, network_predictor_->GetLossRate());
+ network_predictor_->UpdatePacketLossRate(32);
+ // First time, no filtering.
+ EXPECT_EQ(32, network_predictor_->GetLossRate());
+ clock_.AdvanceTimeMilliseconds(1000);
+ network_predictor_->UpdatePacketLossRate(40);
+ float exp = pow(0.9999f, 1000);
+ float value = 32.0f * exp + (1 - exp) * 40.0f;
+ EXPECT_EQ(static_cast<uint8_t>(value + 0.5f),
+ network_predictor_->GetLossRate());
+}
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/test/auto_test/standard/mixing_test.cc b/voice_engine/test/auto_test/standard/mixing_test.cc
index eb520b8..2a5732b 100644
--- a/voice_engine/test/auto_test/standard/mixing_test.cc
+++ b/voice_engine/test/auto_test/standard/mixing_test.cc
@@ -20,8 +20,12 @@
const int16_t kLimiterHeadroom = 29204; // == -1 dbFS
const int16_t kInt16Max = 0x7fff;
-const int kSampleRateHz = 16000;
+const int kPayloadType = 105;
+const int kInSampleRateHz = 16000; // Input file taken as 16 kHz by default.
+const int kRecSampleRateHz = 16000; // Recorded with 16 kHz L16.
const int kTestDurationMs = 3000;
+const CodecInst kCodecL16 = {kPayloadType, "L16", 16000, 160, 1, 256000};
+const CodecInst kCodecOpus = {kPayloadType, "opus", 48000, 960, 1, 32000};
} // namespace
@@ -54,7 +58,8 @@
bool real_audio,
int16_t input_value,
int16_t max_output_value,
- int16_t min_output_value) {
+ int16_t min_output_value,
+ const CodecInst& codec_inst) {
ASSERT_LE(num_remote_streams_using_mono, num_remote_streams);
if (real_audio) {
@@ -77,7 +82,8 @@
remote_streams[i] = voe_base_->CreateChannel();
EXPECT_NE(-1, remote_streams[i]);
}
- StartRemoteStreams(remote_streams, num_remote_streams_using_mono);
+ StartRemoteStreams(remote_streams, num_remote_streams_using_mono,
+ codec_inst);
TEST_LOG("Playing %d remote streams.\n", num_remote_streams);
// Give it plenty of time to get started.
@@ -106,7 +112,7 @@
void GenerateInputFile(int16_t input_value) {
FILE* input_file = fopen(input_filename_.c_str(), "wb");
ASSERT_TRUE(input_file != NULL);
- for (int i = 0; i < kSampleRateHz / 1000 * (kTestDurationMs * 2); i++) {
+ for (int i = 0; i < kInSampleRateHz / 1000 * (kTestDurationMs * 2); i++) {
ASSERT_EQ(1u, fwrite(&input_value, sizeof(input_value), 1, input_file));
}
ASSERT_EQ(0, fclose(input_file));
@@ -129,7 +135,7 @@
// Ensure we've at least recorded half as much file as the duration of the
// test. We have to use a relaxed tolerance here due to filesystem flakiness
// on the bots.
- ASSERT_GE((samples_read * 1000.0) / kSampleRateHz, kTestDurationMs);
+ ASSERT_GE((samples_read * 1000.0) / kRecSampleRateHz, kTestDurationMs);
// Ensure we read the entire file.
ASSERT_NE(0, feof(output_file));
ASSERT_EQ(0, fclose(output_file));
@@ -153,17 +159,8 @@
// Start up remote streams ("normal" participants).
void StartRemoteStreams(const std::vector<int>& streams,
- int num_remote_streams_using_mono) {
- // Use L16 at 16kHz to minimize distortion (file recording is 16kHz and
- // resampling will cause distortion).
- CodecInst codec_inst;
- strcpy(codec_inst.plname, "L16");
- codec_inst.channels = 1;
- codec_inst.plfreq = kSampleRateHz;
- codec_inst.pltype = 105;
- codec_inst.pacsize = codec_inst.plfreq / 100;
- codec_inst.rate = codec_inst.plfreq * sizeof(int16_t) * 8; // 8 bits/byte.
-
+ int num_remote_streams_using_mono,
+ const CodecInst& codec_inst) {
for (int i = 0; i < num_remote_streams_using_mono; ++i) {
// Add some delay between starting up the channels in order to give them
// different energies in the "real audio" test and hopefully exercise
@@ -173,10 +170,11 @@
}
// The remainder of the streams will use stereo.
- codec_inst.channels = 2;
- codec_inst.pltype++;
+ CodecInst codec_inst_stereo = codec_inst;
+ codec_inst_stereo.channels = 2;
+ codec_inst_stereo.pltype++;
for (size_t i = num_remote_streams_using_mono; i < streams.size(); ++i) {
- StartRemoteStream(streams[i], codec_inst, 1234 + 2 * i);
+ StartRemoteStream(streams[i], codec_inst_stereo, 1234 + 2 * i);
}
}
@@ -210,7 +208,7 @@
EXPECT_NE(-1, size);
fclose(fid);
// Divided by 2 due to 2 bytes/sample.
- return size * 1000 / kSampleRateHz / 2;
+ return size * 1000 / kRecSampleRateHz / 2;
}
std::string input_filename_;
@@ -222,7 +220,11 @@
// somewhat more realistic scenario using real audio. It can at least hunt for
// asserts and crashes.
TEST_F(MixingTest, MixManyChannelsForStress) {
- RunMixingTest(10, 0, 10, true, 0, 0, 0);
+ RunMixingTest(10, 0, 10, true, 0, 0, 0, kCodecL16);
+}
+
+TEST_F(MixingTest, MixManyChannelsForStressOpus) {
+ RunMixingTest(10, 0, 10, true, 0, 0, 0, kCodecOpus);
}
// These tests assume a maximum of three mixed participants. We typically allow
@@ -232,7 +234,7 @@
const int16_t kInputValue = 1000;
const int16_t kExpectedOutput = kInputValue * 3;
RunMixingTest(4, 0, 4, false, kInputValue, 1.1 * kExpectedOutput,
- 0.9 * kExpectedOutput);
+ 0.9 * kExpectedOutput, kCodecL16);
}
// Ensure the mixing saturation protection is working. We can do this because
@@ -245,7 +247,7 @@
ASSERT_GT(kInputValue * 3, kInt16Max);
ASSERT_LT(1.1 * kExpectedOutput, kInt16Max);
RunMixingTest(3, 0, 3, false, kInputValue, 1.1 * kExpectedOutput,
- 0.9 * kExpectedOutput);
+ 0.9 * kExpectedOutput, kCodecL16);
}
TEST_F(MixingTest, SaturationProtectionHasNoEffectOnOneChannel) {
@@ -255,21 +257,21 @@
ASSERT_GT(0.95 * kExpectedOutput, kLimiterHeadroom);
// Tighter constraints are required here to properly test this.
RunMixingTest(1, 0, 1, false, kInputValue, kExpectedOutput,
- 0.95 * kExpectedOutput);
+ 0.95 * kExpectedOutput, kCodecL16);
}
TEST_F(MixingTest, VerifyAnonymousAndNormalParticipantMixing) {
const int16_t kInputValue = 1000;
const int16_t kExpectedOutput = kInputValue * 2;
RunMixingTest(1, 1, 1, false, kInputValue, 1.1 * kExpectedOutput,
- 0.9 * kExpectedOutput);
+ 0.9 * kExpectedOutput, kCodecL16);
}
TEST_F(MixingTest, AnonymousParticipantsAreAlwaysMixed) {
const int16_t kInputValue = 1000;
const int16_t kExpectedOutput = kInputValue * 4;
RunMixingTest(3, 1, 3, false, kInputValue, 1.1 * kExpectedOutput,
- 0.9 * kExpectedOutput);
+ 0.9 * kExpectedOutput, kCodecL16);
}
TEST_F(MixingTest, VerifyStereoAndMonoMixing) {
@@ -277,7 +279,7 @@
const int16_t kExpectedOutput = kInputValue * 2;
RunMixingTest(2, 0, 1, false, kInputValue, 1.1 * kExpectedOutput,
// Lower than 0.9 due to observed flakiness on bots.
- 0.8 * kExpectedOutput);
+ 0.8 * kExpectedOutput, kCodecL16);
}
} // namespace webrtc
diff --git a/voice_engine/voice_engine.gyp b/voice_engine/voice_engine.gyp
index 19342c3..43296ff 100644
--- a/voice_engine/voice_engine.gyp
+++ b/voice_engine/voice_engine.gyp
@@ -55,6 +55,8 @@
'level_indicator.h',
'monitor_module.cc',
'monitor_module.h',
+ 'network_predictor.cc',
+ 'network_predictor.h',
'output_mixer.cc',
'output_mixer.h',
'shared_data.cc',
@@ -122,6 +124,7 @@
],
'sources': [
'channel_unittest.cc',
+ 'network_predictor_unittest.cc',
'transmit_mixer_unittest.cc',
'utility_unittest.cc',
'voe_audio_processing_unittest.cc',