Reformat the WebRTC code base
Running clang-format with chromium's style guide.
The goal is n-fold:
* providing consistency and readability (that's what code guidelines are for)
* preventing noise with presubmit checks and git cl format
* building on the previous point: making it easier to automatically fix format issues
* you name it
Please consider using git-hyper-blame to ignore this commit.
Bug: webrtc:9340
Change-Id: I694567c4cdf8cee2860958cfe82bfaf25848bb87
Reviewed-on: https://webrtc-review.googlesource.com/81185
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23660}
diff --git a/modules/audio_coding/neteq/accelerate.h b/modules/audio_coding/neteq/accelerate.h
index bf4f0f7..6d5b115 100644
--- a/modules/audio_coding/neteq/accelerate.h
+++ b/modules/audio_coding/neteq/accelerate.h
@@ -29,10 +29,10 @@
// Accelerate are implemented.
class Accelerate : public TimeStretch {
public:
- Accelerate(int sample_rate_hz, size_t num_channels,
+ Accelerate(int sample_rate_hz,
+ size_t num_channels,
const BackgroundNoise& background_noise)
- : TimeStretch(sample_rate_hz, num_channels, background_noise) {
- }
+ : TimeStretch(sample_rate_hz, num_channels, background_noise) {}
// This method performs the actual Accelerate operation. The samples are
// read from |input|, of length |input_length| elements, and are written to
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index e8f7a4a..54ede6f 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -114,7 +114,7 @@
decoder_ = NULL;
}
- virtual void InitEncoder() { }
+ virtual void InitEncoder() {}
// TODO(henrik.lundin) Change return type to size_t once most/all overriding
// implementations are gone.
@@ -136,12 +136,13 @@
samples_per_10ms, channels_,
interleaved_input.get());
- encoded_info = audio_encoder_->Encode(
- 0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
- audio_encoder_->NumChannels() *
- audio_encoder_->SampleRateHz() /
- 100),
- output);
+ encoded_info =
+ audio_encoder_->Encode(0,
+ rtc::ArrayView<const int16_t>(
+ interleaved_input.get(),
+ audio_encoder_->NumChannels() *
+ audio_encoder_->SampleRateHz() / 100),
+ output);
}
EXPECT_EQ(payload_type_, encoded_info.payload_type);
return static_cast<int>(encoded_info.encoded_bytes);
@@ -152,11 +153,14 @@
// with |mse|. The encoded stream should contain |expected_bytes|. For stereo
// audio, the absolute difference between the two channels is compared vs
// |channel_diff_tolerance|.
- void EncodeDecodeTest(size_t expected_bytes, int tolerance, double mse,
- int delay = 0, int channel_diff_tolerance = 0) {
+ void EncodeDecodeTest(size_t expected_bytes,
+ int tolerance,
+ double mse,
+ int delay = 0,
+ int channel_diff_tolerance = 0) {
ASSERT_GE(tolerance, 0) << "Test must define a tolerance >= 0";
- ASSERT_GE(channel_diff_tolerance, 0) <<
- "Test must define a channel_diff_tolerance >= 0";
+ ASSERT_GE(channel_diff_tolerance, 0)
+ << "Test must define a channel_diff_tolerance >= 0";
size_t processed_samples = 0u;
rtc::Buffer encoded;
size_t encoded_bytes = 0u;
@@ -168,10 +172,10 @@
input.resize(input.size() + frame_size_, 0);
// Read from input file.
ASSERT_GE(input.size() - processed_samples, frame_size_);
- ASSERT_TRUE(input_audio_.Read(
- frame_size_, codec_input_rate_hz_, &input[processed_samples]));
- size_t enc_len = EncodeFrame(
- &input[processed_samples], frame_size_, &encoded);
+ ASSERT_TRUE(input_audio_.Read(frame_size_, codec_input_rate_hz_,
+ &input[processed_samples]));
+ size_t enc_len =
+ EncodeFrame(&input[processed_samples], frame_size_, &encoded);
// Make sure that frame_size_ * channels_ samples are allocated and free.
decoded.resize((processed_samples + frame_size_) * channels_, 0);
AudioDecoder::SpeechType speech_type;
@@ -189,11 +193,11 @@
if (expected_bytes) {
EXPECT_EQ(expected_bytes, encoded_bytes);
}
- CompareInputOutput(
- input, decoded, processed_samples, channels_, tolerance, delay);
+ CompareInputOutput(input, decoded, processed_samples, channels_, tolerance,
+ delay);
if (channels_ == 2)
- CompareTwoChannels(
- decoded, processed_samples, channels_, channel_diff_tolerance);
+ CompareTwoChannels(decoded, processed_samples, channels_,
+ channel_diff_tolerance);
EXPECT_LE(
MseInputOutput(input, decoded, processed_samples, channels_, delay),
mse);
@@ -242,10 +246,9 @@
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
- size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
- codec_input_rate_hz_,
- frame_size_ * channels_ * sizeof(int16_t),
- output.get(), &speech_type);
+ size_t dec_len = decoder_->Decode(
+ encoded.data(), enc_len, codec_input_rate_hz_,
+ frame_size_ * channels_ * sizeof(int16_t), output.get(), &speech_type);
EXPECT_EQ(frame_size_ * channels_, dec_len);
// Call DecodePlc and verify that we get one frame of data.
// (Overwrite the output from the above Decode call, but that does not
@@ -332,10 +335,9 @@
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
- size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
- codec_input_rate_hz_,
- frame_size_ * channels_ * sizeof(int16_t),
- output.get(), &speech_type);
+ size_t dec_len = decoder_->Decode(
+ encoded.data(), enc_len, codec_input_rate_hz_,
+ frame_size_ * channels_ * sizeof(int16_t), output.get(), &speech_type);
EXPECT_EQ(frame_size_, dec_len);
// Simply call DecodePlc and verify that we get 0 as return value.
EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
diff --git a/modules/audio_coding/neteq/audio_multi_vector.cc b/modules/audio_coding/neteq/audio_multi_vector.cc
index c3e623f..fee37cb 100644
--- a/modules/audio_coding/neteq/audio_multi_vector.cc
+++ b/modules/audio_coding/neteq/audio_multi_vector.cc
@@ -21,7 +21,8 @@
AudioMultiVector::AudioMultiVector(size_t N) {
assert(N > 0);
- if (N < 1) N = 1;
+ if (N < 1)
+ N = 1;
for (size_t n = 0; n < N; ++n) {
channels_.push_back(new AudioVector);
}
@@ -30,7 +31,8 @@
AudioMultiVector::AudioMultiVector(size_t N, size_t initial_size) {
assert(N > 0);
- if (N < 1) N = 1;
+ if (N < 1)
+ N = 1;
for (size_t n = 0; n < N; ++n) {
channels_.push_back(new AudioVector(initial_size));
}
@@ -86,7 +88,7 @@
}
channels_[channel]->PushBack(temp_array, length_per_channel);
}
- delete [] temp_array;
+ delete[] temp_array;
}
void AudioMultiVector::PushBack(const AudioMultiVector& append_this) {
diff --git a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
index f05aee0..7272dc2 100644
--- a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
+++ b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
@@ -37,9 +37,7 @@
array_interleaved_ = new int16_t[num_channels_ * array_length()];
}
- ~AudioMultiVectorTest() {
- delete [] array_interleaved_;
- }
+ ~AudioMultiVectorTest() { delete[] array_interleaved_; }
virtual void SetUp() {
// Populate test arrays.
@@ -58,9 +56,7 @@
}
}
- size_t array_length() const {
- return sizeof(array_) / sizeof(array_[0]);
- }
+ size_t array_length() const { return sizeof(array_) / sizeof(array_[0]); }
const size_t num_channels_;
size_t interleaved_length_;
@@ -168,8 +164,9 @@
ASSERT_EQ(2u, vec2.Size());
for (size_t channel = 0; channel < num_channels_; ++channel) {
for (size_t i = 0; i < 2; ++i) {
- EXPECT_EQ(array_interleaved_[channel + num_channels_ *
- (array_length() - 2 + i)], vec2[channel][i]);
+ EXPECT_EQ(array_interleaved_[channel +
+ num_channels_ * (array_length() - 2 + i)],
+ vec2[channel][i]);
}
}
}
@@ -206,7 +203,7 @@
EXPECT_EQ(0,
memcmp(array_interleaved_, output, read_samples * sizeof(int16_t)));
- delete [] output;
+ delete[] output;
}
// Test the PopFront method.
diff --git a/modules/audio_coding/neteq/audio_vector.cc b/modules/audio_coding/neteq/audio_vector.cc
index 93cd1fb..0486416 100644
--- a/modules/audio_coding/neteq/audio_vector.cc
+++ b/modules/audio_coding/neteq/audio_vector.cc
@@ -20,8 +20,7 @@
namespace webrtc {
-AudioVector::AudioVector()
- : AudioVector(kDefaultInitialSize) {
+AudioVector::AudioVector() : AudioVector(kDefaultInitialSize) {
Clear();
}
@@ -47,16 +46,15 @@
copy_to->end_index_ = Size();
}
-void AudioVector::CopyTo(
- size_t length, size_t position, int16_t* copy_to) const {
+void AudioVector::CopyTo(size_t length,
+ size_t position,
+ int16_t* copy_to) const {
if (length == 0)
return;
length = std::min(length, Size() - position);
const size_t copy_index = (begin_index_ + position) % capacity_;
- const size_t first_chunk_length =
- std::min(length, capacity_ - copy_index);
- memcpy(copy_to, &array_[copy_index],
- first_chunk_length * sizeof(int16_t));
+ const size_t first_chunk_length = std::min(length, capacity_ - copy_index);
+ memcpy(copy_to, &array_[copy_index], first_chunk_length * sizeof(int16_t));
const size_t remaining_length = length - first_chunk_length;
if (remaining_length > 0) {
memcpy(&copy_to[first_chunk_length], array_.get(),
@@ -102,8 +100,9 @@
PushBack(append_this, append_this.Size(), 0);
}
-void AudioVector::PushBack(
- const AudioVector& append_this, size_t length, size_t position) {
+void AudioVector::PushBack(const AudioVector& append_this,
+ size_t length,
+ size_t position) {
RTC_DCHECK_LE(position, append_this.Size());
RTC_DCHECK_LE(length, append_this.Size() - position);
@@ -116,8 +115,8 @@
const size_t start_index =
(append_this.begin_index_ + position) % append_this.capacity_;
- const size_t first_chunk_length = std::min(
- length, append_this.capacity_ - start_index);
+ const size_t first_chunk_length =
+ std::min(length, append_this.capacity_ - start_index);
PushBack(&append_this.array_[start_index], first_chunk_length);
const size_t remaining_length = length - first_chunk_length;
@@ -179,8 +178,7 @@
}
}
-void AudioVector::InsertZerosAt(size_t length,
- size_t position) {
+void AudioVector::InsertZerosAt(size_t length, size_t position) {
if (length == 0)
return;
// Cap the insert position at the current array length.
@@ -265,7 +263,8 @@
alpha -= alpha_step;
array_[(position + i) % capacity_] =
(alpha * array_[(position + i) % capacity_] +
- (16384 - alpha) * append_this[i] + 8192) >> 14;
+ (16384 - alpha) * append_this[i] + 8192) >>
+ 14;
}
assert(alpha >= 0); // Verify that the slope was correct.
// Append what is left of |append_this|.
@@ -319,8 +318,8 @@
}
void AudioVector::InsertByPushFront(const int16_t* insert_this,
- size_t length,
- size_t position) {
+ size_t length,
+ size_t position) {
std::unique_ptr<int16_t[]> temp_array(nullptr);
if (position > 0) {
// TODO(minyue): see if it is possible to avoid copying to a buffer.
@@ -335,8 +334,7 @@
PushFront(temp_array.get(), position);
}
-void AudioVector::InsertZerosByPushBack(size_t length,
- size_t position) {
+void AudioVector::InsertZerosByPushBack(size_t length, size_t position) {
const size_t move_chunk_length = Size() - position;
std::unique_ptr<int16_t[]> temp_array(nullptr);
if (move_chunk_length > 0) {
@@ -359,8 +357,7 @@
PushBack(temp_array.get(), move_chunk_length);
}
-void AudioVector::InsertZerosByPushFront(size_t length,
- size_t position) {
+void AudioVector::InsertZerosByPushFront(size_t length, size_t position) {
std::unique_ptr<int16_t[]> temp_array(nullptr);
if (position > 0) {
temp_array.reset(new int16_t[position]);
diff --git a/modules/audio_coding/neteq/audio_vector.h b/modules/audio_coding/neteq/audio_vector.h
index 754a9fd..65939ce 100644
--- a/modules/audio_coding/neteq/audio_vector.h
+++ b/modules/audio_coding/neteq/audio_vector.h
@@ -75,7 +75,8 @@
// them at |position|. The length of the AudioVector is increased by |length|.
// |position| = 0 means that the new values are prepended to the vector.
// |position| = Size() means that the new values are appended to the vector.
- virtual void InsertAt(const int16_t* insert_this, size_t length,
+ virtual void InsertAt(const int16_t* insert_this,
+ size_t length,
size_t position);
// Like InsertAt, but inserts |length| zero elements at |position|.
@@ -140,10 +141,12 @@
void Reserve(size_t n);
- void InsertByPushBack(const int16_t* insert_this, size_t length,
+ void InsertByPushBack(const int16_t* insert_this,
+ size_t length,
size_t position);
- void InsertByPushFront(const int16_t* insert_this, size_t length,
+ void InsertByPushFront(const int16_t* insert_this,
+ size_t length,
size_t position);
void InsertZerosByPushBack(size_t length, size_t position);
diff --git a/modules/audio_coding/neteq/audio_vector_unittest.cc b/modules/audio_coding/neteq/audio_vector_unittest.cc
index 1b54abc..e70178c 100644
--- a/modules/audio_coding/neteq/audio_vector_unittest.cc
+++ b/modules/audio_coding/neteq/audio_vector_unittest.cc
@@ -30,9 +30,7 @@
}
}
- size_t array_length() const {
- return sizeof(array_) / sizeof(array_[0]);
- }
+ size_t array_length() const { return sizeof(array_) / sizeof(array_[0]); }
int16_t array_[10];
};
@@ -283,8 +281,8 @@
for (int i = 0; i < kNewLength; ++i) {
new_array[i] = 100 + i;
}
- int insert_position = rtc::checked_cast<int>(
- array_length() + 10); // Too large.
+ int insert_position =
+ rtc::checked_cast<int>(array_length() + 10); // Too large.
vec.InsertAt(new_array, kNewLength, insert_position);
// Verify that the vector looks as follows:
// {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
@@ -375,7 +373,7 @@
EXPECT_EQ(0, vec1[i]);
}
// Check mixing zone.
- for (size_t i = 0 ; i < kFadeLength; ++i) {
+ for (size_t i = 0; i < kFadeLength; ++i) {
EXPECT_NEAR((i + 1) * 100 / (kFadeLength + 1),
vec1[kLength - kFadeLength + i], 1);
}
diff --git a/modules/audio_coding/neteq/background_noise.cc b/modules/audio_coding/neteq/background_noise.cc
index 50ffa86..08c278e 100644
--- a/modules/audio_coding/neteq/background_noise.cc
+++ b/modules/audio_coding/neteq/background_noise.cc
@@ -58,11 +58,11 @@
int16_t temp_signal_array[kVecLen + kMaxLpcOrder] = {0};
int16_t* temp_signal = &temp_signal_array[kMaxLpcOrder];
input[channel_ix].CopyTo(kVecLen, input.Size() - kVecLen, temp_signal);
- int32_t sample_energy = CalculateAutoCorrelation(temp_signal, kVecLen,
- auto_correlation);
+ int32_t sample_energy =
+ CalculateAutoCorrelation(temp_signal, kVecLen, auto_correlation);
if ((!vad.running() &&
- sample_energy < parameters.energy_update_threshold) ||
+ sample_energy < parameters.energy_update_threshold) ||
(vad.running() && !vad.active_speech())) {
// Generate LPC coefficients.
if (auto_correlation[0] > 0) {
@@ -91,10 +91,8 @@
WebRtcSpl_FilterMAFastQ12(temp_signal + kVecLen - kResidualLength,
fiter_output, lpc_coefficients,
kMaxLpcOrder + 1, kResidualLength);
- int32_t residual_energy = WebRtcSpl_DotProductWithScale(fiter_output,
- fiter_output,
- kResidualLength,
- 0);
+ int32_t residual_energy = WebRtcSpl_DotProductWithScale(
+ fiter_output, fiter_output, kResidualLength, 0);
// Check spectral flatness.
// Comparing the residual variance with the input signal variance tells
@@ -146,7 +144,8 @@
return channel_parameters_[channel].filter_state;
}
-void BackgroundNoise::SetFilterState(size_t channel, const int16_t* input,
+void BackgroundNoise::SetFilterState(size_t channel,
+ const int16_t* input,
size_t length) {
assert(channel < num_channels_);
length = std::min(length, kMaxLpcOrder);
@@ -164,7 +163,9 @@
}
int32_t BackgroundNoise::CalculateAutoCorrelation(
- const int16_t* signal, size_t length, int32_t* auto_correlation) const {
+ const int16_t* signal,
+ size_t length,
+ int32_t* auto_correlation) const {
static const int kCorrelationStep = -1;
const int correlation_scale =
CrossCorrelationWithAutoShift(signal, signal, length, kMaxLpcOrder + 1,
@@ -185,15 +186,16 @@
assert(channel < num_channels_);
ChannelParameters& parameters = channel_parameters_[channel];
int32_t temp_energy =
- (kThresholdIncrement * parameters.low_energy_update_threshold) >> 16;
- temp_energy += kThresholdIncrement *
- (parameters.energy_update_threshold & 0xFF);
- temp_energy += (kThresholdIncrement *
- ((parameters.energy_update_threshold>>8) & 0xFF)) << 8;
+ (kThresholdIncrement * parameters.low_energy_update_threshold) >> 16;
+ temp_energy +=
+ kThresholdIncrement * (parameters.energy_update_threshold & 0xFF);
+ temp_energy +=
+ (kThresholdIncrement * ((parameters.energy_update_threshold >> 8) & 0xFF))
+ << 8;
parameters.low_energy_update_threshold += temp_energy;
- parameters.energy_update_threshold += kThresholdIncrement *
- (parameters.energy_update_threshold>>16);
+ parameters.energy_update_threshold +=
+ kThresholdIncrement * (parameters.energy_update_threshold >> 16);
parameters.energy_update_threshold +=
parameters.low_energy_update_threshold >> 16;
parameters.low_energy_update_threshold =
@@ -201,8 +203,7 @@
// Update maximum energy.
// Decrease by a factor 1/1024 each time.
- parameters.max_energy = parameters.max_energy -
- (parameters.max_energy >> 10);
+ parameters.max_energy = parameters.max_energy - (parameters.max_energy >> 10);
if (sample_energy > parameters.max_energy) {
parameters.max_energy = sample_energy;
}
@@ -223,9 +224,8 @@
assert(channel < num_channels_);
ChannelParameters& parameters = channel_parameters_[channel];
memcpy(parameters.filter, lpc_coefficients,
- (kMaxLpcOrder+1) * sizeof(int16_t));
- memcpy(parameters.filter_state, filter_state,
- kMaxLpcOrder * sizeof(int16_t));
+ (kMaxLpcOrder + 1) * sizeof(int16_t));
+ memcpy(parameters.filter_state, filter_state, kMaxLpcOrder * sizeof(int16_t));
// Save energy level and update energy threshold levels.
// Never get under 1.0 in average sample energy.
parameters.energy = std::max(sample_energy, 1);
diff --git a/modules/audio_coding/neteq/background_noise.h b/modules/audio_coding/neteq/background_noise.h
index a6f1395..26d42b5 100644
--- a/modules/audio_coding/neteq/background_noise.h
+++ b/modules/audio_coding/neteq/background_noise.h
@@ -38,8 +38,7 @@
// Updates the parameter estimates based on the signal currently in the
// |sync_buffer|, and on the latest decision in |vad| if it is running.
- void Update(const AudioMultiVector& sync_buffer,
- const PostDecodeVad& vad);
+ void Update(const AudioMultiVector& sync_buffer, const PostDecodeVad& vad);
// Returns |energy_| for |channel|.
int32_t Energy(size_t channel) const;
@@ -78,9 +77,7 @@
struct ChannelParameters {
// Constructor.
- ChannelParameters() {
- Reset();
- }
+ ChannelParameters() { Reset(); }
void Reset() {
energy = 2500;
diff --git a/modules/audio_coding/neteq/buffer_level_filter.cc b/modules/audio_coding/neteq/buffer_level_filter.cc
index 6005de6..4d015b6 100644
--- a/modules/audio_coding/neteq/buffer_level_filter.cc
+++ b/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -31,7 +31,8 @@
// (1 - |level_factor_|) * |buffer_size_packets|
// |level_factor_| and |filtered_current_level_| are in Q8.
// |buffer_size_packets| is in Q0.
- filtered_current_level_ = ((level_factor_ * filtered_current_level_) >> 8) +
+ filtered_current_level_ =
+ ((level_factor_ * filtered_current_level_) >> 8) +
((256 - level_factor_) * static_cast<int>(buffer_size_packets));
// Account for time-scale operations (accelerate and pre-emptive expand).
diff --git a/modules/audio_coding/neteq/buffer_level_filter.h b/modules/audio_coding/neteq/buffer_level_filter.h
index 7a48c72..c8d27dc 100644
--- a/modules/audio_coding/neteq/buffer_level_filter.h
+++ b/modules/audio_coding/neteq/buffer_level_filter.h
@@ -28,7 +28,8 @@
// corresponding number of packets, and is subtracted from the filtered
// value (thus bypassing the filter operation). |packet_len_samples| is the
// number of audio samples carried in each incoming packet.
- virtual void Update(size_t buffer_size_packets, int time_stretched_samples,
+ virtual void Update(size_t buffer_size_packets,
+ int time_stretched_samples,
size_t packet_len_samples);
// Set the current target buffer level (obtained from
diff --git a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
index 72c8727..b6dcd2a 100644
--- a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
+++ b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
@@ -39,8 +39,7 @@
}
// Expect the filtered value to be (theoretically)
// (1 - (251/256) ^ |times|) * |value|.
- double expected_value_double =
- (1 - pow(251.0 / 256.0, times)) * value;
+ double expected_value_double = (1 - pow(251.0 / 256.0, times)) * value;
int expected_value = static_cast<int>(expected_value_double);
// filtered_current_level() returns the value in Q8.
// The actual value may differ slightly from the expected value due to
@@ -94,7 +93,6 @@
EXPECT_EQ(expected_value, filter.filtered_current_level() >> 8);
}
-
TEST(BufferLevelFilter, TimeStretchedSamples) {
BufferLevelFilter filter;
filter.SetTargetBufferLevel(1); // Makes filter coefficient 251/256.
diff --git a/modules/audio_coding/neteq/comfort_noise.cc b/modules/audio_coding/neteq/comfort_noise.cc
index 5e0a875..b341acd 100644
--- a/modules/audio_coding/neteq/comfort_noise.cc
+++ b/modules/audio_coding/neteq/comfort_noise.cc
@@ -35,10 +35,9 @@
return kOK;
}
-int ComfortNoise::Generate(size_t requested_length,
- AudioMultiVector* output) {
+int ComfortNoise::Generate(size_t requested_length, AudioMultiVector* output) {
// TODO(hlundin): Change to an enumerator and skip assert.
- assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
+ assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
fs_hz_ == 48000);
// Not adapted for multi-channel yet.
if (output->Channels() != 1) {
@@ -63,8 +62,7 @@
std::unique_ptr<int16_t[]> temp(new int16_t[number_of_samples]);
if (!cng_decoder->Generate(
- rtc::ArrayView<int16_t>(temp.get(), number_of_samples),
- new_period)) {
+ rtc::ArrayView<int16_t>(temp.get(), number_of_samples), new_period)) {
// Error returned.
output->Zeros(requested_length);
RTC_LOG(LS_ERROR)
@@ -75,9 +73,9 @@
if (first_call_) {
// Set tapering window parameters. Values are in Q15.
- int16_t muting_window; // Mixing factor for overlap data.
- int16_t muting_window_increment; // Mixing factor increment (negative).
- int16_t unmuting_window; // Mixing factor for comfort noise.
+ int16_t muting_window; // Mixing factor for overlap data.
+ int16_t muting_window_increment; // Mixing factor increment (negative).
+ int16_t unmuting_window; // Mixing factor for comfort noise.
int16_t unmuting_window_increment; // Mixing factor increment.
if (fs_hz_ == 8000) {
muting_window = DspHelper::kMuteFactorStart8kHz;
@@ -109,7 +107,8 @@
// channel.
(*sync_buffer_)[0][start_ix + i] =
(((*sync_buffer_)[0][start_ix + i] * muting_window) +
- ((*output)[0][i] * unmuting_window) + 16384) >> 15;
+ ((*output)[0][i] * unmuting_window) + 16384) >>
+ 15;
muting_window += muting_window_increment;
unmuting_window += unmuting_window_increment;
}
diff --git a/modules/audio_coding/neteq/comfort_noise.h b/modules/audio_coding/neteq/comfort_noise.h
index 18800ad..c8cc64a 100644
--- a/modules/audio_coding/neteq/comfort_noise.h
+++ b/modules/audio_coding/neteq/comfort_noise.h
@@ -32,14 +32,14 @@
kMultiChannelNotSupported
};
- ComfortNoise(int fs_hz, DecoderDatabase* decoder_database,
+ ComfortNoise(int fs_hz,
+ DecoderDatabase* decoder_database,
SyncBuffer* sync_buffer)
: fs_hz_(fs_hz),
first_call_(true),
overlap_length_(5 * fs_hz_ / 8000),
decoder_database_(decoder_database),
- sync_buffer_(sync_buffer) {
- }
+ sync_buffer_(sync_buffer) {}
// Resets the state. Should be called before each new comfort noise period.
void Reset();
diff --git a/modules/audio_coding/neteq/cross_correlation.cc b/modules/audio_coding/neteq/cross_correlation.cc
index da9c913..2a03d4a 100644
--- a/modules/audio_coding/neteq/cross_correlation.cc
+++ b/modules/audio_coding/neteq/cross_correlation.cc
@@ -48,8 +48,9 @@
// There are some corner cases that 2) is not satisfied, e.g.,
// max_1 = 17, max_2 = 30848, sequence_1_length = 4095, in such case,
// optimal scaling is 0, while the following calculation results in 1.
- const int32_t factor = (max_1 * max_2) / (std::numeric_limits<int32_t>::max()
- / static_cast<int32_t>(sequence_1_length));
+ const int32_t factor =
+ (max_1 * max_2) / (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(sequence_1_length));
const int scaling = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
WebRtcSpl_CrossCorrelation(cross_correlation, sequence_1, sequence_2,
diff --git a/modules/audio_coding/neteq/decision_logic.cc b/modules/audio_coding/neteq/decision_logic.cc
index 279a9e6..cc58f04 100644
--- a/modules/audio_coding/neteq/decision_logic.cc
+++ b/modules/audio_coding/neteq/decision_logic.cc
@@ -95,7 +95,7 @@
void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
// TODO(hlundin): Change to an enumerator and skip assert.
- assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
+ assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
fs_mult_ = fs_hz / 8000;
output_size_samples_ = output_size_samples;
}
@@ -122,11 +122,11 @@
const size_t cur_size_samples =
samples_left + packet_buffer_.NumSamplesInBuffer(decoder_frame_length);
- prev_time_scale_ = prev_time_scale_ &&
- (prev_mode == kModeAccelerateSuccess ||
- prev_mode == kModeAccelerateLowEnergy ||
- prev_mode == kModePreemptiveExpandSuccess ||
- prev_mode == kModePreemptiveExpandLowEnergy);
+ prev_time_scale_ =
+ prev_time_scale_ && (prev_mode == kModeAccelerateSuccess ||
+ prev_mode == kModeAccelerateLowEnergy ||
+ prev_mode == kModePreemptiveExpandSuccess ||
+ prev_mode == kModePreemptiveExpandLowEnergy);
FilterBufferLevel(cur_size_samples, prev_mode);
diff --git a/modules/audio_coding/neteq/decision_logic.h b/modules/audio_coding/neteq/decision_logic.h
index 9d88c4d..d23aa74 100644
--- a/modules/audio_coding/neteq/decision_logic.h
+++ b/modules/audio_coding/neteq/decision_logic.h
@@ -97,9 +97,7 @@
virtual void ExpandDecision(Operations operation);
// Adds |value| to |sample_memory_|.
- void AddSampleMemory(int32_t value) {
- sample_memory_ += value;
- }
+ void AddSampleMemory(int32_t value) { sample_memory_ += value; }
// Accessors and mutators.
void set_sample_memory(int32_t value) { sample_memory_ = value; }
@@ -115,11 +113,7 @@
// The value 5 sets maximum time-stretch rate to about 100 ms/s.
static const int kMinTimescaleInterval = 5;
- enum CngState {
- kCngOff,
- kCngRfc3389On,
- kCngInternalOn
- };
+ enum CngState { kCngOff, kCngRfc3389On, kCngInternalOn };
// Returns the operation that should be done next. |sync_buffer| and |expand|
// are provided for reference. |decoder_frame_length| is the number of samples
diff --git a/modules/audio_coding/neteq/decision_logic_fax.cc b/modules/audio_coding/neteq/decision_logic_fax.cc
index 22d36ce..0f904bb 100644
--- a/modules/audio_coding/neteq/decision_logic_fax.cc
+++ b/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -39,8 +39,8 @@
decoder_database_->IsComfortNoise(next_packet->payload_type);
}
if (is_cng_packet) {
- if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
- - available_timestamp) >= 0) {
+ if (static_cast<int32_t>((generated_noise_samples + target_timestamp) -
+ available_timestamp) >= 0) {
// Time to play this packet now.
return kRfc3389Cng;
} else {
@@ -72,8 +72,8 @@
} else if (target_timestamp == available_timestamp) {
return kNormal;
} else {
- if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
- - available_timestamp) >= 0) {
+ if (static_cast<int32_t>((generated_noise_samples + target_timestamp) -
+ available_timestamp) >= 0) {
return kNormal;
} else {
// If currently playing comfort noise, continue with that. Do not
@@ -100,5 +100,4 @@
}
}
-
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/decision_logic_normal.cc b/modules/audio_coding/neteq/decision_logic_normal.cc
index c163999..a683b8c 100644
--- a/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -79,8 +79,8 @@
// Note that the MuteFactor is in Q14, so a value of 16384 corresponds to 1.
if (postpone_decoding_after_expand_ && prev_mode == kModeExpand &&
!packet_buffer_.ContainsDtxOrCngPacket(decoder_database_) &&
- cur_size_samples < static_cast<size_t>(delay_manager_->TargetLevel() *
- packet_length_samples_) >> 8 &&
+ cur_size_samples<static_cast<size_t>(delay_manager_->TargetLevel() *
+ packet_length_samples_)>> 8 &&
expand.MuteFactor(0) < 16384 / 2) {
return kExpand;
}
@@ -92,10 +92,9 @@
return ExpectedPacketAvailable(prev_mode, play_dtmf);
} else if (!PacketBuffer::IsObsoleteTimestamp(
available_timestamp, target_timestamp, five_seconds_samples)) {
- return FuturePacketAvailable(sync_buffer, expand, decoder_frame_length,
- prev_mode, target_timestamp,
- available_timestamp, play_dtmf,
- generated_noise_samples);
+ return FuturePacketAvailable(
+ sync_buffer, expand, decoder_frame_length, prev_mode, target_timestamp,
+ available_timestamp, play_dtmf, generated_noise_samples);
} else {
// This implies that available_timestamp < target_timestamp, which can
// happen when a new stream or codec is received. Signal for a reset.
@@ -183,10 +182,8 @@
// Check if we should continue with an ongoing expand because the new packet
// is too far into the future.
uint32_t timestamp_leap = available_timestamp - target_timestamp;
- if ((prev_mode == kModeExpand) &&
- !ReinitAfterExpands(timestamp_leap) &&
- !MaxWaitForPacket() &&
- PacketTooEarly(timestamp_leap) &&
+ if ((prev_mode == kModeExpand) && !ReinitAfterExpands(timestamp_leap) &&
+ !MaxWaitForPacket() && PacketTooEarly(timestamp_leap) &&
UnderTargetLevel()) {
if (play_dtmf) {
// Still have DTMF to play, so do not do expand.
@@ -199,12 +196,11 @@
const size_t samples_left =
sync_buffer.FutureLength() - expand.overlap_length();
- const size_t cur_size_samples = samples_left +
- packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
+ const size_t cur_size_samples =
+ samples_left + packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
// If previous was comfort noise, then no merge is needed.
- if (prev_mode == kModeRfc3389Cng ||
- prev_mode == kModeCodecInternalCng) {
+ if (prev_mode == kModeRfc3389Cng || prev_mode == kModeCodecInternalCng) {
// Keep the same delay as before the CNG, but make sure that the number of
// samples in buffer is no higher than 4 times the optimal level. (Note that
// TargetLevel() is in Q8.)
@@ -212,7 +208,7 @@
available_timestamp ||
cur_size_samples >
((delay_manager_->TargetLevel() * packet_length_samples_) >> 8) *
- 4) {
+ 4) {
// Time to play this new packet.
return kNormal;
} else {
@@ -237,17 +233,17 @@
bool DecisionLogicNormal::UnderTargetLevel() const {
return buffer_level_filter_->filtered_current_level() <=
- delay_manager_->TargetLevel();
+ delay_manager_->TargetLevel();
}
bool DecisionLogicNormal::ReinitAfterExpands(uint32_t timestamp_leap) const {
return timestamp_leap >=
- static_cast<uint32_t>(output_size_samples_ * kReinitAfterExpands);
+ static_cast<uint32_t>(output_size_samples_ * kReinitAfterExpands);
}
bool DecisionLogicNormal::PacketTooEarly(uint32_t timestamp_leap) const {
return timestamp_leap >
- static_cast<uint32_t>(output_size_samples_ * num_consecutive_expands_);
+ static_cast<uint32_t>(output_size_samples_ * num_consecutive_expands_);
}
bool DecisionLogicNormal::MaxWaitForPacket() const {
diff --git a/modules/audio_coding/neteq/decision_logic_normal.h b/modules/audio_coding/neteq/decision_logic_normal.h
index a718f99..ed2ea39 100644
--- a/modules/audio_coding/neteq/decision_logic_normal.h
+++ b/modules/audio_coding/neteq/decision_logic_normal.h
@@ -58,15 +58,14 @@
// Returns the operation to do given that the expected packet is not
// available, but a packet further into the future is at hand.
- virtual Operations FuturePacketAvailable(
- const SyncBuffer& sync_buffer,
- const Expand& expand,
- size_t decoder_frame_length,
- Modes prev_mode,
- uint32_t target_timestamp,
- uint32_t available_timestamp,
- bool play_dtmf,
- size_t generated_noise_samples);
+ virtual Operations FuturePacketAvailable(const SyncBuffer& sync_buffer,
+ const Expand& expand,
+ size_t decoder_frame_length,
+ Modes prev_mode,
+ uint32_t target_timestamp,
+ uint32_t available_timestamp,
+ bool play_dtmf,
+ size_t generated_noise_samples);
// Returns the operation to do given that the expected packet is available.
virtual Operations ExpectedPacketAvailable(Modes prev_mode, bool play_dtmf);
diff --git a/modules/audio_coding/neteq/decoder_database.cc b/modules/audio_coding/neteq/decoder_database.cc
index 72c0376..1fd8c03 100644
--- a/modules/audio_coding/neteq/decoder_database.cc
+++ b/modules/audio_coding/neteq/decoder_database.cc
@@ -134,9 +134,13 @@
return Subtype::kNormal;
}
-bool DecoderDatabase::Empty() const { return decoders_.empty(); }
+bool DecoderDatabase::Empty() const {
+ return decoders_.empty();
+}
-int DecoderDatabase::Size() const { return static_cast<int>(decoders_.size()); }
+int DecoderDatabase::Size() const {
+ return static_cast<int>(decoders_.size());
+}
void DecoderDatabase::Reset() {
decoders_.clear();
@@ -276,7 +280,7 @@
int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
bool* new_decoder) {
// Check that |rtp_payload_type| exists in the database.
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
if (!info) {
// Decoder not found.
return kDecoderNotFound;
@@ -289,7 +293,7 @@
*new_decoder = true;
} else if (active_decoder_type_ != rtp_payload_type) {
// Moving from one active decoder to another. Delete the first one.
- const DecoderInfo *old_info = GetDecoderInfo(active_decoder_type_);
+ const DecoderInfo* old_info = GetDecoderInfo(active_decoder_type_);
RTC_DCHECK(old_info);
old_info->DropDecoder();
*new_decoder = true;
@@ -308,7 +312,7 @@
int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
// Check that |rtp_payload_type| exists in the database.
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
if (!info) {
// Decoder not found.
return kDecoderNotFound;
@@ -335,7 +339,7 @@
}
AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) const {
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
return info ? info->GetDecoder() : nullptr;
}
@@ -350,17 +354,17 @@
}
bool DecoderDatabase::IsComfortNoise(uint8_t rtp_payload_type) const {
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
return info && info->IsComfortNoise();
}
bool DecoderDatabase::IsDtmf(uint8_t rtp_payload_type) const {
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
return info && info->IsDtmf();
}
bool DecoderDatabase::IsRed(uint8_t rtp_payload_type) const {
- const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+ const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
return info && info->IsRed();
}
diff --git a/modules/audio_coding/neteq/decoder_database.h b/modules/audio_coding/neteq/decoder_database.h
index 6b388dd..107d2f3 100644
--- a/modules/audio_coding/neteq/decoder_database.h
+++ b/modules/audio_coding/neteq/decoder_database.h
@@ -88,14 +88,10 @@
}
// Returns true if the decoder's format is DTMF.
- bool IsDtmf() const {
- return subtype_ == Subtype::kDtmf;
- }
+ bool IsDtmf() const { return subtype_ == Subtype::kDtmf; }
// Returns true if the decoder's format is RED.
- bool IsRed() const {
- return subtype_ == Subtype::kRed;
- }
+ bool IsRed() const { return subtype_ == Subtype::kRed; }
// Returns true if the decoder's format is named |name|.
bool IsType(const char* name) const;
@@ -125,12 +121,7 @@
};
const absl::optional<CngDecoder> cng_decoder_;
- enum class Subtype : int8_t {
- kNormal,
- kComfortNoise,
- kDtmf,
- kRed
- };
+ enum class Subtype : int8_t { kNormal, kComfortNoise, kDtmf, kRed };
static Subtype SubtypeFromFormat(const SdpAudioFormat& format);
diff --git a/modules/audio_coding/neteq/decoder_database_unittest.cc b/modules/audio_coding/neteq/decoder_database_unittest.cc
index afd10ae..10043e0 100644
--- a/modules/audio_coding/neteq/decoder_database_unittest.cc
+++ b/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -110,7 +110,7 @@
EXPECT_EQ(kCodecName, info->get_name());
EXPECT_EQ(decoder, db.GetDecoder(kPayloadType));
info = db.GetDecoderInfo(kPayloadType + 1); // Other payload type.
- EXPECT_TRUE(info == NULL); // Should not be found.
+ EXPECT_TRUE(info == NULL); // Should not be found.
}
TEST(DecoderDatabase, GetDecoder) {
@@ -292,7 +292,6 @@
// Try to set non-existing codecs as active.
EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
db.SetActiveDecoder(17, &changed));
- EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
- db.SetActiveCngDecoder(17));
+ EXPECT_EQ(DecoderDatabase::kDecoderNotFound, db.SetActiveCngDecoder(17));
}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc
index b70131d..a945cdc 100644
--- a/modules/audio_coding/neteq/delay_manager.cc
+++ b/modules/audio_coding/neteq/delay_manager.cc
@@ -155,8 +155,9 @@
(packet_iat_stopwatch_->ElapsedMs() << 8) / packet_len_ms;
// Calculate cumulative sum IAT with sequence number compensation. The sum
// is zero if there is no clock-drift.
- iat_cumulative_sum_ += (iat_packets_q8 -
- (static_cast<int>(sequence_number - last_seq_no_) << 8));
+ iat_cumulative_sum_ +=
+ (iat_packets_q8 -
+ (static_cast<int>(sequence_number - last_seq_no_) << 8));
// Subtract drift term.
iat_cumulative_sum_ -= kCumulativeSumDrift;
// Ensure not negative.
@@ -189,8 +190,8 @@
assert(iat_packets < iat_vector_.size());
int vector_sum = 0; // Sum up the vector elements as they are processed.
// Multiply each element in |iat_vector_| with |iat_factor_|.
- for (IATVector::iterator it = iat_vector_.begin();
- it != iat_vector_.end(); ++it) {
+ for (IATVector::iterator it = iat_vector_.begin(); it != iat_vector_.end();
+ ++it) {
*it = (static_cast<int64_t>(*it) * iat_factor_) >> 15;
vector_sum += *it;
}
@@ -236,7 +237,7 @@
least_required_delay_ms_ = (target_level_ * packet_len_ms_) >> 8;
if (packet_len_ms_ > 0 && minimum_delay_ms_ > 0) {
- int minimum_delay_packet_q8 = (minimum_delay_ms_ << 8) / packet_len_ms_;
+ int minimum_delay_packet_q8 = (minimum_delay_ms_ << 8) / packet_len_ms_;
target_level_ = std::max(target_level_, minimum_delay_packet_q8);
}
@@ -269,8 +270,8 @@
// (in Q30) by definition, and since the solution is often a low value for
// |iat_index|, it is more efficient to start with |sum| = 1 and subtract
// elements from the start of the histogram.
- size_t index = 0; // Start from the beginning of |iat_vector_|.
- int sum = 1 << 30; // Assign to 1 in Q30.
+ size_t index = 0; // Start from the beginning of |iat_vector_|.
+ int sum = 1 << 30; // Assign to 1 in Q30.
sum -= iat_vector_[index]; // Ensure that target level is >= 1.
do {
@@ -313,13 +314,12 @@
return 0;
}
-
void DelayManager::Reset() {
packet_len_ms_ = 0; // Packet size unknown.
streaming_mode_ = false;
peak_detector_.Reset();
ResetHistogram(); // Resets target levels too.
- iat_factor_ = 0; // Adapt the histogram faster for the first few packets.
+ iat_factor_ = 0; // Adapt the histogram faster for the first few packets.
packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
max_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
iat_cumulative_sum_ = 0;
@@ -471,8 +471,12 @@
return least_required_delay_ms_;
}
-int DelayManager::base_target_level() const { return base_target_level_; }
-void DelayManager::set_streaming_mode(bool value) { streaming_mode_ = value; }
+int DelayManager::base_target_level() const {
+ return base_target_level_;
+}
+void DelayManager::set_streaming_mode(bool value) {
+ streaming_mode_ = value;
+}
int DelayManager::last_pack_cng_or_dtmf() const {
return last_pack_cng_or_dtmf_;
}
diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h
index 0d082c8..08004ea 100644
--- a/modules/audio_coding/neteq/delay_manager.h
+++ b/modules/audio_coding/neteq/delay_manager.h
@@ -117,9 +117,9 @@
virtual void set_last_pack_cng_or_dtmf(int value);
private:
- static const int kLimitProbability = 53687091; // 1/20 in Q30.
+ static const int kLimitProbability = 53687091; // 1/20 in Q30.
static const int kLimitProbabilityStreaming = 536871; // 1/2000 in Q30.
- static const int kMaxStreamingPeakPeriodMs = 600000; // 10 minutes in ms.
+ static const int kMaxStreamingPeakPeriodMs = 600000; // 10 minutes in ms.
static const int kCumulativeSumDrift = 2; // Drift term for cumulative sum
// |iat_cumulative_sum_|.
// Steady-state forgetting factor for |iat_vector_|, 0.9993 in Q15.
@@ -146,28 +146,29 @@
bool first_packet_received_;
const size_t max_packets_in_buffer_; // Capacity of the packet buffer.
- IATVector iat_vector_; // Histogram of inter-arrival times.
+ IATVector iat_vector_; // Histogram of inter-arrival times.
int iat_factor_; // Forgetting factor for updating the IAT histogram (Q15).
const TickTimer* tick_timer_;
// Time elapsed since last packet.
std::unique_ptr<TickTimer::Stopwatch> packet_iat_stopwatch_;
- int base_target_level_; // Currently preferred buffer level before peak
- // detection and streaming mode (Q0).
+ int base_target_level_; // Currently preferred buffer level before peak
+ // detection and streaming mode (Q0).
// TODO(turajs) change the comment according to the implementation of
// minimum-delay.
- int target_level_; // Currently preferred buffer level in (fractions)
- // of packets (Q8), before adding any extra delay.
+ int target_level_; // Currently preferred buffer level in (fractions)
+ // of packets (Q8), before adding any extra delay.
int packet_len_ms_; // Length of audio in each incoming packet [ms].
bool streaming_mode_;
- uint16_t last_seq_no_; // Sequence number for last received packet.
- uint32_t last_timestamp_; // Timestamp for the last received packet.
- int minimum_delay_ms_; // Externally set minimum delay.
+ uint16_t last_seq_no_; // Sequence number for last received packet.
+ uint32_t last_timestamp_; // Timestamp for the last received packet.
+ int minimum_delay_ms_; // Externally set minimum delay.
int least_required_delay_ms_; // Smallest preferred buffer level (same unit
- // as |target_level_|), before applying
- // |minimum_delay_ms_| and/or |maximum_delay_ms_|.
- int maximum_delay_ms_; // Externally set maximum allowed delay.
- int iat_cumulative_sum_; // Cumulative sum of delta inter-arrival times.
- int max_iat_cumulative_sum_; // Max of |iat_cumulative_sum_|.
+ // as |target_level_|), before applying
+ // |minimum_delay_ms_| and/or
+ // |maximum_delay_ms_|.
+ int maximum_delay_ms_; // Externally set maximum allowed delay.
+ int iat_cumulative_sum_; // Cumulative sum of delta inter-arrival times.
+ int max_iat_cumulative_sum_; // Max of |iat_cumulative_sum_|.
// Time elapsed since maximum was observed.
std::unique_ptr<TickTimer::Stopwatch> max_iat_stopwatch_;
DelayPeakDetector& peak_detector_;
diff --git a/modules/audio_coding/neteq/delay_manager_unittest.cc b/modules/audio_coding/neteq/delay_manager_unittest.cc
index 953bc6b..f9c5680 100644
--- a/modules/audio_coding/neteq/delay_manager_unittest.cc
+++ b/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -49,8 +49,7 @@
: dm_(NULL), detector_(&tick_timer_), seq_no_(0x1234), ts_(0x12345678) {}
void DelayManagerTest::SetUp() {
- EXPECT_CALL(detector_, Reset())
- .Times(1);
+ EXPECT_CALL(detector_, Reset()).Times(1);
dm_ = new DelayManager(kMaxNumberOfPackets, &detector_, &tick_timer_);
}
@@ -94,8 +93,7 @@
TEST_F(DelayManagerTest, SetPacketAudioLength) {
const int kLengthMs = 30;
// Expect DelayManager to pass on the new length to the detector object.
- EXPECT_CALL(detector_, SetPacketAudioLength(kLengthMs))
- .Times(1);
+ EXPECT_CALL(detector_, SetPacketAudioLength(kLengthMs)).Times(1);
EXPECT_EQ(0, dm_->SetPacketAudioLength(kLengthMs));
EXPECT_EQ(-1, dm_->SetPacketAudioLength(-1)); // Illegal parameter value.
}
@@ -121,8 +119,7 @@
// Expect detector update method to be called once with inter-arrival time
// equal to 1 packet, and (base) target level equal to 1 as well.
// Return false to indicate no peaks found.
- EXPECT_CALL(detector_, Update(1, 1))
- .WillOnce(Return(false));
+ EXPECT_CALL(detector_, Update(1, 1)).WillOnce(Return(false));
InsertNextPacket();
EXPECT_EQ(1 << 8, dm_->TargetLevel()); // In Q8.
EXPECT_EQ(1, dm_->base_target_level());
@@ -145,8 +142,7 @@
// Expect detector update method to be called once with inter-arrival time
// equal to 1 packet, and (base) target level equal to 1 as well.
// Return false to indicate no peaks found.
- EXPECT_CALL(detector_, Update(2, 2))
- .WillOnce(Return(false));
+ EXPECT_CALL(detector_, Update(2, 2)).WillOnce(Return(false));
InsertNextPacket();
EXPECT_EQ(2 << 8, dm_->TargetLevel()); // In Q8.
EXPECT_EQ(2, dm_->base_target_level());
@@ -169,10 +165,8 @@
// Expect detector update method to be called once with inter-arrival time
// equal to 1 packet, and (base) target level equal to 1 as well.
// Return true to indicate that peaks are found. Let the peak height be 5.
- EXPECT_CALL(detector_, Update(1, 1))
- .WillOnce(Return(true));
- EXPECT_CALL(detector_, MaxPeakHeight())
- .WillOnce(Return(5));
+ EXPECT_CALL(detector_, Update(1, 1)).WillOnce(Return(true));
+ EXPECT_CALL(detector_, MaxPeakHeight()).WillOnce(Return(5));
InsertNextPacket();
EXPECT_EQ(5 << 8, dm_->TargetLevel());
EXPECT_EQ(1, dm_->base_target_level()); // Base target level is w/o peaks.
@@ -193,8 +187,7 @@
// Expect detector update method to be called once with inter-arrival time
// equal to 1 packet, and (base) target level equal to 1 as well.
// Return false to indicate no peaks found.
- EXPECT_CALL(detector_, Update(1, 1))
- .WillOnce(Return(false));
+ EXPECT_CALL(detector_, Update(1, 1)).WillOnce(Return(false));
InsertNextPacket();
const int kExpectedTarget = 1;
EXPECT_EQ(kExpectedTarget << 8, dm_->TargetLevel()); // In Q8.
diff --git a/modules/audio_coding/neteq/delay_peak_detector_unittest.cc b/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
index 058ba66..fd4dded 100644
--- a/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
+++ b/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
@@ -65,8 +65,8 @@
int next = 1; // Start with the second packet to get a proper IAT.
while (next < kNumPackets) {
while (next < kNumPackets && arrival_times_ms[next] <= time) {
- int iat_packets = (arrival_times_ms[next] - arrival_times_ms[next - 1]) /
- kPacketSizeMs;
+ int iat_packets =
+ (arrival_times_ms[next] - arrival_times_ms[next - 1]) / kPacketSizeMs;
const int kTargetBufferLevel = 1; // Define peaks to be iat > 2.
if (time < peak_mode_start_ms || time > peak_mode_end_ms) {
EXPECT_FALSE(detector.Update(iat_packets, kTargetBufferLevel));
@@ -112,8 +112,8 @@
int next = 1; // Start with the second packet to get a proper IAT.
while (next < kNumPackets) {
while (next < kNumPackets && arrival_times_ms[next] <= time) {
- int iat_packets = (arrival_times_ms[next] - arrival_times_ms[next - 1]) /
- kPacketSizeMs;
+ int iat_packets =
+ (arrival_times_ms[next] - arrival_times_ms[next - 1]) / kPacketSizeMs;
const int kTargetBufferLevel = 2; // Define peaks to be iat > 4.
EXPECT_FALSE(detector.Update(iat_packets, kTargetBufferLevel));
++next;
diff --git a/modules/audio_coding/neteq/dsp_helper.cc b/modules/audio_coding/neteq/dsp_helper.cc
index 2a1d81b..05b0f70 100644
--- a/modules/audio_coding/neteq/dsp_helper.cc
+++ b/modules/audio_coding/neteq/dsp_helper.cc
@@ -21,41 +21,29 @@
// Table of constants used in method DspHelper::ParabolicFit().
const int16_t DspHelper::kParabolaCoefficients[17][3] = {
- { 120, 32, 64 },
- { 140, 44, 75 },
- { 150, 50, 80 },
- { 160, 57, 85 },
- { 180, 72, 96 },
- { 200, 89, 107 },
- { 210, 98, 112 },
- { 220, 108, 117 },
- { 240, 128, 128 },
- { 260, 150, 139 },
- { 270, 162, 144 },
- { 280, 174, 149 },
- { 300, 200, 160 },
- { 320, 228, 171 },
- { 330, 242, 176 },
- { 340, 257, 181 },
- { 360, 288, 192 } };
+ {120, 32, 64}, {140, 44, 75}, {150, 50, 80}, {160, 57, 85},
+ {180, 72, 96}, {200, 89, 107}, {210, 98, 112}, {220, 108, 117},
+ {240, 128, 128}, {260, 150, 139}, {270, 162, 144}, {280, 174, 149},
+ {300, 200, 160}, {320, 228, 171}, {330, 242, 176}, {340, 257, 181},
+ {360, 288, 192}};
// Filter coefficients used when downsampling from the indicated sample rates
// (8, 16, 32, 48 kHz) to 4 kHz. Coefficients are in Q12. The corresponding Q0
// values are provided in the comments before each array.
// Q0 values: {0.3, 0.4, 0.3}.
-const int16_t DspHelper::kDownsample8kHzTbl[3] = { 1229, 1638, 1229 };
+const int16_t DspHelper::kDownsample8kHzTbl[3] = {1229, 1638, 1229};
// Q0 values: {0.15, 0.2, 0.3, 0.2, 0.15}.
-const int16_t DspHelper::kDownsample16kHzTbl[5] = { 614, 819, 1229, 819, 614 };
+const int16_t DspHelper::kDownsample16kHzTbl[5] = {614, 819, 1229, 819, 614};
// Q0 values: {0.1425, 0.1251, 0.1525, 0.1628, 0.1525, 0.1251, 0.1425}.
-const int16_t DspHelper::kDownsample32kHzTbl[7] = {
- 584, 512, 625, 667, 625, 512, 584 };
+const int16_t DspHelper::kDownsample32kHzTbl[7] = {584, 512, 625, 667,
+ 625, 512, 584};
// Q0 values: {0.2487, 0.0952, 0.1042, 0.1074, 0.1042, 0.0952, 0.2487}.
-const int16_t DspHelper::kDownsample48kHzTbl[7] = {
- 1019, 390, 427, 440, 427, 390, 1019 };
+const int16_t DspHelper::kDownsample48kHzTbl[7] = {1019, 390, 427, 440,
+ 427, 390, 1019};
int DspHelper::RampSignal(const int16_t* input,
size_t length,
@@ -115,9 +103,12 @@
return end_factor;
}
-void DspHelper::PeakDetection(int16_t* data, size_t data_length,
- size_t num_peaks, int fs_mult,
- size_t* peak_index, int16_t* peak_value) {
+void DspHelper::PeakDetection(int16_t* data,
+ size_t data_length,
+ size_t num_peaks,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value) {
size_t min_index = 0;
size_t max_index = 0;
@@ -163,8 +154,10 @@
}
}
-void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
- size_t* peak_index, int16_t* peak_value) {
+void DspHelper::ParabolicFit(int16_t* signal_points,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value) {
uint16_t fit_index[13];
if (fs_mult == 1) {
fit_index[0] = 0;
@@ -204,23 +197,26 @@
// num = -3 * signal_points[0] + 4 * signal_points[1] - signal_points[2];
// den = signal_points[0] - 2 * signal_points[1] + signal_points[2];
- int32_t num = (signal_points[0] * -3) + (signal_points[1] * 4)
- - signal_points[2];
+ int32_t num =
+ (signal_points[0] * -3) + (signal_points[1] * 4) - signal_points[2];
int32_t den = signal_points[0] + (signal_points[1] * -2) + signal_points[2];
int32_t temp = num * 120;
int flag = 1;
- int16_t stp = kParabolaCoefficients[fit_index[fs_mult]][0]
- - kParabolaCoefficients[fit_index[fs_mult - 1]][0];
- int16_t strt = (kParabolaCoefficients[fit_index[fs_mult]][0]
- + kParabolaCoefficients[fit_index[fs_mult - 1]][0]) / 2;
+ int16_t stp = kParabolaCoefficients[fit_index[fs_mult]][0] -
+ kParabolaCoefficients[fit_index[fs_mult - 1]][0];
+ int16_t strt = (kParabolaCoefficients[fit_index[fs_mult]][0] +
+ kParabolaCoefficients[fit_index[fs_mult - 1]][0]) /
+ 2;
int16_t lmt;
if (temp < -den * strt) {
lmt = strt - stp;
while (flag) {
if ((flag == fs_mult) || (temp > -den * lmt)) {
- *peak_value = (den * kParabolaCoefficients[fit_index[fs_mult - flag]][1]
- + num * kParabolaCoefficients[fit_index[fs_mult - flag]][2]
- + signal_points[0] * 256) / 256;
+ *peak_value =
+ (den * kParabolaCoefficients[fit_index[fs_mult - flag]][1] +
+ num * kParabolaCoefficients[fit_index[fs_mult - flag]][2] +
+ signal_points[0] * 256) /
+ 256;
*peak_index = *peak_index * 2 * fs_mult - flag;
flag = 0;
} else {
@@ -233,9 +229,9 @@
while (flag) {
if ((flag == fs_mult) || (temp < -den * lmt)) {
int32_t temp_term_1 =
- den * kParabolaCoefficients[fit_index[fs_mult+flag]][1];
+ den * kParabolaCoefficients[fit_index[fs_mult + flag]][1];
int32_t temp_term_2 =
- num * kParabolaCoefficients[fit_index[fs_mult+flag]][2];
+ num * kParabolaCoefficients[fit_index[fs_mult + flag]][2];
int32_t temp_term_3 = signal_points[0] * 256;
*peak_value = (temp_term_1 + temp_term_2 + temp_term_3) / 256;
*peak_index = *peak_index * 2 * fs_mult + flag;
@@ -251,8 +247,10 @@
}
}
-size_t DspHelper::MinDistortion(const int16_t* signal, size_t min_lag,
- size_t max_lag, size_t length,
+size_t DspHelper::MinDistortion(const int16_t* signal,
+ size_t min_lag,
+ size_t max_lag,
+ size_t length,
int32_t* distortion_value) {
size_t best_index = 0;
int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
@@ -273,9 +271,12 @@
return best_index;
}
-void DspHelper::CrossFade(const int16_t* input1, const int16_t* input2,
- size_t length, int16_t* mix_factor,
- int16_t factor_decrement, int16_t* output) {
+void DspHelper::CrossFade(const int16_t* input1,
+ const int16_t* input2,
+ size_t length,
+ int16_t* mix_factor,
+ int16_t factor_decrement,
+ int16_t* output) {
int16_t factor = *mix_factor;
int16_t complement_factor = 16384 - factor;
for (size_t i = 0; i < length; i++) {
@@ -287,8 +288,10 @@
*mix_factor = factor;
}
-void DspHelper::UnmuteSignal(const int16_t* input, size_t length,
- int16_t* factor, int increment,
+void DspHelper::UnmuteSignal(const int16_t* input,
+ size_t length,
+ int16_t* factor,
+ int increment,
int16_t* output) {
uint16_t factor_16b = *factor;
int32_t factor_32b = (static_cast<int32_t>(factor_16b) << 6) + 32;
@@ -308,17 +311,20 @@
}
}
-int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
- size_t output_length, int input_rate_hz,
- bool compensate_delay, int16_t* output) {
+int DspHelper::DownsampleTo4kHz(const int16_t* input,
+ size_t input_length,
+ size_t output_length,
+ int input_rate_hz,
+ bool compensate_delay,
+ int16_t* output) {
// Set filter parameters depending on input frequency.
// NOTE: The phase delay values are wrong compared to the true phase delay
// of the filters. However, the error is preserved (through the +1 term) for
// consistency.
const int16_t* filter_coefficients; // Filter coefficients.
- size_t filter_length; // Number of coefficients.
- size_t filter_delay; // Phase delay in samples.
- int16_t factor; // Conversion rate (inFsHz / 8000).
+ size_t filter_length; // Number of coefficients.
+ size_t filter_delay; // Phase delay in samples.
+ int16_t factor; // Conversion rate (inFsHz / 8000).
switch (input_rate_hz) {
case 8000: {
filter_length = 3;
diff --git a/modules/audio_coding/neteq/dsp_helper.h b/modules/audio_coding/neteq/dsp_helper.h
index 7ceb66f..8940acd 100644
--- a/modules/audio_coding/neteq/dsp_helper.h
+++ b/modules/audio_coding/neteq/dsp_helper.h
@@ -85,9 +85,12 @@
// locations and values are written to the arrays |peak_index| and
// |peak_value|, respectively. Both arrays must hold at least |num_peaks|
// elements.
- static void PeakDetection(int16_t* data, size_t data_length,
- size_t num_peaks, int fs_mult,
- size_t* peak_index, int16_t* peak_value);
+ static void PeakDetection(int16_t* data,
+ size_t data_length,
+ size_t num_peaks,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value);
// Estimates the height and location of a maximum. The three values in the
// array |signal_points| are used as basis for a parabolic fit, which is then
@@ -95,30 +98,40 @@
// assumed to be from a 4 kHz signal, while the maximum, written to
// |peak_index| and |peak_value| is given in the full sample rate, as
// indicated by the sample rate multiplier |fs_mult|.
- static void ParabolicFit(int16_t* signal_points, int fs_mult,
- size_t* peak_index, int16_t* peak_value);
+ static void ParabolicFit(int16_t* signal_points,
+ int fs_mult,
+ size_t* peak_index,
+ int16_t* peak_value);
// Calculates the sum-abs-diff for |signal| when compared to a displaced
// version of itself. Returns the displacement lag that results in the minimum
// distortion. The resulting distortion is written to |distortion_value|.
// The values of |min_lag| and |max_lag| are boundaries for the search.
- static size_t MinDistortion(const int16_t* signal, size_t min_lag,
- size_t max_lag, size_t length,
- int32_t* distortion_value);
+ static size_t MinDistortion(const int16_t* signal,
+ size_t min_lag,
+ size_t max_lag,
+ size_t length,
+ int32_t* distortion_value);
// Mixes |length| samples from |input1| and |input2| together and writes the
// result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
// is decreased by |factor_decrement| (Q14) for each sample. The gain for
// |input2| is the complement 16384 - mix_factor.
- static void CrossFade(const int16_t* input1, const int16_t* input2,
- size_t length, int16_t* mix_factor,
- int16_t factor_decrement, int16_t* output);
+ static void CrossFade(const int16_t* input1,
+ const int16_t* input2,
+ size_t length,
+ int16_t* mix_factor,
+ int16_t factor_decrement,
+ int16_t* output);
// Scales |input| with an increasing gain. Applies |factor| (Q14) to the first
// sample and increases the gain by |increment| (Q20) for each sample. The
// result is written to |output|. |length| samples are processed.
- static void UnmuteSignal(const int16_t* input, size_t length, int16_t* factor,
- int increment, int16_t* output);
+ static void UnmuteSignal(const int16_t* input,
+ size_t length,
+ int16_t* factor,
+ int increment,
+ int16_t* output);
// Starts at unity gain and gradually fades out |signal|. For each sample,
// the gain is reduced by |mute_slope| (Q14). |length| samples are processed.
@@ -129,9 +142,12 @@
// samples to |output|. Compensates for the phase delay of the downsampling
// filters if |compensate_delay| is true. Returns -1 if the input is too short
// to produce |output_length| samples, otherwise 0.
- static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
- size_t output_length, int input_rate_hz,
- bool compensate_delay, int16_t* output);
+ static int DownsampleTo4kHz(const int16_t* input,
+ size_t input_length,
+ size_t output_length,
+ int input_rate_hz,
+ bool compensate_delay,
+ int16_t* output);
private:
// Table of constants used in method DspHelper::ParabolicFit().
diff --git a/modules/audio_coding/neteq/dsp_helper_unittest.cc b/modules/audio_coding/neteq/dsp_helper_unittest.cc
index 98ae2a2..9d5da5d 100644
--- a/modules/audio_coding/neteq/dsp_helper_unittest.cc
+++ b/modules/audio_coding/neteq/dsp_helper_unittest.cc
@@ -30,8 +30,8 @@
int increment = (16384 << 6) / kLen;
// Test first method.
- int stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment,
- output);
+ int stop_factor =
+ DspHelper::RampSignal(input, kLen, start_factor, increment, output);
EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
for (int i = 0; i < kLen; ++i) {
EXPECT_EQ(1000 * i / kLen, output[i]);
@@ -63,8 +63,8 @@
// Q20, while the factor is in Q14, hence the shift by 6.
int increment = (16384 << 6) / kLen;
- int stop_factor = DspHelper::RampSignal(&input, start_index, kLen,
- start_factor, increment);
+ int stop_factor =
+ DspHelper::RampSignal(&input, start_index, kLen, start_factor, increment);
EXPECT_EQ(16383, stop_factor); // Almost reach 1 in Q14.
// Verify that the first |kLen| samples are left untouched.
int i;
diff --git a/modules/audio_coding/neteq/dtmf_buffer.cc b/modules/audio_coding/neteq/dtmf_buffer.cc
index 370de42..656cff9 100644
--- a/modules/audio_coding/neteq/dtmf_buffer.cc
+++ b/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -98,9 +98,8 @@
// already in the buffer. If so, the new event is simply merged with the
// existing one.
int DtmfBuffer::InsertEvent(const DtmfEvent& event) {
- if (event.event_no < 0 || event.event_no > 15 ||
- event.volume < 0 || event.volume > 63 ||
- event.duration <= 0 || event.duration > 65535) {
+ if (event.event_no < 0 || event.event_no > 15 || event.volume < 0 ||
+ event.volume > 63 || event.duration <= 0 || event.duration > 65535) {
RTC_LOG(LS_WARNING) << "InsertEvent invalid parameters";
return kInvalidEventParameters;
}
@@ -142,8 +141,8 @@
#endif
}
}
- if (current_timestamp >= it->timestamp
- && current_timestamp <= event_end) { // TODO(hlundin): Change to <.
+ if (current_timestamp >= it->timestamp &&
+ current_timestamp <= event_end) { // TODO(hlundin): Change to <.
// Found a matching event.
if (event) {
event->event_no = it->event_no;
@@ -153,16 +152,15 @@
event->timestamp = it->timestamp;
}
#ifdef LEGACY_BITEXACT
- if (it->end_bit &&
- current_timestamp + frame_len_samples_ >= event_end) {
+ if (it->end_bit && current_timestamp + frame_len_samples_ >= event_end) {
// We are done playing this. Erase the event.
buffer_.erase(it);
}
#endif
return true;
} else if (current_timestamp > event_end) { // TODO(hlundin): Change to >=.
- // Erase old event. Operation returns a valid pointer to the next element
- // in the list.
+// Erase old event. Operation returns a valid pointer to the next element
+// in the list.
#ifdef LEGACY_BITEXACT
if (!next_available) {
if (event) {
@@ -196,10 +194,7 @@
}
int DtmfBuffer::SetSampleRate(int fs_hz) {
- if (fs_hz != 8000 &&
- fs_hz != 16000 &&
- fs_hz != 32000 &&
- fs_hz != 48000) {
+ if (fs_hz != 8000 && fs_hz != 16000 && fs_hz != 32000 && fs_hz != 48000) {
return kInvalidSampleRate;
}
max_extrapolation_samples_ = 7 * fs_hz / 100;
diff --git a/modules/audio_coding/neteq/dtmf_buffer.h b/modules/audio_coding/neteq/dtmf_buffer.h
index 87a5655..1035e87 100644
--- a/modules/audio_coding/neteq/dtmf_buffer.h
+++ b/modules/audio_coding/neteq/dtmf_buffer.h
@@ -28,19 +28,9 @@
// Constructors
DtmfEvent()
- : timestamp(0),
- event_no(0),
- volume(0),
- duration(0),
- end_bit(false) {
- }
+ : timestamp(0), event_no(0), volume(0), duration(0), end_bit(false) {}
DtmfEvent(uint32_t ts, int ev, int vol, int dur, bool end)
- : timestamp(ts),
- event_no(ev),
- volume(vol),
- duration(dur),
- end_bit(end) {
- }
+ : timestamp(ts), event_no(ev), volume(vol), duration(dur), end_bit(end) {}
};
// This is the buffer holding DTMF events while waiting for them to be played.
diff --git a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
index 7bcf1e0..607a5ec 100644
--- a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
+++ b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
@@ -31,11 +31,11 @@
static uint32_t MakeDtmfPayload(int event, bool end, int volume, int duration) {
uint32_t payload = 0;
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | event |E|R| volume | duration |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ // | event |E|R| volume | duration |
+ // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
payload |= (event & 0x00FF) << 24;
payload |= (end ? 0x00800000 : 0x00000000);
payload |= (volume & 0x003F) << 16;
@@ -44,13 +44,10 @@
return payload;
}
-static bool EqualEvents(const DtmfEvent& a,
- const DtmfEvent& b) {
- return (a.duration == b.duration
- && a.end_bit == b.end_bit
- && a.event_no == b.event_no
- && a.timestamp == b.timestamp
- && a.volume == b.volume);
+static bool EqualEvents(const DtmfEvent& a, const DtmfEvent& b) {
+ return (a.duration == b.duration && a.end_bit == b.end_bit &&
+ a.event_no == b.event_no && a.timestamp == b.timestamp &&
+ a.volume == b.volume);
}
TEST(DtmfBuffer, CreateAndDestroy) {
@@ -68,9 +65,8 @@
uint32_t payload = MakeDtmfPayload(event_no, end_bit, volume, duration);
uint8_t* payload_ptr = reinterpret_cast<uint8_t*>(&payload);
DtmfEvent event;
- EXPECT_EQ(DtmfBuffer::kOK,
- DtmfBuffer::ParseEvent(timestamp, payload_ptr, sizeof(payload),
- &event));
+ EXPECT_EQ(DtmfBuffer::kOK, DtmfBuffer::ParseEvent(timestamp, payload_ptr,
+ sizeof(payload), &event));
EXPECT_EQ(duration, event.duration);
EXPECT_EQ(end_bit, event.end_bit);
EXPECT_EQ(event_no, event.event_no);
@@ -107,7 +103,7 @@
EXPECT_TRUE(EqualEvents(event, out_event));
EXPECT_EQ(1u, buffer.Length());
EXPECT_FALSE(buffer.Empty());
- // Give a "current" timestamp after the event has ended.
+// Give a "current" timestamp after the event has ended.
#ifdef LEGACY_BITEXACT
EXPECT_TRUE(buffer.GetEvent(timestamp + duration + 10, &out_event));
#endif
@@ -171,17 +167,17 @@
// Expect to get the long event.
EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
EXPECT_TRUE(EqualEvents(long_event, out_event));
- // Expect no more events.
+// Expect no more events.
#ifdef LEGACY_BITEXACT
- EXPECT_TRUE(buffer.GetEvent(timestamp + long_event.duration + 10,
- &out_event));
+ EXPECT_TRUE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
EXPECT_TRUE(EqualEvents(long_event, out_event));
- EXPECT_TRUE(buffer.GetEvent(timestamp + long_event.duration + 10,
- &out_event));
+ EXPECT_TRUE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
EXPECT_TRUE(EqualEvents(short_event, out_event));
#else
- EXPECT_FALSE(buffer.GetEvent(timestamp + long_event.duration + 10,
- &out_event));
+ EXPECT_FALSE(
+ buffer.GetEvent(timestamp + long_event.duration + 10, &out_event));
#endif
EXPECT_TRUE(buffer.Empty());
}
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.cc b/modules/audio_coding/neteq/dtmf_tone_generator.cc
index b848c60..6fdb95a 100644
--- a/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -39,72 +39,69 @@
// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
// Values are in Q14.
const int DtmfToneGenerator::kCoeff1[4][16] = {
- { 24219, 27980, 27980, 27980, 26956, 26956, 26956, 25701, 25701, 25701,
- 24219, 24219, 27980, 26956, 25701, 24219 },
- { 30556, 31548, 31548, 31548, 31281, 31281, 31281, 30951, 30951, 30951,
- 30556, 30556, 31548, 31281, 30951, 30556 },
- { 32210, 32462, 32462, 32462, 32394, 32394, 32394, 32311, 32311, 32311,
- 32210, 32210, 32462, 32394, 32311, 32210 },
- { 32520, 32632, 32632, 32632, 32602, 32602, 32602, 32564, 32564, 32564,
- 32520, 32520, 32632, 32602, 32564, 32520 } };
+ {24219, 27980, 27980, 27980, 26956, 26956, 26956, 25701, 25701, 25701,
+ 24219, 24219, 27980, 26956, 25701, 24219},
+ {30556, 31548, 31548, 31548, 31281, 31281, 31281, 30951, 30951, 30951,
+ 30556, 30556, 31548, 31281, 30951, 30556},
+ {32210, 32462, 32462, 32462, 32394, 32394, 32394, 32311, 32311, 32311,
+ 32210, 32210, 32462, 32394, 32311, 32210},
+ {32520, 32632, 32632, 32632, 32602, 32602, 32602, 32564, 32564, 32564,
+ 32520, 32520, 32632, 32602, 32564, 32520}};
// The filter coefficient a = 2*cos(2*pi*f/fs) for the high frequency tone, for
// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
// Values are in Q14.
const int DtmfToneGenerator::kCoeff2[4][16] = {
- { 16325, 19073, 16325, 13085, 19073, 16325, 13085, 19073, 16325, 13085,
- 19073, 13085, 9315, 9315, 9315, 9315},
- { 28361, 29144, 28361, 27409, 29144, 28361, 27409, 29144, 28361, 27409,
- 29144, 27409, 26258, 26258, 26258, 26258},
- { 31647, 31849, 31647, 31400, 31849, 31647, 31400, 31849, 31647, 31400,
- 31849, 31400, 31098, 31098, 31098, 31098},
- { 32268, 32359, 32268, 32157, 32359, 32268, 32157, 32359, 32268, 32157,
- 32359, 32157, 32022, 32022, 32022, 32022} };
+ {16325, 19073, 16325, 13085, 19073, 16325, 13085, 19073, 16325, 13085,
+ 19073, 13085, 9315, 9315, 9315, 9315},
+ {28361, 29144, 28361, 27409, 29144, 28361, 27409, 29144, 28361, 27409,
+ 29144, 27409, 26258, 26258, 26258, 26258},
+ {31647, 31849, 31647, 31400, 31849, 31647, 31400, 31849, 31647, 31400,
+ 31849, 31400, 31098, 31098, 31098, 31098},
+ {32268, 32359, 32268, 32157, 32359, 32268, 32157, 32359, 32268, 32157,
+ 32359, 32157, 32022, 32022, 32022, 32022}};
// The initialization value x[-2] = sin(2*pi*f/fs) for the low frequency tone,
// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
// Values are in Q14.
const int DtmfToneGenerator::kInitValue1[4][16] = {
- { 11036, 8528, 8528, 8528, 9315, 9315, 9315, 10163, 10163, 10163, 11036,
- 11036, 8528, 9315, 10163, 11036},
- { 5918, 4429, 4429, 4429, 4879, 4879, 4879, 5380, 5380, 5380, 5918, 5918,
- 4429, 4879, 5380, 5918},
- { 3010, 2235, 2235, 2235, 2468, 2468, 2468, 2728, 2728, 2728, 3010, 3010,
- 2235, 2468, 2728, 3010},
- { 2013, 1493, 1493, 1493, 1649, 1649, 1649, 1823, 1823, 1823, 2013, 2013,
- 1493, 1649, 1823, 2013 } };
+ {11036, 8528, 8528, 8528, 9315, 9315, 9315, 10163, 10163, 10163, 11036,
+ 11036, 8528, 9315, 10163, 11036},
+ {5918, 4429, 4429, 4429, 4879, 4879, 4879, 5380, 5380, 5380, 5918, 5918,
+ 4429, 4879, 5380, 5918},
+ {3010, 2235, 2235, 2235, 2468, 2468, 2468, 2728, 2728, 2728, 3010, 3010,
+ 2235, 2468, 2728, 3010},
+ {2013, 1493, 1493, 1493, 1649, 1649, 1649, 1823, 1823, 1823, 2013, 2013,
+ 1493, 1649, 1823, 2013}};
// The initialization value x[-2] = sin(2*pi*f/fs) for the high frequency tone,
// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
// Values are in Q14.
const int DtmfToneGenerator::kInitValue2[4][16] = {
- { 14206, 13323, 14206, 15021, 13323, 14206, 15021, 13323, 14206, 15021,
- 13323, 15021, 15708, 15708, 15708, 15708},
- { 8207, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8979,
- 9801, 9801, 9801, 9801},
- { 4249, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4685,
- 5164, 5164, 5164, 5164},
- { 2851, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 3148,
- 3476, 3476, 3476, 3476} };
+ {14206, 13323, 14206, 15021, 13323, 14206, 15021, 13323, 14206, 15021,
+ 13323, 15021, 15708, 15708, 15708, 15708},
+ {8207, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8979,
+ 9801, 9801, 9801, 9801},
+ {4249, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4685,
+ 5164, 5164, 5164, 5164},
+ {2851, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 3148,
+ 3476, 3476, 3476, 3476}};
// Amplitude multipliers for volume values 0 through 63, corresponding to
// 0 dBm0 through -63 dBm0. Values are in Q14.
// for a in range(0, 64):
// print round(16141.0 * 10**(-float(a)/20))
const int DtmfToneGenerator::kAmplitude[64] = {
- 16141, 14386, 12821, 11427, 10184, 9077, 8090, 7210, 6426, 5727, 5104, 4549,
- 4054, 3614, 3221, 2870, 2558, 2280, 2032, 1811, 1614, 1439, 1282, 1143,
- 1018, 908, 809, 721, 643, 573, 510, 455, 405, 361, 322, 287, 256, 228, 203,
- 181, 161, 144, 128, 114, 102, 91, 81, 72, 64, 57, 51, 45, 41, 36, 32, 29,
- 26, 23, 20, 18, 16, 14, 13, 11 };
+ 16141, 14386, 12821, 11427, 10184, 9077, 8090, 7210, 6426, 5727, 5104,
+ 4549, 4054, 3614, 3221, 2870, 2558, 2280, 2032, 1811, 1614, 1439,
+ 1282, 1143, 1018, 908, 809, 721, 643, 573, 510, 455, 405,
+ 361, 322, 287, 256, 228, 203, 181, 161, 144, 128, 114,
+ 102, 91, 81, 72, 64, 57, 51, 45, 41, 36, 32,
+ 29, 26, 23, 20, 18, 16, 14, 13, 11};
// Constructor.
DtmfToneGenerator::DtmfToneGenerator()
- : initialized_(false),
- coeff1_(0),
- coeff2_(0),
- amplitude_(0) {
-}
+ : initialized_(false), coeff1_(0), coeff2_(0), amplitude_(0) {}
// Initialize the DTMF generator with sample rate fs Hz (8000, 16000, 32000,
// 48000), event (0-15) and attenuation (0-36 dB).
@@ -170,8 +167,7 @@
}
// Generate num_samples of DTMF signal and write to |output|.
-int DtmfToneGenerator::Generate(size_t num_samples,
- AudioMultiVector* output) {
+int DtmfToneGenerator::Generate(size_t num_samples, AudioMultiVector* output) {
if (!initialized_) {
return kNotInitialized;
}
@@ -183,10 +179,10 @@
output->AssertSize(num_samples);
for (size_t i = 0; i < num_samples; ++i) {
// Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
- int16_t temp_val_low = ((coeff1_ * sample_history1_[1] + 8192) >> 14)
- - sample_history1_[0];
- int16_t temp_val_high = ((coeff2_ * sample_history2_[1] + 8192) >> 14)
- - sample_history2_[0];
+ int16_t temp_val_low =
+ ((coeff1_ * sample_history1_[1] + 8192) >> 14) - sample_history1_[0];
+ int16_t temp_val_high =
+ ((coeff2_ * sample_history2_[1] + 8192) >> 14) - sample_history2_[0];
// Update recursion memory.
sample_history1_[0] = sample_history1_[1];
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.h b/modules/audio_coding/neteq/dtmf_tone_generator.h
index faad6a2..b91d221 100644
--- a/modules/audio_coding/neteq/dtmf_tone_generator.h
+++ b/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -37,7 +37,7 @@
static const int kCoeff2[4][16]; // 2nd oscillator model coefficient table.
static const int kInitValue1[4][16]; // Initialization for 1st oscillator.
static const int kInitValue2[4][16]; // Initialization for 2nd oscillator.
- static const int kAmplitude[64]; // Amplitude for 0 through -63 dBm0.
+ static const int kAmplitude[64]; // Amplitude for 0 through -63 dBm0.
static const int16_t kAmpMultiplier = 23171; // 3 dB attenuation (in Q15).
bool initialized_; // True if generator is initialized properly.
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
index 8c22fe5..11a0ac6 100644
--- a/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
+++ b/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -84,8 +84,7 @@
// Verify that the attenuation is correct.
for (int channel = 0; channel < channels; ++channel) {
EXPECT_NEAR(attenuation_factor * ref_signal[channel][n],
- signal[channel][n],
- 2);
+ signal[channel][n], 2);
}
}
diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc
index 73e8d07..5f671ad 100644
--- a/modules/audio_coding/neteq/expand.cc
+++ b/modules/audio_coding/neteq/expand.cc
@@ -14,7 +14,7 @@
#include <string.h> // memset
#include <algorithm> // min, max
-#include <limits> // numeric_limits<T>
+#include <limits> // numeric_limits<T>
#include "common_audio/signal_processing/include/signal_processing_library.h"
#include "modules/audio_coding/neteq/background_noise.h"
@@ -94,7 +94,6 @@
GenerateRandomVector(2, rand_length, random_vector);
}
-
// Generate signal.
UpdateLagIndex();
@@ -103,8 +102,8 @@
size_t expansion_vector_length = max_lag_ + overlap_length_;
size_t current_lag = expand_lags_[current_lag_index_];
// Copy lag+overlap data.
- size_t expansion_vector_position = expansion_vector_length - current_lag -
- overlap_length_;
+ size_t expansion_vector_position =
+ expansion_vector_length - current_lag - overlap_length_;
size_t temp_length = current_lag + overlap_length_;
for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
ChannelParameters& parameters = channel_parameters_[channel_ix];
@@ -175,8 +174,10 @@
// Do overlap add between new vector and overlap.
(*sync_buffer_)[channel_ix][start_ix + i] =
(((*sync_buffer_)[channel_ix][start_ix + i] * muting_window) +
- (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
- unmuting_window) + 16384) >> 15;
+ (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
+ unmuting_window) +
+ 16384) >>
+ 15;
muting_window += muting_window_increment;
unmuting_window += unmuting_window_increment;
}
@@ -188,10 +189,10 @@
// parameters.expand_vector0 and parameters.expand_vector1 no longer
// match with expand_lags_, causing invalid reads and writes. Is it a good
// idea to enable this again, and solve the vector size problem?
-// max_lag_ = fs_mult * 120;
-// expand_lags_[0] = fs_mult * 120;
-// expand_lags_[1] = fs_mult * 120;
-// expand_lags_[2] = fs_mult * 120;
+ // max_lag_ = fs_mult * 120;
+ // expand_lags_[0] = fs_mult * 120;
+ // expand_lags_[1] = fs_mult * 120;
+ // expand_lags_[2] = fs_mult * 120;
}
// Unvoiced part.
@@ -204,8 +205,7 @@
}
WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
parameters.ar_gain, add_constant,
- parameters.ar_gain_scale,
- current_lag);
+ parameters.ar_gain_scale, current_lag);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
parameters.ar_filter, kUnvoicedLpcOrder + 1,
current_lag);
@@ -230,8 +230,9 @@
// Create combined signal by shifting in more and more of unvoiced part.
temp_shift = 8 - temp_shift; // = getbits(mix_factor_increment).
- size_t temp_length = (parameters.current_voice_mix_factor -
- parameters.voice_mix_factor) >> temp_shift;
+ size_t temp_length =
+ (parameters.current_voice_mix_factor - parameters.voice_mix_factor) >>
+ temp_shift;
temp_length = std::min(temp_length, current_lag);
DspHelper::CrossFade(voiced_vector, unvoiced_vector, temp_length,
¶meters.current_voice_mix_factor,
@@ -266,9 +267,8 @@
// Mute segment according to slope value.
if ((consecutive_expands_ != 0) || !parameters.onset) {
// Mute to the previous level, then continue with the muting.
- WebRtcSpl_AffineTransformVector(temp_data, temp_data,
- parameters.mute_factor, 8192,
- 14, current_lag);
+ WebRtcSpl_AffineTransformVector(
+ temp_data, temp_data, parameters.mute_factor, 8192, 14, current_lag);
if (!stop_muting_) {
DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
@@ -276,8 +276,8 @@
// Shift by 6 to go from Q20 to Q14.
// TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
// Legacy.
- int16_t gain = static_cast<int16_t>(16384 -
- (((current_lag * parameters.mute_slope) + 8192) >> 6));
+ int16_t gain = static_cast<int16_t>(
+ 16384 - (((current_lag * parameters.mute_slope) + 8192) >> 6));
gain = ((gain * parameters.mute_factor) + 8192) >> 14;
// Guard against getting stuck with very small (but sometimes audible)
@@ -291,12 +291,9 @@
}
// Background noise part.
- GenerateBackgroundNoise(random_vector,
- channel_ix,
- channel_parameters_[channel_ix].mute_slope,
- TooManyExpands(),
- current_lag,
- unvoiced_array_memory);
+ GenerateBackgroundNoise(
+ random_vector, channel_ix, channel_parameters_[channel_ix].mute_slope,
+ TooManyExpands(), current_lag, unvoiced_array_memory);
// Add background noise to the combined voiced-unvoiced signal.
for (size_t i = 0; i < current_lag; i++) {
@@ -311,8 +308,9 @@
}
// Increase call number and cap it.
- consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands ?
- kMaxConsecutiveExpands : consecutive_expands_ + 1;
+ consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands
+ ? kMaxConsecutiveExpands
+ : consecutive_expands_ + 1;
expand_duration_samples_ += output->Size();
// Clamp the duration counter at 2 seconds.
expand_duration_samples_ = std::min(expand_duration_samples_,
@@ -329,7 +327,7 @@
}
void Expand::SetParametersForMergeAfterExpand() {
- current_lag_index_ = -1; /* out of the 3 possible ones */
+ current_lag_index_ = -1; /* out of the 3 possible ones */
lag_index_direction_ = 1; /* make sure we get the "optimal" lag */
stop_muting_ = true;
}
@@ -357,7 +355,7 @@
consecutive_expands_ = 0;
for (size_t ix = 0; ix < num_channels_; ++ix) {
channel_parameters_[ix].current_voice_mix_factor = 16384; // 1.0 in Q14.
- channel_parameters_[ix].mute_factor = 16384; // 1.0 in Q14.
+ channel_parameters_[ix].mute_factor = 16384; // 1.0 in Q14.
// Start with 0 gain for background noise.
background_noise_->SetMuteFactor(ix, 0);
}
@@ -420,10 +418,10 @@
// Calculate distortion around the |kNumCorrelationCandidates| best lags.
int distortion_scale = 0;
for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
- size_t min_index = std::max(fs_mult_20,
- best_correlation_index[i] - fs_mult_4);
- size_t max_index = std::min(fs_mult_120 - 1,
- best_correlation_index[i] + fs_mult_4);
+ size_t min_index =
+ std::max(fs_mult_20, best_correlation_index[i] - fs_mult_4);
+ size_t max_index =
+ std::min(fs_mult_120 - 1, best_correlation_index[i] + fs_mult_4);
best_distortion_index[i] = DspHelper::MinDistortion(
&(audio_history[signal_length - fs_mult_dist_len]), min_index,
max_index, fs_mult_dist_len, &best_distortion_w32[i]);
@@ -459,23 +457,23 @@
// Calculate the exact best correlation in the range between
// |correlation_lag| and |distortion_lag|.
- correlation_length =
- std::max(std::min(distortion_lag + 10, fs_mult_120),
- static_cast<size_t>(60 * fs_mult));
+ correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120),
+ static_cast<size_t>(60 * fs_mult));
size_t start_index = std::min(distortion_lag, correlation_lag);
size_t correlation_lags = static_cast<size_t>(
- WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1);
+ WEBRTC_SPL_ABS_W16((distortion_lag - correlation_lag)) + 1);
assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
ChannelParameters& parameters = channel_parameters_[channel_ix];
// Calculate suitable scaling.
int16_t signal_max = WebRtcSpl_MaxAbsValueW16(
- &audio_history[signal_length - correlation_length - start_index
- - correlation_lags],
- correlation_length + start_index + correlation_lags - 1);
- int correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
+ &audio_history[signal_length - correlation_length - start_index -
+ correlation_lags],
+ correlation_length + start_index + correlation_lags - 1);
+ int correlation_scale =
+ (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
(31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
correlation_scale = std::max(0, correlation_scale);
@@ -520,8 +518,8 @@
// Calculate max_correlation / sqrt(energy1 * energy2) in Q14.
int cc_shift = 14 - (energy1_scale + energy2_scale) / 2;
max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift);
- corr_coefficient = WebRtcSpl_DivW32W16(max_correlation,
- sqrt_energy_product);
+ corr_coefficient =
+ WebRtcSpl_DivW32W16(max_correlation, sqrt_energy_product);
// Cap at 1.0 in Q14.
corr_coefficient = std::min(16384, corr_coefficient);
} else {
@@ -547,9 +545,9 @@
int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
int32_t scaled_energy1 = scaled_energy2 - 13;
// Calculate scaled_energy1 / scaled_energy2 in Q13.
- int32_t energy_ratio = WebRtcSpl_DivW32W16(
- WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
- static_cast<int16_t>(energy2 >> scaled_energy2));
+ int32_t energy_ratio =
+ WebRtcSpl_DivW32W16(WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
+ static_cast<int16_t>(energy2 >> scaled_energy2));
// Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
amplitude_ratio =
static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
@@ -558,16 +556,13 @@
parameters.expand_vector0.PushBack(vector1, expansion_length);
parameters.expand_vector1.Clear();
if (parameters.expand_vector1.Size() < expansion_length) {
- parameters.expand_vector1.Extend(
- expansion_length - parameters.expand_vector1.Size());
+ parameters.expand_vector1.Extend(expansion_length -
+ parameters.expand_vector1.Size());
}
std::unique_ptr<int16_t[]> temp_1(new int16_t[expansion_length]);
- WebRtcSpl_AffineTransformVector(temp_1.get(),
- const_cast<int16_t*>(vector2),
- amplitude_ratio,
- 4096,
- 13,
- expansion_length);
+ WebRtcSpl_AffineTransformVector(
+ temp_1.get(), const_cast<int16_t*>(vector2), amplitude_ratio, 4096,
+ 13, expansion_length);
parameters.expand_vector1.OverwriteAt(temp_1.get(), expansion_length, 0);
} else {
// Energy change constraint not fulfilled. Only use last vector.
@@ -606,11 +601,11 @@
// Calculate the LPC and the gain of the filters.
// Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
- size_t temp_index = signal_length - fs_mult_lpc_analysis_len -
- kUnvoicedLpcOrder;
+ size_t temp_index =
+ signal_length - fs_mult_lpc_analysis_len - kUnvoicedLpcOrder;
// Copy signal to temporary vector to be able to pad with leading zeros.
- int16_t* temp_signal = new int16_t[fs_mult_lpc_analysis_len
- + kUnvoicedLpcOrder];
+ int16_t* temp_signal =
+ new int16_t[fs_mult_lpc_analysis_len + kUnvoicedLpcOrder];
memset(temp_signal, 0,
sizeof(int16_t) * (fs_mult_lpc_analysis_len + kUnvoicedLpcOrder));
memcpy(&temp_signal[kUnvoicedLpcOrder],
@@ -619,16 +614,15 @@
CrossCorrelationWithAutoShift(
&temp_signal[kUnvoicedLpcOrder], &temp_signal[kUnvoicedLpcOrder],
fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1, -1, auto_correlation);
- delete [] temp_signal;
+ delete[] temp_signal;
// Verify that variance is positive.
if (auto_correlation[0] > 0) {
// Estimate AR filter parameters using Levinson-Durbin algorithm;
// kUnvoicedLpcOrder + 1 filter coefficients.
- int16_t stability = WebRtcSpl_LevinsonDurbin(auto_correlation,
- parameters.ar_filter,
- reflection_coeff,
- kUnvoicedLpcOrder);
+ int16_t stability =
+ WebRtcSpl_LevinsonDurbin(auto_correlation, parameters.ar_filter,
+ reflection_coeff, kUnvoicedLpcOrder);
// Keep filter parameters only if filter is stable.
if (stability != 1) {
@@ -671,10 +665,8 @@
&(audio_history[signal_length - 128 - kUnvoicedLpcOrder]),
sizeof(int16_t) * kUnvoicedLpcOrder);
WebRtcSpl_FilterMAFastQ12(&audio_history[signal_length - 128],
- unvoiced_vector,
- parameters.ar_filter,
- kUnvoicedLpcOrder + 1,
- 128);
+ unvoiced_vector, parameters.ar_filter,
+ kUnvoicedLpcOrder + 1, 128);
const int unvoiced_max_abs = [&] {
const int16_t max_abs = WebRtcSpl_MaxAbsValueW16(unvoiced_vector, 128);
// Since WebRtcSpl_MaxAbsValueW16 returns 2^15 - 1 when the input contains
@@ -689,10 +681,8 @@
int unvoiced_prescale =
std::max(0, 2 * WebRtcSpl_GetSizeInBits(unvoiced_max_abs) - 24);
- int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(unvoiced_vector,
- unvoiced_vector,
- 128,
- unvoiced_prescale);
+ int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(
+ unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale);
// Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
@@ -703,8 +693,8 @@
unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
int16_t unvoiced_gain =
static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
- parameters.ar_gain_scale = 13
- + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
+ parameters.ar_gain_scale =
+ 13 + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
parameters.ar_gain = unvoiced_gain;
// Calculate voice_mix_factor from corr_coefficient.
@@ -717,17 +707,17 @@
int16_t x1, x2, x3;
// |corr_coefficient| is in Q14.
x1 = static_cast<int16_t>(corr_coefficient);
- x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
+ x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
x3 = (x1 * x2) >> 14;
- static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 };
+ static const int kCoefficients[4] = {-5179, 19931, -16422, 5776};
int32_t temp_sum = kCoefficients[0] * 16384;
temp_sum += kCoefficients[1] * x1;
temp_sum += kCoefficients[2] * x2;
temp_sum += kCoefficients[3] * x3;
parameters.voice_mix_factor =
static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
- parameters.voice_mix_factor = std::max(parameters.voice_mix_factor,
- static_cast<int16_t>(0));
+ parameters.voice_mix_factor =
+ std::max(parameters.voice_mix_factor, static_cast<int16_t>(0));
} else {
parameters.voice_mix_factor = 0;
}
@@ -816,8 +806,8 @@
static const size_t kNumCorrelationLags = 54;
static const size_t kCorrelationLength = 60;
// Downsample to 4 kHz sample rate.
- static const size_t kDownsampledLength = kCorrelationStartLag
- + kNumCorrelationLags + kCorrelationLength;
+ static const size_t kDownsampledLength =
+ kCorrelationStartLag + kNumCorrelationLags + kCorrelationLength;
int16_t downsampled_input[kDownsampledLength];
static const size_t kFilterDelay = 0;
WebRtcSpl_DownsampleFast(
@@ -827,8 +817,8 @@
downsampling_factor, kFilterDelay);
// Normalize |downsampled_input| to using all 16 bits.
- int16_t max_value = WebRtcSpl_MaxAbsValueW16(downsampled_input,
- kDownsampledLength);
+ int16_t max_value =
+ WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength);
int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
WebRtcSpl_VectorBitShiftW16(downsampled_input, kDownsampledLength,
downsampled_input, norm_shift);
@@ -836,13 +826,13 @@
int32_t correlation[kNumCorrelationLags];
CrossCorrelationWithAutoShift(
&downsampled_input[kDownsampledLength - kCorrelationLength],
- &downsampled_input[kDownsampledLength - kCorrelationLength
- - kCorrelationStartLag],
+ &downsampled_input[kDownsampledLength - kCorrelationLength -
+ kCorrelationStartLag],
kCorrelationLength, kNumCorrelationLags, -1, correlation);
// Normalize and move data from 32-bit to 16-bit vector.
- int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
- kNumCorrelationLags);
+ int32_t max_correlation =
+ WebRtcSpl_MaxAbsValueW32(correlation, kNumCorrelationLags);
int16_t norm_shift2 = static_cast<int16_t>(
std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
@@ -894,19 +884,15 @@
// Scale random vector to correct energy level.
WebRtcSpl_AffineTransformVector(
- scaled_random_vector, random_vector,
- background_noise_->Scale(channel), dc_offset,
- background_noise_->ScaleShift(channel),
- num_noise_samples);
+ scaled_random_vector, random_vector, background_noise_->Scale(channel),
+ dc_offset, background_noise_->ScaleShift(channel), num_noise_samples);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
background_noise_->Filter(channel),
- kNoiseLpcOrder + 1,
- num_noise_samples);
+ kNoiseLpcOrder + 1, num_noise_samples);
background_noise_->SetFilterState(
- channel,
- &(noise_samples[num_noise_samples - kNoiseLpcOrder]),
+ channel, &(noise_samples[num_noise_samples - kNoiseLpcOrder]),
kNoiseLpcOrder);
// Unmute the background noise.
diff --git a/modules/audio_coding/neteq/expand.h b/modules/audio_coding/neteq/expand.h
index 4060bd7..2fd4fae 100644
--- a/modules/audio_coding/neteq/expand.h
+++ b/modules/audio_coding/neteq/expand.h
@@ -114,7 +114,7 @@
int16_t ar_filter_state[kUnvoicedLpcOrder];
int16_t ar_gain;
int16_t ar_gain_scale;
- int16_t voice_mix_factor; /* Q14 */
+ int16_t voice_mix_factor; /* Q14 */
int16_t current_voice_mix_factor; /* Q14 */
AudioVector expand_vector0;
AudioVector expand_vector1;
diff --git a/modules/audio_coding/neteq/include/neteq.h b/modules/audio_coding/neteq/include/neteq.h
index 6288aeb..273979b 100644
--- a/modules/audio_coding/neteq/include/neteq.h
+++ b/modules/audio_coding/neteq/include/neteq.h
@@ -33,25 +33,25 @@
class AudioDecoderFactory;
struct NetEqNetworkStatistics {
- uint16_t current_buffer_size_ms; // Current jitter buffer size in ms.
+ uint16_t current_buffer_size_ms; // Current jitter buffer size in ms.
uint16_t preferred_buffer_size_ms; // Target buffer size in ms.
- uint16_t jitter_peaks_found; // 1 if adding extra delay due to peaky
- // jitter; 0 otherwise.
- uint16_t packet_loss_rate; // Loss rate (network + late) in Q14.
- uint16_t expand_rate; // Fraction (of original stream) of synthesized
- // audio inserted through expansion (in Q14).
+ uint16_t jitter_peaks_found; // 1 if adding extra delay due to peaky
+ // jitter; 0 otherwise.
+ uint16_t packet_loss_rate; // Loss rate (network + late) in Q14.
+ uint16_t expand_rate; // Fraction (of original stream) of synthesized
+ // audio inserted through expansion (in Q14).
uint16_t speech_expand_rate; // Fraction (of original stream) of synthesized
// speech inserted through expansion (in Q14).
- uint16_t preemptive_rate; // Fraction of data inserted through pre-emptive
- // expansion (in Q14).
- uint16_t accelerate_rate; // Fraction of data removed through acceleration
- // (in Q14).
- uint16_t secondary_decoded_rate; // Fraction of data coming from FEC/RED
- // decoding (in Q14).
+ uint16_t preemptive_rate; // Fraction of data inserted through pre-emptive
+ // expansion (in Q14).
+ uint16_t accelerate_rate; // Fraction of data removed through acceleration
+ // (in Q14).
+ uint16_t secondary_decoded_rate; // Fraction of data coming from FEC/RED
+ // decoding (in Q14).
uint16_t secondary_discarded_rate; // Fraction of discarded FEC/RED data (in
// Q14).
- int32_t clockdrift_ppm; // Average clock-drift in parts-per-million
- // (positive or negative).
+ int32_t clockdrift_ppm; // Average clock-drift in parts-per-million
+ // (positive or negative).
size_t added_zero_samples; // Number of zero samples added in "off" mode.
// Statistics for packet waiting times, i.e., the time between a packet
// arrives until it is decoded.
@@ -104,11 +104,7 @@
absl::optional<AudioCodecPairId> codec_pair_id;
};
- enum ReturnCodes {
- kOK = 0,
- kFail = -1,
- kNotImplemented = -2
- };
+ enum ReturnCodes { kOK = 0, kFail = -1, kNotImplemented = -2 };
// Creates a new NetEq object, with parameters set in |config|. The |config|
// object will only have to be valid for the duration of the call to this
diff --git a/modules/audio_coding/neteq/merge.cc b/modules/audio_coding/neteq/merge.cc
index fb0bb0d..3c9ad19 100644
--- a/modules/audio_coding/neteq/merge.cc
+++ b/modules/audio_coding/neteq/merge.cc
@@ -43,10 +43,11 @@
Merge::~Merge() = default;
-size_t Merge::Process(int16_t* input, size_t input_length,
+size_t Merge::Process(int16_t* input,
+ size_t input_length,
AudioMultiVector* output) {
// TODO(hlundin): Change to an enumerator and skip assert.
- assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
+ assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
fs_hz_ == 48000);
assert(fs_hz_ <= kMaxSampleRate); // Should not be possible.
@@ -68,8 +69,8 @@
new int16_t[input_length_per_channel]);
std::unique_ptr<int16_t[]> expanded_channel(new int16_t[expanded_length]);
for (size_t channel = 0; channel < num_channels_; ++channel) {
- input_vector[channel].CopyTo(
- input_length_per_channel, 0, input_channel.get());
+ input_vector[channel].CopyTo(input_length_per_channel, 0,
+ input_channel.get());
expanded_[channel].CopyTo(expanded_length, 0, expanded_channel.get());
const int16_t new_mute_factor = std::min<int16_t>(
@@ -93,11 +94,11 @@
// Mute the new decoded data if needed (and unmute it linearly).
// This is the overlapping part of expanded_signal.
- size_t interpolation_length = std::min(
- kMaxCorrelationLength * fs_mult_,
- expanded_length - best_correlation_index);
- interpolation_length = std::min(interpolation_length,
- input_length_per_channel);
+ size_t interpolation_length =
+ std::min(kMaxCorrelationLength * fs_mult_,
+ expanded_length - best_correlation_index);
+ interpolation_length =
+ std::min(interpolation_length, input_length_per_channel);
RTC_DCHECK_LE(new_mute_factor, 16384);
int16_t mute_factor =
@@ -203,30 +204,28 @@
return required_length;
}
-int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
+int16_t Merge::SignalScaling(const int16_t* input,
+ size_t input_length,
const int16_t* expanded_signal) const {
// Adjust muting factor if new vector is more or less of the BGN energy.
const auto mod_input_length = rtc::SafeMin<size_t>(
64 * rtc::dchecked_cast<size_t>(fs_mult_), input_length);
const int16_t expanded_max =
WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
- int32_t factor = (expanded_max * expanded_max) /
- (std::numeric_limits<int32_t>::max() /
- static_cast<int32_t>(mod_input_length));
+ int32_t factor =
+ (expanded_max * expanded_max) / (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(mod_input_length));
const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
- int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
- expanded_signal,
- mod_input_length,
- expanded_shift);
+ int32_t energy_expanded = WebRtcSpl_DotProductWithScale(
+ expanded_signal, expanded_signal, mod_input_length, expanded_shift);
// Calculate energy of input signal.
const int16_t input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
factor = (input_max * input_max) / (std::numeric_limits<int32_t>::max() /
- static_cast<int32_t>(mod_input_length));
+ static_cast<int32_t>(mod_input_length));
const int input_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
- int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
- mod_input_length,
- input_shift);
+ int32_t energy_input = WebRtcSpl_DotProductWithScale(
+ input, input, mod_input_length, input_shift);
// Align to the same Q-domain.
if (input_shift > expanded_shift) {
@@ -257,8 +256,10 @@
// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
-void Merge::Downsample(const int16_t* input, size_t input_length,
- const int16_t* expanded_signal, size_t expanded_length) {
+void Merge::Downsample(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal,
+ size_t expanded_length) {
const int16_t* filter_coefficients;
size_t num_coefficients;
int decimation_factor = fs_hz_ / 4000;
@@ -278,11 +279,10 @@
num_coefficients = 7;
}
size_t signal_offset = num_coefficients - 1;
- WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
- expanded_length - signal_offset,
- expanded_downsampled_, kExpandDownsampLength,
- filter_coefficients, num_coefficients,
- decimation_factor, kCompensateDelay);
+ WebRtcSpl_DownsampleFast(
+ &expanded_signal[signal_offset], expanded_length - signal_offset,
+ expanded_downsampled_, kExpandDownsampLength, filter_coefficients,
+ num_coefficients, decimation_factor, kCompensateDelay);
if (input_length <= length_limit) {
// Not quite long enough, so we have to cheat a bit.
// If the input is really short, we'll just use the input length as is, and
@@ -301,15 +301,15 @@
memset(&input_downsampled_[downsamp_temp_len], 0,
sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
} else {
- WebRtcSpl_DownsampleFast(&input[signal_offset],
- input_length - signal_offset, input_downsampled_,
- kInputDownsampLength, filter_coefficients,
- num_coefficients, decimation_factor,
- kCompensateDelay);
+ WebRtcSpl_DownsampleFast(
+ &input[signal_offset], input_length - signal_offset, input_downsampled_,
+ kInputDownsampLength, filter_coefficients, num_coefficients,
+ decimation_factor, kCompensateDelay);
}
}
-size_t Merge::CorrelateAndPeakSearch(size_t start_position, size_t input_length,
+size_t Merge::CorrelateAndPeakSearch(size_t start_position,
+ size_t input_length,
size_t expand_period) const {
// Calculate correlation without any normalization.
const size_t max_corr_length = kMaxCorrelationLength;
@@ -328,8 +328,8 @@
new int16_t[correlation_buffer_size]);
memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
int16_t* correlation_ptr = &correlation16[pad_length];
- int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
- stop_position_downsamp);
+ int32_t max_correlation =
+ WebRtcSpl_MaxAbsValueW32(correlation, stop_position_downsamp);
int norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
correlation, norm_shift);
@@ -366,7 +366,7 @@
while (((best_correlation_index + input_length) <
(timestamps_per_call_ + expand_->overlap_length())) ||
((best_correlation_index + input_length) < start_position)) {
- assert(false); // Should never happen.
+ assert(false); // Should never happen.
best_correlation_index += expand_period; // Jump one lag ahead.
}
return best_correlation_index;
@@ -376,5 +376,4 @@
return fs_hz_ / 100 * num_channels_; // 10 ms.
}
-
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/merge.h b/modules/audio_coding/neteq/merge.h
index 6da0b4f..017e824 100644
--- a/modules/audio_coding/neteq/merge.h
+++ b/modules/audio_coding/neteq/merge.h
@@ -44,7 +44,8 @@
// (interleaved). The result is written to |output|. The number of channels
// allocated in |output| defines the number of channels that will be used when
// de-interleaving |input|.
- virtual size_t Process(int16_t* input, size_t input_length,
+ virtual size_t Process(int16_t* input,
+ size_t input_length,
AudioMultiVector* output);
virtual size_t RequiredFutureSamples();
@@ -68,19 +69,23 @@
// Analyzes |input| and |expanded_signal| and returns muting factor (Q14) to
// be used on the new data.
- int16_t SignalScaling(const int16_t* input, size_t input_length,
+ int16_t SignalScaling(const int16_t* input,
+ size_t input_length,
const int16_t* expanded_signal) const;
// Downsamples |input| (|input_length| samples) and |expanded_signal| to
// 4 kHz sample rate. The downsampled signals are written to
// |input_downsampled_| and |expanded_downsampled_|, respectively.
- void Downsample(const int16_t* input, size_t input_length,
- const int16_t* expanded_signal, size_t expanded_length);
+ void Downsample(const int16_t* input,
+ size_t input_length,
+ const int16_t* expanded_signal,
+ size_t expanded_length);
// Calculates cross-correlation between |input_downsampled_| and
// |expanded_downsampled_|, and finds the correlation maximum. The maximizing
// lag is returned.
- size_t CorrelateAndPeakSearch(size_t start_position, size_t input_length,
+ size_t CorrelateAndPeakSearch(size_t start_position,
+ size_t input_length,
size_t expand_period) const;
const int fs_mult_; // fs_hz_ / 8000.
diff --git a/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
index f662fb6..bf9fd59 100644
--- a/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
+++ b/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -20,17 +20,14 @@
class MockBufferLevelFilter : public BufferLevelFilter {
public:
virtual ~MockBufferLevelFilter() { Die(); }
- MOCK_METHOD0(Die,
- void());
- MOCK_METHOD0(Reset,
- void());
+ MOCK_METHOD0(Die, void());
+ MOCK_METHOD0(Reset, void());
MOCK_METHOD3(Update,
- void(size_t buffer_size_packets, int time_stretched_samples,
- size_t packet_len_samples));
- MOCK_METHOD1(SetTargetBufferLevel,
- void(int target_buffer_level));
- MOCK_CONST_METHOD0(filtered_current_level,
- int());
+ void(size_t buffer_size_packets,
+ int time_stretched_samples,
+ size_t packet_len_samples));
+ MOCK_METHOD1(SetTargetBufferLevel, void(int target_buffer_level));
+ MOCK_CONST_METHOD0(filtered_current_level, int());
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_decoder_database.h b/modules/audio_coding/neteq/mock/mock_decoder_database.h
index 3d57edd..b1d8151 100644
--- a/modules/audio_coding/neteq/mock/mock_decoder_database.h
+++ b/modules/audio_coding/neteq/mock/mock_decoder_database.h
@@ -26,15 +26,13 @@
: DecoderDatabase(factory, absl::nullopt) {}
virtual ~MockDecoderDatabase() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_CONST_METHOD0(Empty,
- bool());
- MOCK_CONST_METHOD0(Size,
- int());
- MOCK_METHOD0(Reset,
- void());
+ MOCK_CONST_METHOD0(Empty, bool());
+ MOCK_CONST_METHOD0(Size, int());
+ MOCK_METHOD0(Reset, void());
MOCK_METHOD3(RegisterPayload,
- int(uint8_t rtp_payload_type, NetEqDecoder codec_type,
- const std::string& name));
+ int(uint8_t rtp_payload_type,
+ NetEqDecoder codec_type,
+ const std::string& name));
MOCK_METHOD2(RegisterPayload,
int(int rtp_payload_type, const SdpAudioFormat& audio_format));
MOCK_METHOD4(InsertExternal,
@@ -42,19 +40,15 @@
NetEqDecoder codec_type,
const std::string& codec_name,
AudioDecoder* decoder));
- MOCK_METHOD1(Remove,
- int(uint8_t rtp_payload_type));
+ MOCK_METHOD1(Remove, int(uint8_t rtp_payload_type));
MOCK_METHOD0(RemoveAll, void());
MOCK_CONST_METHOD1(GetDecoderInfo,
- const DecoderInfo*(uint8_t rtp_payload_type));
+ const DecoderInfo*(uint8_t rtp_payload_type));
MOCK_METHOD2(SetActiveDecoder,
- int(uint8_t rtp_payload_type, bool* new_decoder));
- MOCK_CONST_METHOD0(GetActiveDecoder,
- AudioDecoder*());
- MOCK_METHOD1(SetActiveCngDecoder,
- int(uint8_t rtp_payload_type));
- MOCK_CONST_METHOD0(GetActiveCngDecoder,
- ComfortNoiseDecoder*());
+ int(uint8_t rtp_payload_type, bool* new_decoder));
+ MOCK_CONST_METHOD0(GetActiveDecoder, AudioDecoder*());
+ MOCK_METHOD1(SetActiveCngDecoder, int(uint8_t rtp_payload_type));
+ MOCK_CONST_METHOD0(GetActiveCngDecoder, ComfortNoiseDecoder*());
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_delay_manager.h b/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 61f209d..9b2ed49 100644
--- a/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -25,37 +25,25 @@
: DelayManager(max_packets_in_buffer, peak_detector, tick_timer) {}
virtual ~MockDelayManager() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_CONST_METHOD0(iat_vector,
- const IATVector&());
+ MOCK_CONST_METHOD0(iat_vector, const IATVector&());
MOCK_METHOD3(Update,
- int(uint16_t sequence_number, uint32_t timestamp, int sample_rate_hz));
- MOCK_METHOD1(CalculateTargetLevel,
- int(int iat_packets));
- MOCK_METHOD1(SetPacketAudioLength,
- int(int length_ms));
- MOCK_METHOD0(Reset,
- void());
- MOCK_CONST_METHOD0(PeakFound,
- bool());
- MOCK_METHOD1(UpdateCounters,
- void(int elapsed_time_ms));
- MOCK_METHOD0(ResetPacketIatCount,
- void());
- MOCK_CONST_METHOD2(BufferLimits,
- void(int* lower_limit, int* higher_limit));
- MOCK_CONST_METHOD0(TargetLevel,
- int());
+ int(uint16_t sequence_number,
+ uint32_t timestamp,
+ int sample_rate_hz));
+ MOCK_METHOD1(CalculateTargetLevel, int(int iat_packets));
+ MOCK_METHOD1(SetPacketAudioLength, int(int length_ms));
+ MOCK_METHOD0(Reset, void());
+ MOCK_CONST_METHOD0(PeakFound, bool());
+ MOCK_METHOD1(UpdateCounters, void(int elapsed_time_ms));
+ MOCK_METHOD0(ResetPacketIatCount, void());
+ MOCK_CONST_METHOD2(BufferLimits, void(int* lower_limit, int* higher_limit));
+ MOCK_CONST_METHOD0(TargetLevel, int());
MOCK_METHOD0(RegisterEmptyPacket, void());
- MOCK_METHOD1(set_extra_delay_ms,
- void(int16_t delay));
- MOCK_CONST_METHOD0(base_target_level,
- int());
- MOCK_METHOD1(set_streaming_mode,
- void(bool value));
- MOCK_CONST_METHOD0(last_pack_cng_or_dtmf,
- int());
- MOCK_METHOD1(set_last_pack_cng_or_dtmf,
- void(int value));
+ MOCK_METHOD1(set_extra_delay_ms, void(int16_t delay));
+ MOCK_CONST_METHOD0(base_target_level, int());
+ MOCK_METHOD1(set_streaming_mode, void(bool value));
+ MOCK_CONST_METHOD0(last_pack_cng_or_dtmf, int());
+ MOCK_METHOD1(set_last_pack_cng_or_dtmf, void(int value));
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h b/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
index 153a4d7..11b571f 100644
--- a/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
+++ b/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
@@ -22,16 +22,11 @@
MockDtmfBuffer(int fs) : DtmfBuffer(fs) {}
virtual ~MockDtmfBuffer() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD0(Flush,
- void());
- MOCK_METHOD1(InsertEvent,
- int(const DtmfEvent& event));
- MOCK_METHOD2(GetEvent,
- bool(uint32_t current_timestamp, DtmfEvent* event));
- MOCK_CONST_METHOD0(Length,
- size_t());
- MOCK_CONST_METHOD0(Empty,
- bool());
+ MOCK_METHOD0(Flush, void());
+ MOCK_METHOD1(InsertEvent, int(const DtmfEvent& event));
+ MOCK_METHOD2(GetEvent, bool(uint32_t current_timestamp, DtmfEvent* event));
+ MOCK_CONST_METHOD0(Length, size_t());
+ MOCK_CONST_METHOD0(Empty, bool());
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
index 2cb5980..be4b7b5 100644
--- a/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
+++ b/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -21,14 +21,10 @@
public:
virtual ~MockDtmfToneGenerator() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD3(Init,
- int(int fs, int event, int attenuation));
- MOCK_METHOD0(Reset,
- void());
- MOCK_METHOD2(Generate,
- int(size_t num_samples, AudioMultiVector* output));
- MOCK_CONST_METHOD0(initialized,
- bool());
+ MOCK_METHOD3(Init, int(int fs, int event, int attenuation));
+ MOCK_METHOD0(Reset, void());
+ MOCK_METHOD2(Generate, int(size_t num_samples, AudioMultiVector* output));
+ MOCK_CONST_METHOD0(initialized, bool());
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_expand.h b/modules/audio_coding/neteq/mock/mock_expand.h
index 05fdaec..aed0164 100644
--- a/modules/audio_coding/neteq/mock/mock_expand.h
+++ b/modules/audio_coding/neteq/mock/mock_expand.h
@@ -33,16 +33,11 @@
num_channels) {}
virtual ~MockExpand() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD0(Reset,
- void());
- MOCK_METHOD1(Process,
- int(AudioMultiVector* output));
- MOCK_METHOD0(SetParametersForNormalAfterExpand,
- void());
- MOCK_METHOD0(SetParametersForMergeAfterExpand,
- void());
- MOCK_CONST_METHOD0(overlap_length,
- size_t());
+ MOCK_METHOD0(Reset, void());
+ MOCK_METHOD1(Process, int(AudioMultiVector* output));
+ MOCK_METHOD0(SetParametersForNormalAfterExpand, void());
+ MOCK_METHOD0(SetParametersForMergeAfterExpand, void());
+ MOCK_CONST_METHOD0(overlap_length, size_t());
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
index b315240..5aed6a9 100644
--- a/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
+++ b/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -75,17 +75,16 @@
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type));
- MOCK_CONST_METHOD0(HasDecodePlc,
- bool());
- MOCK_METHOD2(DecodePlc,
- size_t(size_t num_frames, int16_t* decoded));
+ MOCK_CONST_METHOD0(HasDecodePlc, bool());
+ MOCK_METHOD2(DecodePlc, size_t(size_t num_frames, int16_t* decoded));
MOCK_METHOD0(Reset, void());
MOCK_METHOD5(IncomingPacket,
- int(const uint8_t* payload, size_t payload_len,
- uint16_t rtp_sequence_number, uint32_t rtp_timestamp,
- uint32_t arrival_timestamp));
- MOCK_METHOD0(ErrorCode,
- int());
+ int(const uint8_t* payload,
+ size_t payload_len,
+ uint16_t rtp_sequence_number,
+ uint32_t rtp_timestamp,
+ uint32_t arrival_timestamp));
+ MOCK_METHOD0(ErrorCode, int());
int SampleRateHz() const /* override */ { return real_.SampleRateHz(); }
size_t Channels() const /* override */ { return real_.Channels(); }
diff --git a/modules/audio_coding/neteq/nack_tracker.h b/modules/audio_coding/neteq/nack_tracker.h
index 66383ce..1936a94 100644
--- a/modules/audio_coding/neteq/nack_tracker.h
+++ b/modules/audio_coding/neteq/nack_tracker.h
@@ -11,8 +11,8 @@
#ifndef MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
#define MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
-#include <vector>
#include <map>
+#include <vector>
#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "modules/include/module_common_types.h"
diff --git a/modules/audio_coding/neteq/neteq.cc b/modules/audio_coding/neteq/neteq.cc
index db12589..55af23e 100644
--- a/modules/audio_coding/neteq/neteq.cc
+++ b/modules/audio_coding/neteq/neteq.cc
@@ -27,14 +27,12 @@
std::string NetEq::Config::ToString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
- ss << "sample_rate_hz=" << sample_rate_hz
- << ", enable_post_decode_vad="
+ ss << "sample_rate_hz=" << sample_rate_hz << ", enable_post_decode_vad="
<< (enable_post_decode_vad ? "true" : "false")
<< ", max_packets_in_buffer=" << max_packets_in_buffer
- << ", playout_mode=" << playout_mode
- << ", enable_fast_accelerate="
- << (enable_fast_accelerate ? " true": "false")
- << ", enable_muted_state=" << (enable_muted_state ? " true": "false");
+ << ", playout_mode=" << playout_mode << ", enable_fast_accelerate="
+ << (enable_fast_accelerate ? " true" : "false")
+ << ", enable_muted_state=" << (enable_muted_state ? " true" : "false");
return ss.str();
}
diff --git a/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 03f5aa3..5c350bb 100644
--- a/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -55,8 +55,8 @@
}
virtual ~NetEqExternalDecoderUnitTest() {
- delete [] input_;
- delete [] encoded_;
+ delete[] input_;
+ delete[] encoded_;
// ~NetEqExternalDecoderTest() will delete |external_decoder_|, so expecting
// Die() to be called.
EXPECT_CALL(*external_decoder_, Die()).Times(1);
@@ -75,8 +75,8 @@
if (!input_file_->Read(frame_size_samples_, input_)) {
return -1;
}
- payload_size_bytes_ = WebRtcPcm16b_Encode(input_, frame_size_samples_,
- encoded_);
+ payload_size_bytes_ =
+ WebRtcPcm16b_Encode(input_, frame_size_samples_, encoded_);
int next_send_time = rtp_generator_->GetRtpHeader(
kPayloadType, frame_size_samples_, &rtp_header_);
@@ -111,9 +111,10 @@
uint32_t time_now = 0;
for (int k = 0; k < num_loops; ++k) {
while (time_now >= next_arrival_time) {
- InsertPacket(rtp_header_, rtc::ArrayView<const uint8_t>(
- encoded_, payload_size_bytes_),
- next_arrival_time);
+ InsertPacket(
+ rtp_header_,
+ rtc::ArrayView<const uint8_t>(encoded_, payload_size_bytes_),
+ next_arrival_time);
// Get next input packet.
do {
next_send_time = GetNewPacket();
@@ -148,6 +149,7 @@
}
int samples_per_ms() const { return samples_per_ms_; }
+
private:
std::unique_ptr<MockExternalPcm16B> external_decoder_;
int samples_per_ms_;
@@ -337,11 +339,9 @@
static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
"jump should be larger than half range");
// Replace the default RTP generator with one that jumps in timestamp.
- ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
- kStartSeqeunceNumber,
- kStartTimestamp,
- kJumpFromTimestamp,
- kJumpToTimestamp));
+ ResetRtpGenerator(new test::TimestampJumpRtpGenerator(
+ samples_per_ms(), kStartSeqeunceNumber, kStartTimestamp,
+ kJumpFromTimestamp, kJumpToTimestamp));
RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
EXPECT_EQ(kRecovered, test_state_);
@@ -361,11 +361,9 @@
static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
"jump should be larger than half range");
// Replace the default RTP generator with one that jumps in timestamp.
- ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
- kStartSeqeunceNumber,
- kStartTimestamp,
- kJumpFromTimestamp,
- kJumpToTimestamp));
+ ResetRtpGenerator(new test::TimestampJumpRtpGenerator(
+ samples_per_ms(), kStartSeqeunceNumber, kStartTimestamp,
+ kJumpFromTimestamp, kJumpToTimestamp));
RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
EXPECT_EQ(kRecovered, test_state_);
@@ -420,11 +418,9 @@
static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
"jump should be smaller than half range");
// Replace the default RTP generator with one that jumps in timestamp.
- ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
- kStartSeqeunceNumber,
- kStartTimestamp,
- kJumpFromTimestamp,
- kJumpToTimestamp));
+ ResetRtpGenerator(new test::TimestampJumpRtpGenerator(
+ samples_per_ms(), kStartSeqeunceNumber, kStartTimestamp,
+ kJumpFromTimestamp, kJumpToTimestamp));
RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
EXPECT_EQ(kRecovered, test_state_);
@@ -444,11 +440,9 @@
static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
"jump should be smaller than half range");
// Replace the default RTP generator with one that jumps in timestamp.
- ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
- kStartSeqeunceNumber,
- kStartTimestamp,
- kJumpFromTimestamp,
- kJumpToTimestamp));
+ ResetRtpGenerator(new test::TimestampJumpRtpGenerator(
+ samples_per_ms(), kStartSeqeunceNumber, kStartTimestamp,
+ kJumpFromTimestamp, kJumpToTimestamp));
RunTest(130); // Run 130 laps @ 10 ms each in the test loop.
EXPECT_EQ(kRecovered, test_state_);
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index 40eae1b..afc15bf 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -681,8 +681,7 @@
decoder->IncomingPacket(packet_list.front().payload.data(),
packet_list.front().payload.size(),
packet_list.front().sequence_number,
- packet_list.front().timestamp,
- receive_timestamp);
+ packet_list.front().timestamp, receive_timestamp);
}
PacketList parsed_packet_list;
@@ -703,7 +702,7 @@
const auto sequence_number = packet.sequence_number;
const auto payload_type = packet.payload_type;
const Packet::Priority original_priority = packet.priority;
- auto packet_from_result = [&] (AudioDecoder::ParseResult& result) {
+ auto packet_from_result = [&](AudioDecoder::ParseResult& result) {
Packet new_packet;
new_packet.sequence_number = sequence_number;
new_packet.payload_type = payload_type;
@@ -788,8 +787,7 @@
assert(decoder_info);
if (decoder_info->SampleRateHz() != fs_hz_ ||
channels != algorithm_buffer_->Channels()) {
- SetSampleRateAndChannels(decoder_info->SampleRateHz(),
- channels);
+ SetSampleRateAndChannels(decoder_info->SampleRateHz(), channels);
}
if (nack_enabled_) {
RTC_DCHECK(nack_);
@@ -866,8 +864,8 @@
return 0;
}
- int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
- &play_dtmf);
+ int return_value =
+ GetDecision(&operation, &packet_list, &dtmf_event, &play_dtmf);
if (return_value != 0) {
last_mode_ = kModeError;
return return_value;
@@ -876,12 +874,11 @@
AudioDecoder::SpeechType speech_type;
int length = 0;
const size_t start_num_packets = packet_list.size();
- int decode_return_value = Decode(&packet_list, &operation,
- &length, &speech_type);
+ int decode_return_value =
+ Decode(&packet_list, &operation, &length, &speech_type);
assert(vad_.get());
- bool sid_frame_available =
- (operation == kRfc3389Cng && !packet_list.empty());
+ bool sid_frame_available = (operation == kRfc3389Cng && !packet_list.empty());
vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
sid_frame_available, fs_hz_);
@@ -1033,8 +1030,7 @@
// Update the background noise parameters if last operation wrote data
// straight from the decoder to the |sync_buffer_|. That is, none of the
// operations that modify the signal can be followed by a parameter update.
- if ((last_mode_ == kModeNormal) ||
- (last_mode_ == kModeAccelerateFail) ||
+ if ((last_mode_ == kModeNormal) || (last_mode_ == kModeAccelerateFail) ||
(last_mode_ == kModePreemptiveExpandFail) ||
(last_mode_ == kModeRfc3389Cng) ||
(last_mode_ == kModeCodecInternalCng)) {
@@ -1051,7 +1047,8 @@
// If last operation was not expand, calculate the |playout_timestamp_| from
// the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
// would be moved "backwards".
- uint32_t temp_timestamp = sync_buffer_->end_timestamp() -
+ uint32_t temp_timestamp =
+ sync_buffer_->end_timestamp() -
static_cast<uint32_t>(sync_buffer_->FutureLength());
if (static_cast<int32_t>(temp_timestamp - playout_timestamp_) > 0) {
playout_timestamp_ = temp_timestamp;
@@ -1070,13 +1067,13 @@
: timestamp_scaler_->ToExternal(playout_timestamp_) -
static_cast<uint32_t>(audio_frame->samples_per_channel_);
- if (!(last_mode_ == kModeRfc3389Cng ||
- last_mode_ == kModeCodecInternalCng ||
- last_mode_ == kModeExpand)) {
+ if (!(last_mode_ == kModeRfc3389Cng || last_mode_ == kModeCodecInternalCng ||
+ last_mode_ == kModeExpand)) {
generated_noise_stopwatch_.reset();
}
- if (decode_return_value) return decode_return_value;
+ if (decode_return_value)
+ return decode_return_value;
return return_value;
}
@@ -1100,11 +1097,10 @@
RTC_DCHECK(!generated_noise_stopwatch_ ||
generated_noise_stopwatch_->ElapsedTicks() >= 1);
uint64_t generated_noise_samples =
- generated_noise_stopwatch_
- ? (generated_noise_stopwatch_->ElapsedTicks() - 1) *
- output_size_samples_ +
- decision_logic_->noise_fast_forward()
- : 0;
+ generated_noise_stopwatch_ ? (generated_noise_stopwatch_->ElapsedTicks() -
+ 1) * output_size_samples_ +
+ decision_logic_->noise_fast_forward()
+ : 0;
if (decision_logic_->CngRfc3389On() || last_mode_ == kModeRfc3389Cng) {
// Because of timestamp peculiarities, we have to "manually" disallow using
@@ -1127,7 +1123,7 @@
assert(expand_.get());
const int samples_left = static_cast<int>(sync_buffer_->FutureLength() -
- expand_->overlap_length());
+ expand_->overlap_length());
if (last_mode_ == kModeAccelerateSuccess ||
last_mode_ == kModeAccelerateLowEnergy ||
last_mode_ == kModePreemptiveExpandSuccess ||
@@ -1139,9 +1135,8 @@
// Check if it is time to play a DTMF event.
if (dtmf_buffer_->GetEvent(
- static_cast<uint32_t>(
- end_timestamp + generated_noise_samples),
- dtmf_event)) {
+ static_cast<uint32_t>(end_timestamp + generated_noise_samples),
+ dtmf_event)) {
*play_dtmf = true;
}
@@ -1243,12 +1238,12 @@
decision_logic_->set_prev_time_scale(true);
return 0;
} else if (samples_left >= static_cast<int>(samples_10_ms) &&
- decoder_frame_length_ >= samples_30_ms) {
+ decoder_frame_length_ >= samples_30_ms) {
// Avoid decoding more data as it might overflow the playout buffer.
*operation = kNormal;
return 0;
} else if (samples_left < static_cast<int>(samples_20_ms) &&
- decoder_frame_length_ < samples_30_ms) {
+ decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data. Do
// not perform accelerate yet, but wait until we only need to do one
// decoding.
@@ -1267,7 +1262,7 @@
// audio data.
if ((samples_left >= static_cast<int>(samples_30_ms)) ||
(samples_left >= static_cast<int>(samples_10_ms) &&
- decoder_frame_length_ >= samples_30_ms)) {
+ decoder_frame_length_ >= samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
// Or, avoid decoding more data as it might overflow the playout buffer.
// Still try preemptive expand, though.
@@ -1339,7 +1334,8 @@
return 0;
}
-int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
+int NetEqImpl::Decode(PacketList* packet_list,
+ Operations* operation,
int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
*speech_type = AudioDecoder::kSpeech;
@@ -1364,8 +1360,8 @@
decoder_database_->SetActiveDecoder(payload_type, &decoder_changed);
if (decoder_changed) {
// We have a new decoder. Re-init some values.
- const DecoderDatabase::DecoderInfo* decoder_info = decoder_database_
- ->GetDecoderInfo(payload_type);
+ const DecoderDatabase::DecoderInfo* decoder_info =
+ decoder_database_->GetDecoderInfo(payload_type);
assert(decoder_info);
if (!decoder_info) {
RTC_LOG(LS_WARNING)
@@ -1411,8 +1407,8 @@
RTC_DCHECK(packet_list->empty());
return_value = DecodeCng(decoder, decoded_length, speech_type);
} else {
- return_value = DecodeLoop(packet_list, *operation, decoder,
- decoded_length, speech_type);
+ return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
+ speech_type);
}
if (*decoded_length < 0) {
@@ -1446,7 +1442,8 @@
return return_value;
}
-int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
+int NetEqImpl::DecodeCng(AudioDecoder* decoder,
+ int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
if (!decoder) {
// This happens when active decoder is not defined.
@@ -1456,9 +1453,9 @@
while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
const int length = decoder->Decode(
- nullptr, 0, fs_hz_,
- (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
- &decoded_buffer_[*decoded_length], speech_type);
+ nullptr, 0, fs_hz_,
+ (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
+ &decoded_buffer_[*decoded_length], speech_type);
if (length > 0) {
*decoded_length += length;
} else {
@@ -1476,15 +1473,16 @@
return 0;
}
-int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
- AudioDecoder* decoder, int* decoded_length,
+int NetEqImpl::DecodeLoop(PacketList* packet_list,
+ const Operations& operation,
+ AudioDecoder* decoder,
+ int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
RTC_DCHECK(last_decoded_timestamps_.empty());
// Do decoding.
- while (
- !packet_list->empty() &&
- !decoder_database_->IsComfortNoise(packet_list->front().payload_type)) {
+ while (!packet_list->empty() && !decoder_database_->IsComfortNoise(
+ packet_list->front().payload_type)) {
assert(decoder); // At this point, we must have a decoder object.
// The number of channels in the |sync_buffer_| should be the same as the
// number decoder channels.
@@ -1526,15 +1524,16 @@
// If the list is not empty at this point, either a decoding error terminated
// the while-loop, or list must hold exactly one CNG packet.
- assert(
- packet_list->empty() || *decoded_length < 0 ||
- (packet_list->size() == 1 &&
- decoder_database_->IsComfortNoise(packet_list->front().payload_type)));
+ assert(packet_list->empty() || *decoded_length < 0 ||
+ (packet_list->size() == 1 && decoder_database_->IsComfortNoise(
+ packet_list->front().payload_type)));
return 0;
}
-void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
- AudioDecoder::SpeechType speech_type, bool play_dtmf) {
+void NetEqImpl::DoNormal(const int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) {
assert(normal_.get());
normal_->Process(decoded_buffer, decoded_length, last_mode_,
algorithm_buffer_.get());
@@ -1543,9 +1542,8 @@
}
// If last packet was decoded as an inband CNG, set mode to CNG instead.
- if ((speech_type == AudioDecoder::kComfortNoise)
- || ((last_mode_ == kModeCodecInternalCng)
- && (decoded_length == 0))) {
+ if ((speech_type == AudioDecoder::kComfortNoise) ||
+ ((last_mode_ == kModeCodecInternalCng) && (decoded_length == 0))) {
// TODO(hlundin): Remove second part of || statement above.
last_mode_ = kModeCodecInternalCng;
}
@@ -1555,11 +1553,13 @@
}
}
-void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
- AudioDecoder::SpeechType speech_type, bool play_dtmf) {
+void NetEqImpl::DoMerge(int16_t* decoded_buffer,
+ size_t decoded_length,
+ AudioDecoder::SpeechType speech_type,
+ bool play_dtmf) {
assert(merge_.get());
- size_t new_length = merge_->Process(decoded_buffer, decoded_length,
- algorithm_buffer_.get());
+ size_t new_length =
+ merge_->Process(decoded_buffer, decoded_length, algorithm_buffer_.get());
// Correction can be negative.
int expand_length_correction =
rtc::dchecked_cast<int>(new_length) -
@@ -1587,7 +1587,7 @@
int NetEqImpl::DoExpand(bool play_dtmf) {
while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
- output_size_samples_) {
+ output_size_samples_) {
algorithm_buffer_->Clear();
int return_value = expand_->Process(algorithm_buffer_.get());
size_t length = algorithm_buffer_->Size();
@@ -1635,11 +1635,10 @@
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
- borrowed_samples_per_channel = static_cast<int>(required_samples -
- decoded_length_per_channel);
+ borrowed_samples_per_channel =
+ static_cast<int>(required_samples - decoded_length_per_channel);
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
- decoded_buffer,
- sizeof(int16_t) * decoded_length);
+ decoded_buffer, sizeof(int16_t) * decoded_length);
sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
decoded_buffer);
decoded_length = required_samples * num_channels;
@@ -1672,17 +1671,16 @@
if (length < borrowed_samples_per_channel) {
// This destroys the beginning of the buffer, but will not cause any
// problems.
- sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
- sync_buffer_->Size() -
- borrowed_samples_per_channel);
+ sync_buffer_->ReplaceAtIndex(
+ *algorithm_buffer_,
+ sync_buffer_->Size() - borrowed_samples_per_channel);
sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length);
algorithm_buffer_->PopFront(length);
assert(algorithm_buffer_->Empty());
} else {
- sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
- borrowed_samples_per_channel,
- sync_buffer_->Size() -
- borrowed_samples_per_channel);
+ sync_buffer_->ReplaceAtIndex(
+ *algorithm_buffer_, borrowed_samples_per_channel,
+ sync_buffer_->Size() - borrowed_samples_per_channel);
algorithm_buffer_->PopFront(borrowed_samples_per_channel);
}
}
@@ -1714,11 +1712,11 @@
required_samples - decoded_length_per_channel;
// Calculate how many of these were already played out.
old_borrowed_samples_per_channel =
- (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
- (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
+ (borrowed_samples_per_channel > sync_buffer_->FutureLength())
+ ? (borrowed_samples_per_channel - sync_buffer_->FutureLength())
+ : 0;
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
- decoded_buffer,
- sizeof(int16_t) * decoded_length);
+ decoded_buffer, sizeof(int16_t) * decoded_length);
sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
decoded_buffer);
decoded_length = required_samples * num_channels;
@@ -1726,8 +1724,7 @@
size_t samples_added;
PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
- decoded_buffer, decoded_length,
- old_borrowed_samples_per_channel,
+ decoded_buffer, decoded_length, old_borrowed_samples_per_channel,
algorithm_buffer_.get(), &samples_added);
stats_.PreemptiveExpandedSamples(samples_added);
switch (return_code) {
@@ -1780,8 +1777,8 @@
return -comfort_noise_->internal_error_code();
}
}
- int cn_return = comfort_noise_->Generate(output_size_samples_,
- algorithm_buffer_.get());
+ int cn_return =
+ comfort_noise_->Generate(output_size_samples_, algorithm_buffer_.get());
expand_->Reset();
last_mode_ = kModeRfc3389Cng;
if (!play_dtmf) {
@@ -1909,16 +1906,17 @@
expand_->Reset();
}
-int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
+int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event,
+ size_t num_channels,
int16_t* output) const {
size_t out_index = 0;
size_t overdub_length = output_size_samples_; // Default value.
if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
// Special operation for transition from "DTMF only" to "DTMF overdub".
- out_index = std::min(
- sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
- output_size_samples_);
+ out_index =
+ std::min(sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
+ output_size_samples_);
overdub_length = output_size_samples_ - out_index;
}
@@ -1929,8 +1927,8 @@
dtmf_event.volume);
}
if (dtmf_return_value == 0) {
- dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
- &dtmf_output);
+ dtmf_return_value =
+ dtmf_tone_generator_->Generate(overdub_length, &dtmf_output);
assert(overdub_length == dtmf_output.Size());
}
dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
@@ -2051,7 +2049,7 @@
RTC_LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " "
<< channels;
// TODO(hlundin): Change to an enumerator and skip assert.
- assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
+ assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
assert(channels > 0);
fs_hz_ = fs_hz;
@@ -2085,7 +2083,7 @@
// Move index so that we create a small set of future samples (all 0).
sync_buffer_->set_next_index(sync_buffer_->next_index() -
- expand_->overlap_length());
+ expand_->overlap_length());
normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_,
expand_.get()));
@@ -2095,8 +2093,8 @@
fs_hz, channels, *background_noise_, expand_->overlap_length()));
// Delete ComfortNoise object and create a new one.
- comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
- sync_buffer_.get()));
+ comfort_noise_.reset(
+ new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get()));
// Verify that |decoded_buffer_| is long enough.
if (decoded_buffer_length_ < kMaxFrameSize * channels) {
diff --git a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index 585fd8f..57fc682 100644
--- a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -86,8 +86,8 @@
return kPacketDuration;
}
- bool PacketHasFec(
- const uint8_t* encoded, size_t encoded_len) const /* override */ {
+ bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const
+ /* override */ {
ADD_FAILURE() << "Since going through ParsePayload, PacketHasFec should "
"never get called.";
return fec_enabled_;
@@ -123,40 +123,40 @@
static const int kPayloadSizeByte = 30;
static const int kFrameSizeMs = 20;
-enum logic {
- kIgnore,
- kEqual,
- kSmallerThan,
- kLargerThan,
-};
+ enum logic {
+ kIgnore,
+ kEqual,
+ kSmallerThan,
+ kLargerThan,
+ };
-struct NetEqNetworkStatsCheck {
- logic current_buffer_size_ms;
- logic preferred_buffer_size_ms;
- logic jitter_peaks_found;
- logic packet_loss_rate;
- logic expand_rate;
- logic speech_expand_rate;
- logic preemptive_rate;
- logic accelerate_rate;
- logic secondary_decoded_rate;
- logic secondary_discarded_rate;
- logic clockdrift_ppm;
- logic added_zero_samples;
- NetEqNetworkStatistics stats_ref;
-};
+ struct NetEqNetworkStatsCheck {
+ logic current_buffer_size_ms;
+ logic preferred_buffer_size_ms;
+ logic jitter_peaks_found;
+ logic packet_loss_rate;
+ logic expand_rate;
+ logic speech_expand_rate;
+ logic preemptive_rate;
+ logic accelerate_rate;
+ logic secondary_decoded_rate;
+ logic secondary_discarded_rate;
+ logic clockdrift_ppm;
+ logic added_zero_samples;
+ NetEqNetworkStatistics stats_ref;
+ };
-NetEqNetworkStatsTest(NetEqDecoder codec,
- int sample_rate_hz,
- MockAudioDecoder* decoder)
- : NetEqExternalDecoderTest(codec, sample_rate_hz, decoder),
- external_decoder_(decoder),
- samples_per_ms_(sample_rate_hz / 1000),
- frame_size_samples_(kFrameSizeMs * samples_per_ms_),
- rtp_generator_(new test::RtpGenerator(samples_per_ms_)),
- last_lost_time_(0),
- packet_loss_interval_(0xffffffff) {
- Init();
+ NetEqNetworkStatsTest(NetEqDecoder codec,
+ int sample_rate_hz,
+ MockAudioDecoder* decoder)
+ : NetEqExternalDecoderTest(codec, sample_rate_hz, decoder),
+ external_decoder_(decoder),
+ samples_per_ms_(sample_rate_hz / 1000),
+ frame_size_samples_(kFrameSizeMs * samples_per_ms_),
+ rtp_generator_(new test::RtpGenerator(samples_per_ms_)),
+ last_lost_time_(0),
+ packet_loss_interval_(0xffffffff) {
+ Init();
}
bool Lost(uint32_t send_time) {
@@ -168,8 +168,9 @@
}
void SetPacketLossRate(double loss_rate) {
- packet_loss_interval_ = (loss_rate >= 1e-3 ?
- static_cast<double>(kFrameSizeMs) / loss_rate : 0xffffffff);
+ packet_loss_interval_ =
+ (loss_rate >= 1e-3 ? static_cast<double>(kFrameSizeMs) / loss_rate
+ : 0xffffffff);
}
// |stats_ref|
@@ -181,19 +182,19 @@
NetEqNetworkStatistics stats;
neteq()->NetworkStatistics(&stats);
-#define CHECK_NETEQ_NETWORK_STATS(x)\
- switch (expects.x) {\
- case kEqual:\
- EXPECT_EQ(stats.x, expects.stats_ref.x);\
- break;\
- case kSmallerThan:\
- EXPECT_LT(stats.x, expects.stats_ref.x);\
- break;\
- case kLargerThan:\
- EXPECT_GT(stats.x, expects.stats_ref.x);\
- break;\
- default:\
- break;\
+#define CHECK_NETEQ_NETWORK_STATS(x) \
+ switch (expects.x) { \
+ case kEqual: \
+ EXPECT_EQ(stats.x, expects.stats_ref.x); \
+ break; \
+ case kSmallerThan: \
+ EXPECT_LT(stats.x, expects.stats_ref.x); \
+ break; \
+ case kLargerThan: \
+ EXPECT_GT(stats.x, expects.stats_ref.x); \
+ break; \
+ default: \
+ break; \
}
CHECK_NETEQ_NETWORK_STATS(current_buffer_size_ms);
@@ -220,15 +221,13 @@
uint32_t next_send_time;
// Initiate |last_lost_time_|.
- time_now = next_send_time = last_lost_time_ =
- rtp_generator_->GetRtpHeader(kPayloadType, frame_size_samples_,
- &rtp_header_);
+ time_now = next_send_time = last_lost_time_ = rtp_generator_->GetRtpHeader(
+ kPayloadType, frame_size_samples_, &rtp_header_);
for (int k = 0; k < num_loops; ++k) {
// Delay by one frame such that the FEC can come in.
while (time_now + kFrameSizeMs >= next_send_time) {
- next_send_time = rtp_generator_->GetRtpHeader(kPayloadType,
- frame_size_samples_,
- &rtp_header_);
+ next_send_time = rtp_generator_->GetRtpHeader(
+ kPayloadType, frame_size_samples_, &rtp_header_);
if (!Lost(next_send_time)) {
static const uint8_t payload[kPayloadSizeByte] = {0};
InsertPacket(rtp_header_, payload, next_send_time);
@@ -243,21 +242,19 @@
void DecodeFecTest() {
external_decoder_->set_fec_enabled(false);
- NetEqNetworkStatsCheck expects = {
- kIgnore, // current_buffer_size_ms
- kIgnore, // preferred_buffer_size_ms
- kIgnore, // jitter_peaks_found
- kEqual, // packet_loss_rate
- kEqual, // expand_rate
- kEqual, // voice_expand_rate
- kIgnore, // preemptive_rate
- kEqual, // accelerate_rate
- kEqual, // decoded_fec_rate
- kEqual, // discarded_fec_rate
- kIgnore, // clockdrift_ppm
- kEqual, // added_zero_samples
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- };
+ NetEqNetworkStatsCheck expects = {kIgnore, // current_buffer_size_ms
+ kIgnore, // preferred_buffer_size_ms
+ kIgnore, // jitter_peaks_found
+ kEqual, // packet_loss_rate
+ kEqual, // expand_rate
+ kEqual, // voice_expand_rate
+ kIgnore, // preemptive_rate
+ kEqual, // accelerate_rate
+ kEqual, // decoded_fec_rate
+ kEqual, // discarded_fec_rate
+ kIgnore, // clockdrift_ppm
+ kEqual, // added_zero_samples
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
RunTest(50, expects);
// Next we introduce packet losses.
@@ -277,21 +274,19 @@
}
void NoiseExpansionTest() {
- NetEqNetworkStatsCheck expects = {
- kIgnore, // current_buffer_size_ms
- kIgnore, // preferred_buffer_size_ms
- kIgnore, // jitter_peaks_found
- kEqual, // packet_loss_rate
- kEqual, // expand_rate
- kEqual, // speech_expand_rate
- kIgnore, // preemptive_rate
- kEqual, // accelerate_rate
- kEqual, // decoded_fec_rate
- kEqual, // discard_fec_rate
- kIgnore, // clockdrift_ppm
- kEqual, // added_zero_samples
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- };
+ NetEqNetworkStatsCheck expects = {kIgnore, // current_buffer_size_ms
+ kIgnore, // preferred_buffer_size_ms
+ kIgnore, // jitter_peaks_found
+ kEqual, // packet_loss_rate
+ kEqual, // expand_rate
+ kEqual, // speech_expand_rate
+ kIgnore, // preemptive_rate
+ kEqual, // accelerate_rate
+ kEqual, // decoded_fec_rate
+ kEqual, // discard_fec_rate
+ kIgnore, // clockdrift_ppm
+ kEqual, // added_zero_samples
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
RunTest(50, expects);
SetPacketLossRate(1);
diff --git a/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index 49facdd..ef4c235 100644
--- a/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -11,9 +11,9 @@
// Test to verify correct stereo and multi-channel operation.
#include <algorithm>
+#include <list>
#include <memory>
#include <string>
-#include <list>
#include "api/audio/audio_frame.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
@@ -72,17 +72,17 @@
input_ = new int16_t[frame_size_samples_];
encoded_ = new uint8_t[2 * frame_size_samples_];
input_multi_channel_ = new int16_t[frame_size_samples_ * num_channels_];
- encoded_multi_channel_ = new uint8_t[frame_size_samples_ * 2 *
- num_channels_];
+ encoded_multi_channel_ =
+ new uint8_t[frame_size_samples_ * 2 * num_channels_];
}
~NetEqStereoTest() {
delete neteq_mono_;
delete neteq_;
- delete [] input_;
- delete [] encoded_;
- delete [] input_multi_channel_;
- delete [] encoded_multi_channel_;
+ delete[] input_;
+ delete[] encoded_;
+ delete[] input_multi_channel_;
+ delete[] encoded_multi_channel_;
}
virtual void SetUp() {
@@ -142,17 +142,15 @@
if (!input_file_->Read(frame_size_samples_, input_)) {
return -1;
}
- payload_size_bytes_ = WebRtcPcm16b_Encode(input_, frame_size_samples_,
- encoded_);
+ payload_size_bytes_ =
+ WebRtcPcm16b_Encode(input_, frame_size_samples_, encoded_);
if (frame_size_samples_ * 2 != payload_size_bytes_) {
return -1;
}
- int next_send_time = rtp_generator_mono_.GetRtpHeader(kPayloadTypeMono,
- frame_size_samples_,
- &rtp_header_mono_);
- test::InputAudioFile::DuplicateInterleaved(input_, frame_size_samples_,
- num_channels_,
- input_multi_channel_);
+ int next_send_time = rtp_generator_mono_.GetRtpHeader(
+ kPayloadTypeMono, frame_size_samples_, &rtp_header_mono_);
+ test::InputAudioFile::DuplicateInterleaved(
+ input_, frame_size_samples_, num_channels_, input_multi_channel_);
multi_payload_size_bytes_ = WebRtcPcm16b_Encode(
input_multi_channel_, frame_size_samples_ * num_channels_,
encoded_multi_channel_);
@@ -267,8 +265,7 @@
class NetEqStereoTestNoJitter : public NetEqStereoTest {
protected:
- NetEqStereoTestNoJitter()
- : NetEqStereoTest() {
+ NetEqStereoTestNoJitter() : NetEqStereoTest() {
// Start the sender 100 ms before the receiver to pre-fill the buffer.
// This is to avoid doing preemptive expand early in the test.
// TODO(hlundin): Mock the decision making instead to control the modes.
@@ -282,17 +279,15 @@
class NetEqStereoTestPositiveDrift : public NetEqStereoTest {
protected:
- NetEqStereoTestPositiveDrift()
- : NetEqStereoTest(),
- drift_factor(0.9) {
+ NetEqStereoTestPositiveDrift() : NetEqStereoTest(), drift_factor(0.9) {
// Start the sender 100 ms before the receiver to pre-fill the buffer.
// This is to avoid doing preemptive expand early in the test.
// TODO(hlundin): Mock the decision making instead to control the modes.
last_arrival_time_ = -100;
}
virtual int GetArrivalTime(int send_time) {
- int arrival_time = last_arrival_time_ +
- drift_factor * (send_time - last_send_time_);
+ int arrival_time =
+ last_arrival_time_ + drift_factor * (send_time - last_send_time_);
last_send_time_ = send_time;
last_arrival_time_ = arrival_time;
return arrival_time;
@@ -307,8 +302,7 @@
class NetEqStereoTestNegativeDrift : public NetEqStereoTestPositiveDrift {
protected:
- NetEqStereoTestNegativeDrift()
- : NetEqStereoTestPositiveDrift() {
+ NetEqStereoTestNegativeDrift() : NetEqStereoTestPositiveDrift() {
drift_factor = 1.1;
last_arrival_time_ = 0;
}
@@ -322,10 +316,7 @@
protected:
static const int kDelayInterval = 10;
static const int kDelay = 1000;
- NetEqStereoTestDelays()
- : NetEqStereoTest(),
- frame_index_(0) {
- }
+ NetEqStereoTestDelays() : NetEqStereoTest(), frame_index_(0) {}
virtual int GetArrivalTime(int send_time) {
// Deliver immediately, unless we have a back-log.
@@ -349,22 +340,16 @@
class NetEqStereoTestLosses : public NetEqStereoTest {
protected:
static const int kLossInterval = 10;
- NetEqStereoTestLosses()
- : NetEqStereoTest(),
- frame_index_(0) {
- }
+ NetEqStereoTestLosses() : NetEqStereoTest(), frame_index_(0) {}
- virtual bool Lost() {
- return (++frame_index_) % kLossInterval == 0;
- }
+ virtual bool Lost() { return (++frame_index_) % kLossInterval == 0; }
// TODO(hlundin): NetEq is not giving bitexact results for these cases.
virtual void VerifyOutput(size_t num_samples) {
for (size_t i = 0; i < num_samples; ++i) {
const int16_t* output_data = output_.data();
const int16_t* output_multi_channel_data = output_multi_channel_.data();
- auto first_channel_sample =
- output_multi_channel_data[i * num_channels_];
+ auto first_channel_sample = output_multi_channel_data[i * num_channels_];
for (size_t j = 0; j < num_channels_; ++j) {
const int kErrorMargin = 200;
EXPECT_NEAR(output_data[i],
@@ -384,7 +369,6 @@
RunTest(100);
}
-
// Creates a list of parameter sets.
std::list<TestParameters> GetTestParameters() {
std::list<TestParameters> l;
@@ -412,9 +396,9 @@
// Pretty-printing the test parameters in case of an error.
void PrintTo(const TestParameters& p, ::std::ostream* os) {
- *os << "{frame_size = " << p.frame_size <<
- ", num_channels = " << p.num_channels <<
- ", sample_rate = " << p.sample_rate << "}";
+ *os << "{frame_size = " << p.frame_size
+ << ", num_channels = " << p.num_channels
+ << ", sample_rate = " << p.sample_rate << "}";
}
// Instantiate the tests. Each test is instantiated using the function above,
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc
index 6239985..4ed7a6b 100644
--- a/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_unittest.cc
@@ -61,17 +61,17 @@
const std::string& checksum_win_32,
const std::string& checksum_win_64) {
#if defined(WEBRTC_ANDROID)
- #ifdef WEBRTC_ARCH_64_BITS
- return checksum_android_64;
- #else
- return checksum_android_32;
- #endif // WEBRTC_ARCH_64_BITS
+#ifdef WEBRTC_ARCH_64_BITS
+ return checksum_android_64;
+#else
+ return checksum_android_32;
+#endif // WEBRTC_ARCH_64_BITS
#elif defined(WEBRTC_WIN)
- #ifdef WEBRTC_ARCH_64_BITS
- return checksum_win_64;
- #else
- return checksum_win_32;
- #endif // WEBRTC_ARCH_64_BITS
+#ifdef WEBRTC_ARCH_64_BITS
+ return checksum_win_64;
+#else
+ return checksum_win_32;
+#endif // WEBRTC_ARCH_64_BITS
#else
return checksum_general;
#endif // WEBRTC_WIN
@@ -107,7 +107,8 @@
stats->set_jitter(stats_raw.jitter);
}
-void AddMessage(FILE* file, rtc::MessageDigest* digest,
+void AddMessage(FILE* file,
+ rtc::MessageDigest* digest,
const std::string& message) {
int32_t size = message.length();
if (file)
@@ -164,7 +165,8 @@
explicit ResultSink(const std::string& output_file);
~ResultSink();
- template<typename T> void AddResult(const T* test_results, size_t length);
+ template <typename T>
+ void AddResult(const T* test_results, size_t length);
void AddResult(const NetEqNetworkStatistics& stats);
void AddResult(const RtcpStatistics& stats);
@@ -190,7 +192,7 @@
fclose(output_fp_);
}
-template<typename T>
+template <typename T>
void ResultSink::AddResult(const T* test_results, size_t length) {
if (output_fp_) {
ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
@@ -247,7 +249,7 @@
virtual void SetUp();
virtual void TearDown();
void SelectDecoders(NetEqDecoder* used_codec);
- void OpenInputFile(const std::string &rtp_file);
+ void OpenInputFile(const std::string& rtp_file);
void Process();
void DecodeAndCompare(const std::string& rtp_file,
@@ -265,9 +267,11 @@
uint8_t* payload,
size_t* payload_len);
- void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
+ void WrapTest(uint16_t start_seq_no,
+ uint32_t start_timestamp,
const std::set<uint16_t>& drop_seq_numbers,
- bool expect_seq_no_wrap, bool expect_timestamp_wrap);
+ bool expect_seq_no_wrap,
+ bool expect_timestamp_wrap);
void LongCngWithClockDrift(double drift_factor,
double network_freeze_ms,
@@ -316,7 +320,7 @@
delete neteq_;
}
-void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
+void NetEqDecodingTest::OpenInputFile(const std::string& rtp_file) {
rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
@@ -384,8 +388,8 @@
ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
ASSERT_NO_FATAL_FAILURE(Process());
- ASSERT_NO_FATAL_FAILURE(output.AddResult(
- out_frame_.data(), out_frame_.samples_per_channel_));
+ ASSERT_NO_FATAL_FAILURE(
+ output.AddResult(out_frame_.data(), out_frame_.samples_per_channel_));
// Query the network statistics API once per second
if (sim_clock_ % 1000 == 0) {
@@ -447,7 +451,7 @@
rtp_info->ssrc = 0x1234; // Just an arbitrary SSRC.
rtp_info->payloadType = 98; // WB CNG.
rtp_info->markerBit = 0;
- payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
+ payload[0] = 64; // Noise level -64 dBov, quite arbitrarily chosen.
*payload_len = 1; // Only noise level, no spectral parameters.
}
@@ -462,36 +466,29 @@
const std::string input_rtp_file =
webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
- const std::string output_checksum = PlatformChecksum(
- "0c6dc227f781c81a229970f8fceda1a012498cba",
- "15c4a2202877a414515e218bdb7992f0ad53e5af",
- "not used",
- "0c6dc227f781c81a229970f8fceda1a012498cba",
- "25fc4c863caa499aa447a5b8d059f5452cbcc500");
+ const std::string output_checksum =
+ PlatformChecksum("0c6dc227f781c81a229970f8fceda1a012498cba",
+ "15c4a2202877a414515e218bdb7992f0ad53e5af", "not used",
+ "0c6dc227f781c81a229970f8fceda1a012498cba",
+ "25fc4c863caa499aa447a5b8d059f5452cbcc500");
const std::string network_stats_checksum =
PlatformChecksum("4b2370f5c794741d2a46be5c7935c66ef3fb53e9",
- "e339cb2adf5ab3dfc21cb7205d670a34751e8336",
- "not used",
+ "e339cb2adf5ab3dfc21cb7205d670a34751e8336", "not used",
"4b2370f5c794741d2a46be5c7935c66ef3fb53e9",
"4b2370f5c794741d2a46be5c7935c66ef3fb53e9");
- const std::string rtcp_stats_checksum = PlatformChecksum(
- "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
- "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4",
- "not used",
- "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
- "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
+ const std::string rtcp_stats_checksum =
+ PlatformChecksum("b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+ "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4", "not used",
+ "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+ "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
- DecodeAndCompare(input_rtp_file,
- output_checksum,
- network_stats_checksum,
- rtcp_stats_checksum,
- FLAG_gen_ref);
+ DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+ rtcp_stats_checksum, FLAG_gen_ref);
}
-#if !defined(WEBRTC_IOS) && \
- defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusBitExactness TestOpusBitExactness
#else
@@ -501,12 +498,12 @@
const std::string input_rtp_file =
webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
- const std::string output_checksum = PlatformChecksum(
- "14a63b3c7b925c82296be4bafc71bec85f2915c2",
- "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
- "5876e52dda90d5ca433c3726555b907b97c86374",
- "14a63b3c7b925c82296be4bafc71bec85f2915c2",
- "14a63b3c7b925c82296be4bafc71bec85f2915c2");
+ const std::string output_checksum =
+ PlatformChecksum("14a63b3c7b925c82296be4bafc71bec85f2915c2",
+ "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
+ "5876e52dda90d5ca433c3726555b907b97c86374",
+ "14a63b3c7b925c82296be4bafc71bec85f2915c2",
+ "14a63b3c7b925c82296be4bafc71bec85f2915c2");
const std::string network_stats_checksum =
PlatformChecksum("adb3272498e436d1c019cbfd71610e9510c54497",
@@ -515,22 +512,18 @@
"adb3272498e436d1c019cbfd71610e9510c54497",
"adb3272498e436d1c019cbfd71610e9510c54497");
- const std::string rtcp_stats_checksum = PlatformChecksum(
- "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
- "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
- "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
- "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
- "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
+ const std::string rtcp_stats_checksum =
+ PlatformChecksum("e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
- DecodeAndCompare(input_rtp_file,
- output_checksum,
- network_stats_checksum,
- rtcp_stats_checksum,
- FLAG_gen_ref);
+ DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+ rtcp_stats_checksum, FLAG_gen_ref);
}
-#if !defined(WEBRTC_IOS) && \
- defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
defined(WEBRTC_CODEC_OPUS)
#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
#else
@@ -805,10 +798,8 @@
const bool kGetAudioDuringFreezeRecovery = false;
const int kDelayToleranceMs = 20;
const int kMaxTimeToSpeechMs = 100;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -819,10 +810,8 @@
const bool kGetAudioDuringFreezeRecovery = false;
const int kDelayToleranceMs = 20;
const int kMaxTimeToSpeechMs = 100;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -833,10 +822,8 @@
const bool kGetAudioDuringFreezeRecovery = false;
const int kDelayToleranceMs = 50;
const int kMaxTimeToSpeechMs = 200;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -847,10 +834,8 @@
const bool kGetAudioDuringFreezeRecovery = false;
const int kDelayToleranceMs = 20;
const int kMaxTimeToSpeechMs = 100;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -861,10 +846,8 @@
const bool kGetAudioDuringFreezeRecovery = true;
const int kDelayToleranceMs = 20;
const int kMaxTimeToSpeechMs = 100;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -874,10 +857,8 @@
const bool kGetAudioDuringFreezeRecovery = false;
const int kDelayToleranceMs = 10;
const int kMaxTimeToSpeechMs = 50;
- LongCngWithClockDrift(kDriftFactor,
- kNetworkFreezeTimeMs,
- kGetAudioDuringFreezeRecovery,
- kDelayToleranceMs,
+ LongCngWithClockDrift(kDriftFactor, kNetworkFreezeTimeMs,
+ kGetAudioDuringFreezeRecovery, kDelayToleranceMs,
kMaxTimeToSpeechMs);
}
@@ -1002,11 +983,11 @@
ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
// Next packet.
- rtp_info.timestamp += rtc::checked_cast<uint32_t>(
- expected_samples_per_channel);
+ rtp_info.timestamp +=
+ rtc::checked_cast<uint32_t>(expected_samples_per_channel);
rtp_info.sequenceNumber++;
- receive_timestamp += rtc::checked_cast<uint32_t>(
- expected_samples_per_channel);
+ receive_timestamp +=
+ rtc::checked_cast<uint32_t>(expected_samples_per_channel);
}
output.Reset();
@@ -1099,8 +1080,8 @@
if (packets_inserted > 4) {
// Expect preferred and actual buffer size to be no more than 2 frames.
EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
- EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
- algorithmic_delay_ms_);
+ EXPECT_LE(network_stats.current_buffer_size_ms,
+ kFrameSizeMs * 2 + algorithmic_delay_ms_);
}
last_seq_no = seq_no;
last_timestamp = timestamp;
@@ -1166,8 +1147,8 @@
const int kSamples = kFrameSizeMs * kSampleRateKhz;
const size_t kPayloadBytes = kSamples * 2;
- const int algorithmic_delay_samples = std::max(
- algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
+ const int algorithmic_delay_samples =
+ std::max(algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
// Insert three speech packets. Three are needed to get the frame length
// correct.
uint8_t payload[kPayloadBytes] = {0};
@@ -1239,7 +1220,9 @@
*playout_timestamp);
}
-TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
+TEST_F(NetEqDecodingTest, DiscardDuplicateCng) {
+ DuplicateCng();
+}
TEST_F(NetEqDecodingTest, CngFirst) {
uint16_t seq_no = 0;
@@ -1493,25 +1476,25 @@
return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
<< " != " << b.timestamp_ << ")";
if (a.sample_rate_hz_ != b.sample_rate_hz_)
- return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
- << a.sample_rate_hz_
- << " != " << b.sample_rate_hz_ << ")";
+ return ::testing::AssertionFailure()
+ << "sample_rate_hz_ diff (" << a.sample_rate_hz_
+ << " != " << b.sample_rate_hz_ << ")";
if (a.samples_per_channel_ != b.samples_per_channel_)
return ::testing::AssertionFailure()
<< "samples_per_channel_ diff (" << a.samples_per_channel_
<< " != " << b.samples_per_channel_ << ")";
if (a.num_channels_ != b.num_channels_)
- return ::testing::AssertionFailure() << "num_channels_ diff ("
- << a.num_channels_
- << " != " << b.num_channels_ << ")";
+ return ::testing::AssertionFailure()
+ << "num_channels_ diff (" << a.num_channels_
+ << " != " << b.num_channels_ << ")";
if (a.speech_type_ != b.speech_type_)
- return ::testing::AssertionFailure() << "speech_type_ diff ("
- << a.speech_type_
- << " != " << b.speech_type_ << ")";
+ return ::testing::AssertionFailure()
+ << "speech_type_ diff (" << a.speech_type_
+ << " != " << b.speech_type_ << ")";
if (a.vad_activity_ != b.vad_activity_)
- return ::testing::AssertionFailure() << "vad_activity_ diff ("
- << a.vad_activity_
- << " != " << b.vad_activity_ << ")";
+ return ::testing::AssertionFailure()
+ << "vad_activity_ diff (" << a.vad_activity_
+ << " != " << b.vad_activity_ << ")";
return ::testing::AssertionSuccess();
}
@@ -1520,9 +1503,9 @@
::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
if (!res)
return res;
- if (memcmp(
- a.data(), b.data(),
- a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) != 0) {
+ if (memcmp(a.data(), b.data(),
+ a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) !=
+ 0) {
return ::testing::AssertionFailure() << "data_ diff";
}
return ::testing::AssertionSuccess();
diff --git a/modules/audio_coding/neteq/normal.cc b/modules/audio_coding/neteq/normal.cc
index f10158c..83f7616 100644
--- a/modules/audio_coding/neteq/normal.cc
+++ b/modules/audio_coding/neteq/normal.cc
@@ -76,8 +76,7 @@
// Adjust muting factor if needed (to BGN level).
size_t energy_length =
std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
- int scaling = 6 + fs_shift
- - WebRtcSpl_NormW32(decoded_max * decoded_max);
+ int scaling = 6 + fs_shift - WebRtcSpl_NormW32(decoded_max * decoded_max);
scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
energy_length, scaling);
@@ -90,8 +89,7 @@
}
int local_mute_factor = 16384; // 1.0 in Q14.
- if ((energy != 0) &&
- (energy > background_noise_.Energy(channel_ix))) {
+ if ((energy != 0) && (energy > background_noise_.Energy(channel_ix))) {
// Normalize new frame energy to 15 bits.
scaling = WebRtcSpl_NormW32(energy) - 16;
// We want background_noise_.energy() / energy in Q14.
diff --git a/modules/audio_coding/neteq/normal.h b/modules/audio_coding/neteq/normal.h
index 14323ea..41bd30a 100644
--- a/modules/audio_coding/neteq/normal.h
+++ b/modules/audio_coding/neteq/normal.h
@@ -54,7 +54,8 @@
// |output| defines the number of channels that will be used when
// de-interleaving |input|. |last_mode| contains the mode used in the previous
// GetAudio call (i.e., not the current one).
- int Process(const int16_t* input, size_t length,
+ int Process(const int16_t* input,
+ size_t length,
Modes last_mode,
AudioMultiVector* output);
diff --git a/modules/audio_coding/neteq/normal_unittest.cc b/modules/audio_coding/neteq/normal_unittest.cc
index ab99d9a..106762a 100644
--- a/modules/audio_coding/neteq/normal_unittest.cc
+++ b/modules/audio_coding/neteq/normal_unittest.cc
@@ -39,7 +39,7 @@
return 0;
}
-} // namespace
+} // namespace
TEST(Normal, CreateAndDestroy) {
MockDecoderDatabase db;
@@ -84,10 +84,7 @@
// and using this as a denominator would lead to problems.
int input_size_samples = 63;
EXPECT_EQ(input_size_samples,
- normal.Process(input,
- input_size_samples,
- kModeExpand,
- &output));
+ normal.Process(input, input_size_samples, kModeExpand, &output));
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
@@ -139,10 +136,7 @@
EXPECT_CALL(expand, Process(_)).WillOnce(Invoke(ExpandProcess120ms));
EXPECT_CALL(expand, Reset());
EXPECT_EQ(static_cast<int>(kPacketsizeBytes),
- normal.Process(input,
- kPacketsizeBytes,
- kModeExpand,
- &output));
+ normal.Process(input, kPacketsizeBytes, kModeExpand, &output));
EXPECT_EQ(kPacketsizeBytes, output.Size());
diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc
index f7b622d..c04534e 100644
--- a/modules/audio_coding/neteq/packet_buffer.cc
+++ b/modules/audio_coding/neteq/packet_buffer.cc
@@ -29,11 +29,8 @@
class NewTimestampIsLarger {
public:
explicit NewTimestampIsLarger(const Packet& new_packet)
- : new_packet_(new_packet) {
- }
- bool operator()(const Packet& packet) {
- return (new_packet_ >= packet);
- }
+ : new_packet_(new_packet) {}
+ bool operator()(const Packet& packet) { return (new_packet_ >= packet); }
private:
const Packet& new_packet_;
@@ -102,8 +99,7 @@
// should be inserted. The list is searched from the back, since the most
// likely case is that the new packet should be near the end of the list.
PacketList::reverse_iterator rit = std::find_if(
- buffer_.rbegin(), buffer_.rend(),
- NewTimestampIsLarger(packet));
+ buffer_.rbegin(), buffer_.rend(), NewTimestampIsLarger(packet));
// The new packet is to be inserted to the right of |rit|. If it has the same
// timestamp as |rit|, which has a higher priority, do not insert the new
diff --git a/modules/audio_coding/neteq/post_decode_vad.cc b/modules/audio_coding/neteq/post_decode_vad.cc
index a09d18f..9999d67 100644
--- a/modules/audio_coding/neteq/post_decode_vad.cc
+++ b/modules/audio_coding/neteq/post_decode_vad.cc
@@ -45,7 +45,8 @@
}
}
-void PostDecodeVad::Update(int16_t* signal, size_t length,
+void PostDecodeVad::Update(int16_t* signal,
+ size_t length,
AudioDecoder::SpeechType speech_type,
bool sid_frame,
int fs_hz) {
@@ -72,13 +73,13 @@
active_speech_ = false;
// Loop through frame sizes 30, 20, and 10 ms.
for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
- vad_frame_size_ms -= 10) {
+ vad_frame_size_ms -= 10) {
size_t vad_frame_size_samples =
static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
while (length - vad_sample_index >= vad_frame_size_samples) {
- int vad_return = WebRtcVad_Process(
- vad_instance_, fs_hz, &signal[vad_sample_index],
- vad_frame_size_samples);
+ int vad_return =
+ WebRtcVad_Process(vad_instance_, fs_hz, &signal[vad_sample_index],
+ vad_frame_size_samples);
active_speech_ |= (vad_return == 1);
vad_sample_index += vad_frame_size_samples;
}
diff --git a/modules/audio_coding/neteq/post_decode_vad.h b/modules/audio_coding/neteq/post_decode_vad.h
index 7b67bbe..dac95f0 100644
--- a/modules/audio_coding/neteq/post_decode_vad.h
+++ b/modules/audio_coding/neteq/post_decode_vad.h
@@ -30,8 +30,7 @@
running_(false),
active_speech_(true),
sid_interval_counter_(0),
- vad_instance_(NULL) {
- }
+ vad_instance_(NULL) {}
virtual ~PostDecodeVad();
@@ -46,8 +45,11 @@
// Updates post-decode VAD with the audio data in |signal| having |length|
// samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
- void Update(int16_t* signal, size_t length,
- AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
+ void Update(int16_t* signal,
+ size_t length,
+ AudioDecoder::SpeechType speech_type,
+ bool sid_frame,
+ int fs_hz);
// Accessors.
bool enabled() const { return enabled_; }
diff --git a/modules/audio_coding/neteq/preemptive_expand.cc b/modules/audio_coding/neteq/preemptive_expand.cc
index bc75389..4702078 100644
--- a/modules/audio_coding/neteq/preemptive_expand.cc
+++ b/modules/audio_coding/neteq/preemptive_expand.cc
@@ -50,8 +50,7 @@
// but we must ensure that best_correlation is not larger than the length of
// the new data.
// but we must ensure that best_correlation is not larger than the new data.
- *peak_index = std::min(*peak_index,
- len - old_data_length_per_channel_);
+ *peak_index = std::min(*peak_index, len - old_data_length_per_channel_);
}
PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
@@ -68,13 +67,13 @@
// Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
// or passive speech.
if (((best_correlation > kCorrelationThreshold) &&
- (old_data_length_per_channel_ <= fs_mult_120)) ||
+ (old_data_length_per_channel_ <= fs_mult_120)) ||
!active_speech) {
// Do accelerate operation by overlap add.
// Set length of the first part, not to be modified.
- size_t unmodified_length = std::max(old_data_length_per_channel_,
- fs_mult_120);
+ size_t unmodified_length =
+ std::max(old_data_length_per_channel_, fs_mult_120);
// Copy first part, including cross-fade region.
output->PushBackInterleaved(
input, (unmodified_length + peak_index) * num_channels_);
@@ -107,8 +106,8 @@
size_t num_channels,
const BackgroundNoise& background_noise,
size_t overlap_samples) const {
- return new PreemptiveExpand(
- sample_rate_hz, num_channels, background_noise, overlap_samples);
+ return new PreemptiveExpand(sample_rate_hz, num_channels, background_noise,
+ overlap_samples);
}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/preemptive_expand.h b/modules/audio_coding/neteq/preemptive_expand.h
index 303501d..197d3f1 100644
--- a/modules/audio_coding/neteq/preemptive_expand.h
+++ b/modules/audio_coding/neteq/preemptive_expand.h
@@ -35,15 +35,14 @@
size_t overlap_samples)
: TimeStretch(sample_rate_hz, num_channels, background_noise),
old_data_length_per_channel_(0),
- overlap_samples_(overlap_samples) {
- }
+ overlap_samples_(overlap_samples) {}
// This method performs the actual PreemptiveExpand operation. The samples are
// read from |input|, of length |input_length| elements, and are written to
// |output|. The number of samples added through time-stretching is
// is provided in the output |length_change_samples|. The method returns
// the outcome of the operation as an enumerator value.
- ReturnCodes Process(const int16_t *pw16_decoded,
+ ReturnCodes Process(const int16_t* pw16_decoded,
size_t len,
size_t old_data_len,
AudioMultiVector* output,
@@ -77,11 +76,10 @@
PreemptiveExpandFactory() {}
virtual ~PreemptiveExpandFactory() {}
- virtual PreemptiveExpand* Create(
- int sample_rate_hz,
- size_t num_channels,
- const BackgroundNoise& background_noise,
- size_t overlap_samples) const;
+ virtual PreemptiveExpand* Create(int sample_rate_hz,
+ size_t num_channels,
+ const BackgroundNoise& background_noise,
+ size_t overlap_samples) const;
};
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/random_vector.cc b/modules/audio_coding/neteq/random_vector.cc
index c2df8cf..ada1758 100644
--- a/modules/audio_coding/neteq/random_vector.cc
+++ b/modules/audio_coding/neteq/random_vector.cc
@@ -13,29 +13,35 @@
namespace webrtc {
const int16_t RandomVector::kRandomTable[RandomVector::kRandomTableSize] = {
- 2680, 5532, 441, 5520, 16170, -5146, -1024, -8733, 3115, 9598, -10380,
- -4959, -1280, -21716, 7133, -1522, 13458, -3902, 2789, -675, 3441, 5016,
- -13599, -4003, -2739, 3922, -7209, 13352, -11617, -7241, 12905, -2314, 5426,
- 10121, -9702, 11207, -13542, 1373, 816, -5934, -12504, 4798, 1811, 4112,
- -613, 201, -10367, -2960, -2419, 3442, 4299, -6116, -6092, 1552, -1650,
- -480, -1237, 18720, -11858, -8303, -8212, 865, -2890, -16968, 12052, -5845,
- -5912, 9777, -5665, -6294, 5426, -4737, -6335, 1652, 761, 3832, 641, -8552,
- -9084, -5753, 8146, 12156, -4915, 15086, -1231, -1869, 11749, -9319, -6403,
- 11407, 6232, -1683, 24340, -11166, 4017, -10448, 3153, -2936, 6212, 2891,
- -866, -404, -4807, -2324, -1917, -2388, -6470, -3895, -10300, 5323, -5403,
- 2205, 4640, 7022, -21186, -6244, -882, -10031, -3395, -12885, 7155, -5339,
- 5079, -2645, -9515, 6622, 14651, 15852, 359, 122, 8246, -3502, -6696, -3679,
- -13535, -1409, -704, -7403, -4007, 1798, 279, -420, -12796, -14219, 1141,
- 3359, 11434, 7049, -6684, -7473, 14283, -4115, -9123, -8969, 4152, 4117,
- 13792, 5742, 16168, 8661, -1609, -6095, 1881, 14380, -5588, 6758, -6425,
- -22969, -7269, 7031, 1119, -1611, -5850, -11281, 3559, -8952, -10146, -4667,
- -16251, -1538, 2062, -1012, -13073, 227, -3142, -5265, 20, 5770, -7559,
- 4740, -4819, 992, -8208, -7130, -4652, 6725, 7369, -1036, 13144, -1588,
- -5304, -2344, -449, -5705, -8894, 5205, -17904, -11188, -1022, 4852, 10101,
- -5255, -4200, -752, 7941, -1543, 5959, 14719, 13346, 17045, -15605, -1678,
- -1600, -9230, 68, 23348, 1172, 7750, 11212, -18227, 9956, 4161, 883, 3947,
- 4341, 1014, -4889, -2603, 1246, -5630, -3596, -870, -1298, 2784, -3317,
- -6612, -20541, 4166, 4181, -8625, 3562, 12890, 4761, 3205, -12259, -8579 };
+ 2680, 5532, 441, 5520, 16170, -5146, -1024, -8733, 3115,
+ 9598, -10380, -4959, -1280, -21716, 7133, -1522, 13458, -3902,
+ 2789, -675, 3441, 5016, -13599, -4003, -2739, 3922, -7209,
+ 13352, -11617, -7241, 12905, -2314, 5426, 10121, -9702, 11207,
+ -13542, 1373, 816, -5934, -12504, 4798, 1811, 4112, -613,
+ 201, -10367, -2960, -2419, 3442, 4299, -6116, -6092, 1552,
+ -1650, -480, -1237, 18720, -11858, -8303, -8212, 865, -2890,
+ -16968, 12052, -5845, -5912, 9777, -5665, -6294, 5426, -4737,
+ -6335, 1652, 761, 3832, 641, -8552, -9084, -5753, 8146,
+ 12156, -4915, 15086, -1231, -1869, 11749, -9319, -6403, 11407,
+ 6232, -1683, 24340, -11166, 4017, -10448, 3153, -2936, 6212,
+ 2891, -866, -404, -4807, -2324, -1917, -2388, -6470, -3895,
+ -10300, 5323, -5403, 2205, 4640, 7022, -21186, -6244, -882,
+ -10031, -3395, -12885, 7155, -5339, 5079, -2645, -9515, 6622,
+ 14651, 15852, 359, 122, 8246, -3502, -6696, -3679, -13535,
+ -1409, -704, -7403, -4007, 1798, 279, -420, -12796, -14219,
+ 1141, 3359, 11434, 7049, -6684, -7473, 14283, -4115, -9123,
+ -8969, 4152, 4117, 13792, 5742, 16168, 8661, -1609, -6095,
+ 1881, 14380, -5588, 6758, -6425, -22969, -7269, 7031, 1119,
+ -1611, -5850, -11281, 3559, -8952, -10146, -4667, -16251, -1538,
+ 2062, -1012, -13073, 227, -3142, -5265, 20, 5770, -7559,
+ 4740, -4819, 992, -8208, -7130, -4652, 6725, 7369, -1036,
+ 13144, -1588, -5304, -2344, -449, -5705, -8894, 5205, -17904,
+ -11188, -1022, 4852, 10101, -5255, -4200, -752, 7941, -1543,
+ 5959, 14719, 13346, 17045, -15605, -1678, -1600, -9230, 68,
+ 23348, 1172, 7750, 11212, -18227, 9956, 4161, 883, 3947,
+ 4341, 1014, -4889, -2603, 1246, -5630, -3596, -870, -1298,
+ 2784, -3317, -6612, -20541, 4166, 4181, -8625, 3562, 12890,
+ 4761, 3205, -12259, -8579};
void RandomVector::Reset() {
seed_ = 777;
@@ -51,7 +57,7 @@
}
void RandomVector::IncreaseSeedIncrement(int16_t increase_by) {
- seed_increment_+= increase_by;
+ seed_increment_ += increase_by;
seed_increment_ &= kRandomTableSize - 1;
}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/random_vector.h b/modules/audio_coding/neteq/random_vector.h
index 18adbe0..2c6e06c 100644
--- a/modules/audio_coding/neteq/random_vector.h
+++ b/modules/audio_coding/neteq/random_vector.h
@@ -24,10 +24,7 @@
static const size_t kRandomTableSize = 256;
static const int16_t kRandomTable[kRandomTableSize];
- RandomVector()
- : seed_(777),
- seed_increment_(1) {
- }
+ RandomVector() : seed_(777), seed_increment_(1) {}
void Reset();
diff --git a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
index c3d9f33..73cd66c 100644
--- a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
+++ b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
@@ -100,8 +100,8 @@
// Not the last block; set F = 1.
*payload_ptr |= 0x80;
++payload_ptr;
- int this_offset = rtc::checked_cast<int>(
- (num_payloads - i - 1) * timestamp_offset);
+ int this_offset =
+ rtc::checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
*payload_ptr = this_offset >> 6;
++payload_ptr;
assert(kPayloadLength <= 1023); // Max length described by 10 bits.
diff --git a/modules/audio_coding/neteq/rtcp.h b/modules/audio_coding/neteq/rtcp.h
index ce2035b..45bb058 100644
--- a/modules/audio_coding/neteq/rtcp.h
+++ b/modules/audio_coding/neteq/rtcp.h
@@ -22,9 +22,7 @@
class Rtcp {
public:
- Rtcp() {
- Init(0);
- }
+ Rtcp() { Init(0); }
~Rtcp() {}
@@ -39,17 +37,17 @@
void GetStatistics(bool no_reset, RtcpStatistics* stats);
private:
- uint16_t cycles_; // The number of wrap-arounds for the sequence number.
- uint16_t max_seq_no_; // The maximum sequence number received. Starts over
- // from 0 after wrap-around.
+ uint16_t cycles_; // The number of wrap-arounds for the sequence number.
+ uint16_t max_seq_no_; // The maximum sequence number received. Starts over
+ // from 0 after wrap-around.
uint16_t base_seq_no_; // The sequence number of the first received packet.
uint32_t received_packets_; // The number of packets that have been received.
uint32_t received_packets_prior_; // Number of packets received when last
// report was generated.
uint32_t expected_prior_; // Expected number of packets, at the time of the
// last report.
- int64_t jitter_; // Current jitter value in Q4.
- int32_t transit_; // Clock difference for previous packet.
+ int64_t jitter_; // Current jitter value in Q4.
+ int32_t transit_; // Clock difference for previous packet.
RTC_DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
index c698790..3d5744c 100644
--- a/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -42,8 +42,7 @@
: uma_name_(uma_name),
report_interval_ms_(report_interval_ms),
max_value_(max_value),
- timer_(0) {
-}
+ timer_(0) {}
StatisticsCalculator::PeriodicUmaLogger::~PeriodicUmaLogger() = default;
@@ -66,8 +65,7 @@
const std::string& uma_name,
int report_interval_ms,
int max_value)
- : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
-}
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {}
StatisticsCalculator::PeriodicUmaCount::~PeriodicUmaCount() {
// Log the count for the current (incomplete) interval.
@@ -90,8 +88,7 @@
const std::string& uma_name,
int report_interval_ms,
int max_value)
- : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
-}
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {}
StatisticsCalculator::PeriodicUmaAverage::~PeriodicUmaAverage() {
// Log the average for the current (incomplete) interval.
@@ -266,11 +263,10 @@
waiting_times_.push_back(waiting_time_ms);
}
-void StatisticsCalculator::GetNetworkStatistics(
- int fs_hz,
- size_t num_samples_in_buffers,
- size_t samples_per_packet,
- NetEqNetworkStatistics *stats) {
+void StatisticsCalculator::GetNetworkStatistics(int fs_hz,
+ size_t num_samples_in_buffers,
+ size_t samples_per_packet,
+ NetEqNetworkStatistics* stats) {
RTC_DCHECK_GT(fs_hz, 0);
RTC_DCHECK(stats);
@@ -291,20 +287,18 @@
CalculateQ14Ratio(expanded_speech_samples_ + expanded_noise_samples_,
timestamps_since_last_report_);
- stats->speech_expand_rate =
- CalculateQ14Ratio(expanded_speech_samples_,
- timestamps_since_last_report_);
+ stats->speech_expand_rate = CalculateQ14Ratio(expanded_speech_samples_,
+ timestamps_since_last_report_);
- stats->secondary_decoded_rate =
- CalculateQ14Ratio(secondary_decoded_samples_,
- timestamps_since_last_report_);
+ stats->secondary_decoded_rate = CalculateQ14Ratio(
+ secondary_decoded_samples_, timestamps_since_last_report_);
const size_t discarded_secondary_samples =
discarded_secondary_packets_ * samples_per_packet;
- stats->secondary_discarded_rate = CalculateQ14Ratio(
- discarded_secondary_samples,
- static_cast<uint32_t>(discarded_secondary_samples +
- secondary_decoded_samples_));
+ stats->secondary_discarded_rate =
+ CalculateQ14Ratio(discarded_secondary_samples,
+ static_cast<uint32_t>(discarded_secondary_samples +
+ secondary_decoded_samples_));
if (waiting_times_.size() == 0) {
stats->mean_waiting_time_ms = -1;
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
index a06ddfb..42fd4c9 100644
--- a/modules/audio_coding/neteq/statistics_calculator.h
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -98,7 +98,7 @@
void GetNetworkStatistics(int fs_hz,
size_t num_samples_in_buffers,
size_t samples_per_packet,
- NetEqNetworkStatistics *stats);
+ NetEqNetworkStatistics* stats);
// Populates |preferred_buffer_size_ms|, |jitter_peaks_found| and
// |clockdrift_ppm| in |stats|. This is a convenience method, and does not
diff --git a/modules/audio_coding/neteq/sync_buffer.cc b/modules/audio_coding/neteq/sync_buffer.cc
index 28d7649..82ca16f 100644
--- a/modules/audio_coding/neteq/sync_buffer.cc
+++ b/modules/audio_coding/neteq/sync_buffer.cc
@@ -27,7 +27,7 @@
next_index_ -= samples_added;
} else {
// This means that we are pushing out future data that was never used.
-// assert(false);
+ // assert(false);
// TODO(hlundin): This assert must be disabled to support 60 ms frames.
// This should not happen even for 60 ms frames, but it does. Investigate
// why.
@@ -75,9 +75,8 @@
RTC_DCHECK(output);
const size_t samples_to_read = std::min(FutureLength(), requested_len);
output->ResetWithoutMuting();
- const size_t tot_samples_read =
- ReadInterleavedFromIndex(next_index_, samples_to_read,
- output->mutable_data());
+ const size_t tot_samples_read = ReadInterleavedFromIndex(
+ next_index_, samples_to_read, output->mutable_data());
const size_t samples_read_per_channel = tot_samples_read / Channels();
next_index_ += samples_read_per_channel;
output->num_channels_ = Channels();
diff --git a/modules/audio_coding/neteq/sync_buffer.h b/modules/audio_coding/neteq/sync_buffer.h
index d880356..8a35326 100644
--- a/modules/audio_coding/neteq/sync_buffer.h
+++ b/modules/audio_coding/neteq/sync_buffer.h
@@ -92,7 +92,7 @@
private:
size_t next_index_;
uint32_t end_timestamp_; // The timestamp of the last sample in the buffer.
- size_t dtmf_index_; // Index to the first non-DTMF sample in the buffer.
+ size_t dtmf_index_; // Index to the first non-DTMF sample in the buffer.
RTC_DISALLOW_COPY_AND_ASSIGN(SyncBuffer);
};
diff --git a/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index bca401a..ad61235 100644
--- a/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -52,7 +52,8 @@
int EncodeBlock(int16_t* in_data,
size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) override {
+ rtc::Buffer* payload,
+ size_t max_bytes) override {
const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
diff --git a/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index d88f789..94984b87 100644
--- a/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -30,8 +30,11 @@
NetEqIsacQualityTest();
void SetUp() override;
void TearDown() override;
- int EncodeBlock(int16_t* in_data, size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) override;
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override;
+
private:
ISACFIX_MainStruct* isac_encoder_;
int bit_rate_kbps_;
@@ -44,10 +47,10 @@
NetEqDecoder::kDecoderISAC),
isac_encoder_(NULL),
bit_rate_kbps_(FLAG_bit_rate_kbps) {
- // Flag validation
- RTC_CHECK(FLAG_bit_rate_kbps >= 10 && FLAG_bit_rate_kbps <= 32)
- << "Invalid bit rate, should be between 10 and 32 kbps.";
- }
+ // Flag validation
+ RTC_CHECK(FLAG_bit_rate_kbps >= 10 && FLAG_bit_rate_kbps <= 32)
+ << "Invalid bit rate, should be between 10 and 32 kbps.";
+}
void NetEqIsacQualityTest::SetUp() {
ASSERT_EQ(1u, channels_) << "iSAC supports only mono audio.";
@@ -69,7 +72,8 @@
int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) {
+ rtc::Buffer* payload,
+ size_t max_bytes) {
// ISAC takes 10 ms for every call.
const int subblocks = kIsacBlockDurationMs / 10;
const int subblock_length = 10 * kIsacInputSamplingKhz;
@@ -80,11 +84,11 @@
// The Isac encoder does not perform encoding (and returns 0) until it
// receives a sequence of sub-blocks that amount to the frame duration.
EXPECT_EQ(0, value);
- payload->AppendData(max_bytes, [&] (rtc::ArrayView<uint8_t> payload) {
- value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer],
- payload.data());
- return (value >= 0) ? static_cast<size_t>(value) : 0;
- });
+ payload->AppendData(max_bytes, [&](rtc::ArrayView<uint8_t> payload) {
+ value = WebRtcIsacfix_Encode(isac_encoder_, &in_data[pointer],
+ payload.data());
+ return (value >= 0) ? static_cast<size_t>(value) : 0;
+ });
}
EXPECT_GT(value, 0);
return value;
diff --git a/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index c2542b6..6861e4c 100644
--- a/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "modules/audio_coding/codecs/opus/opus_interface.h"
#include "modules/audio_coding/codecs/opus/opus_inst.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
#include "rtc_base/flags.h"
@@ -24,8 +24,10 @@
DEFINE_int(bit_rate_kbps, 32, "Target bit rate (kbps).");
-DEFINE_int(complexity, 10, "Complexity: 0 ~ 10 -- defined as in Opus"
- "specification.");
+DEFINE_int(complexity,
+ 10,
+ "Complexity: 0 ~ 10 -- defined as in Opus"
+ "specification.");
DEFINE_int(maxplaybackrate, 48000, "Maximum playback rate (Hz).");
@@ -46,8 +48,11 @@
NetEqOpusQualityTest();
void SetUp() override;
void TearDown() override;
- int EncodeBlock(int16_t* in_data, size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) override;
+ int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) override;
+
private:
WebRtcOpusEncInst* opus_encoder_;
OpusRepacketizer* repacketizer_;
@@ -120,8 +125,7 @@
}
EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, complexity_));
EXPECT_EQ(0, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, maxplaybackrate_));
- EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_,
- target_loss_rate_));
+ EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_, target_loss_rate_));
NetEqQualityTest::SetUp();
}
@@ -134,26 +138,25 @@
int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) {
+ rtc::Buffer* payload,
+ size_t max_bytes) {
EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
int16_t* pointer = in_data;
int value;
opus_repacketizer_init(repacketizer_);
for (int idx = 0; idx < sub_packets_; idx++) {
- payload->AppendData(max_bytes, [&] (rtc::ArrayView<uint8_t> payload) {
- value = WebRtcOpus_Encode(opus_encoder_,
- pointer, sub_block_size_samples_,
- max_bytes, payload.data());
+ payload->AppendData(max_bytes, [&](rtc::ArrayView<uint8_t> payload) {
+ value = WebRtcOpus_Encode(opus_encoder_, pointer, sub_block_size_samples_,
+ max_bytes, payload.data());
- Log() << "Encoded a frame with Opus mode "
- << (value == 0 ? 0 : payload[0] >> 3)
- << std::endl;
+ Log() << "Encoded a frame with Opus mode "
+ << (value == 0 ? 0 : payload[0] >> 3) << std::endl;
- return (value >= 0) ? static_cast<size_t>(value) : 0;
- });
+ return (value >= 0) ? static_cast<size_t>(value) : 0;
+ });
- if (OPUS_OK != opus_repacketizer_cat(repacketizer_,
- payload->data(), value)) {
+ if (OPUS_OK !=
+ opus_repacketizer_cat(repacketizer_, payload->data(), value)) {
opus_repacketizer_init(repacketizer_);
// If the repacketization fails, we discard this frame.
return 0;
diff --git a/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index bc3c168..54ff849 100644
--- a/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -52,7 +52,8 @@
int EncodeBlock(int16_t* in_data,
size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) override {
+ rtc::Buffer* payload,
+ size_t max_bytes) override {
const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
diff --git a/modules/audio_coding/neteq/test/neteq_performance_unittest.cc b/modules/audio_coding/neteq/test/neteq_performance_unittest.cc
index 0510af8..6b1c223 100644
--- a/modules/audio_coding/neteq/test/neteq_performance_unittest.cc
+++ b/modules/audio_coding/neteq/test/neteq_performance_unittest.cc
@@ -9,10 +9,10 @@
*/
#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
+#include "system_wrappers/include/field_trial.h"
#include "test/gtest.h"
#include "test/testsupport/perf_test.h"
#include "typedefs.h" // NOLINT(build/include)
-#include "system_wrappers/include/field_trial.h"
// Runs a test with 10% packet losses and 10% clock drift, to exercise
// both loss concealment and time-stretching code.
@@ -27,8 +27,8 @@
: kSimulationTimeMs,
kLossPeriod, kDriftFactor);
ASSERT_GT(runtime, 0);
- webrtc::test::PrintResult(
- "neteq_performance", "", "10_pl_10_drift", runtime, "ms", true);
+ webrtc::test::PrintResult("neteq_performance", "", "10_pl_10_drift", runtime,
+ "ms", true);
}
// Runs a test with neither packet losses nor clock drift, to put
@@ -37,7 +37,7 @@
TEST(NetEqPerformanceTest, RunClean) {
const int kSimulationTimeMs = 10000000;
const int kQuickSimulationTimeMs = 100000;
- const int kLossPeriod = 0; // No losses.
+ const int kLossPeriod = 0; // No losses.
const double kDriftFactor = 0.0; // No clock drift.
int64_t runtime = webrtc::test::NetEqPerformanceTest::Run(
webrtc::field_trial::IsEnabled("WebRTC-QuickPerfTest")
@@ -45,6 +45,6 @@
: kSimulationTimeMs,
kLossPeriod, kDriftFactor);
ASSERT_GT(runtime, 0);
- webrtc::test::PrintResult(
- "neteq_performance", "", "0_pl_0_drift", runtime, "ms", true);
+ webrtc::test::PrintResult("neteq_performance", "", "0_pl_0_drift", runtime,
+ "ms", true);
}
diff --git a/modules/audio_coding/neteq/test/neteq_speed_test.cc b/modules/audio_coding/neteq/test/neteq_speed_test.cc
index ad123fe..76b6878 100644
--- a/modules/audio_coding/neteq/test/neteq_speed_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_speed_test.cc
@@ -19,23 +19,24 @@
// Define command line flags.
DEFINE_int(runtime_ms, 10000, "Simulated runtime in ms.");
-DEFINE_int(lossrate, 10,
- "Packet lossrate; drop every N packets.");
-DEFINE_float(drift, 0.1f,
- "Clockdrift factor.");
+DEFINE_int(lossrate, 10, "Packet lossrate; drop every N packets.");
+DEFINE_float(drift, 0.1f, "Clockdrift factor.");
DEFINE_bool(help, false, "Print this message.");
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
- std::string usage = "Tool for measuring the speed of NetEq.\n"
- "Usage: " + program_name + " [options]\n\n"
+ std::string usage =
+ "Tool for measuring the speed of NetEq.\n"
+ "Usage: " +
+ program_name +
+ " [options]\n\n"
" --runtime_ms=N runtime in ms; default is 10000 ms\n"
" --lossrate=N drop every N packets; default is 10\n"
" --drift=F clockdrift factor between 0.0 and 1.0; "
"default is 0.1\n";
webrtc::test::SetExecutablePath(argv[0]);
- if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
- FLAG_help || argc != 1) {
+ if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) || FLAG_help ||
+ argc != 1) {
printf("%s", usage.c_str());
if (FLAG_help) {
rtc::FlagList::Print(nullptr, false);
@@ -47,9 +48,8 @@
RTC_CHECK_GE(FLAG_lossrate, 0);
RTC_CHECK(FLAG_drift >= 0.0 && FLAG_drift < 1.0);
- int64_t result =
- webrtc::test::NetEqPerformanceTest::Run(FLAG_runtime_ms, FLAG_lossrate,
- FLAG_drift);
+ int64_t result = webrtc::test::NetEqPerformanceTest::Run(
+ FLAG_runtime_ms, FLAG_lossrate, FLAG_drift);
if (result <= 0) {
std::cout << "There was an error" << std::endl;
return -1;
diff --git a/modules/audio_coding/neteq/time_stretch.cc b/modules/audio_coding/neteq/time_stretch.cc
index 8a1bfa2..560d9be 100644
--- a/modules/audio_coding/neteq/time_stretch.cc
+++ b/modules/audio_coding/neteq/time_stretch.cc
@@ -80,7 +80,7 @@
// Calculate scaling to ensure that |peak_index| samples can be square-summed
// without overflowing.
int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
- WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
+ WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
scaling = std::max(0, scaling);
// |vec1| starts at 15 ms minus one pitch period.
@@ -99,8 +99,8 @@
WebRtcSpl_DotProductWithScale(vec1, vec2, peak_index, scaling);
// Check if the signal seems to be active speech or not (simple VAD).
- bool active_speech = SpeechDetection(vec1_energy, vec2_energy, peak_index,
- scaling);
+ bool active_speech =
+ SpeechDetection(vec1_energy, vec2_energy, peak_index, scaling);
int16_t best_correlation;
if (!active_speech) {
@@ -126,8 +126,8 @@
static_cast<int16_t>(vec2_energy >> energy2_scale);
// Calculate square-root of energy product.
- int16_t sqrt_energy_prod = WebRtcSpl_SqrtFloor(vec1_energy_int16 *
- vec2_energy_int16);
+ int16_t sqrt_energy_prod =
+ WebRtcSpl_SqrtFloor(vec1_energy_int16 * vec2_energy_int16);
// Calculate cross_corr / sqrt(en1*en2) in Q14.
int temp_scale = 14 - (energy1_scale + energy2_scale) / 2;
@@ -138,7 +138,6 @@
best_correlation = std::min(static_cast<int16_t>(16384), best_correlation);
}
-
// Check accelerate criteria and stretch the signal.
ReturnCodes return_value =
CheckCriteriaAndStretch(input, input_len, peak_index, best_correlation,
@@ -172,8 +171,10 @@
auto_corr, scaling);
}
-bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- size_t peak_index, int scaling) const {
+bool TimeStretch::SpeechDetection(int32_t vec1_energy,
+ int32_t vec2_energy,
+ size_t peak_index,
+ int scaling) const {
// Check if the signal seems to be active speech or not (simple VAD).
// If (vec1_energy + vec2_energy) / (2 * peak_index) <=
// 8 * background_noise_energy, then we say that the signal contains no
diff --git a/modules/audio_coding/neteq/time_stretch.h b/modules/audio_coding/neteq/time_stretch.h
index ace10cd..606d1d0 100644
--- a/modules/audio_coding/neteq/time_stretch.h
+++ b/modules/audio_coding/neteq/time_stretch.h
@@ -35,7 +35,8 @@
kError = -1
};
- TimeStretch(int sample_rate_hz, size_t num_channels,
+ TimeStretch(int sample_rate_hz,
+ size_t num_channels,
const BackgroundNoise& background_noise)
: sample_rate_hz_(sample_rate_hz),
fs_mult_(sample_rate_hz / 8000),
@@ -43,10 +44,8 @@
master_channel_(0), // First channel is master.
background_noise_(background_noise),
max_input_value_(0) {
- assert(sample_rate_hz_ == 8000 ||
- sample_rate_hz_ == 16000 ||
- sample_rate_hz_ == 32000 ||
- sample_rate_hz_ == 48000);
+ assert(sample_rate_hz_ == 8000 || sample_rate_hz_ == 16000 ||
+ sample_rate_hz_ == 32000 || sample_rate_hz_ == 48000);
assert(num_channels_ > 0);
assert(master_channel_ < num_channels_);
memset(auto_correlation_, 0, sizeof(auto_correlation_));
@@ -106,8 +105,10 @@
void AutoCorrelation();
// Performs a simple voice-activity detection based on the input parameters.
- bool SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- size_t peak_index, int scaling) const;
+ bool SpeechDetection(int32_t vec1_energy,
+ int32_t vec2_energy,
+ size_t peak_index,
+ int scaling) const;
RTC_DISALLOW_COPY_AND_ASSIGN(TimeStretch);
};
diff --git a/modules/audio_coding/neteq/time_stretch_unittest.cc b/modules/audio_coding/neteq/time_stretch_unittest.cc
index 8d0f4d4..c96c7d4 100644
--- a/modules/audio_coding/neteq/time_stretch_unittest.cc
+++ b/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -34,8 +34,8 @@
const int kOverlapSamples = 5 * kSampleRate / 8000;
BackgroundNoise bgn(kNumChannels);
Accelerate accelerate(kSampleRate, kNumChannels, bgn);
- PreemptiveExpand preemptive_expand(
- kSampleRate, kNumChannels, bgn, kOverlapSamples);
+ PreemptiveExpand preemptive_expand(kSampleRate, kNumChannels, bgn,
+ kOverlapSamples);
}
TEST(TimeStretch, CreateUsingFactory) {
diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc
index d7aa9fe..07d945e 100644
--- a/modules/audio_coding/neteq/timestamp_scaler.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -70,7 +70,6 @@
}
}
-
uint32_t TimestampScaler::ToExternal(uint32_t internal_timestamp) const {
if (!first_packet_received_ || (numerator_ == denominator_)) {
// Not initialized, or scale factor is 1.
diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
index eeaf772..1f1445a 100644
--- a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
#include "modules/audio_coding/neteq/packet.h"
-#include "modules/audio_coding/neteq/timestamp_scaler.h"
#include "test/gmock.h"
#include "test/gtest.h"
@@ -60,7 +60,7 @@
// |external_timestamp| will be a large positive value.
start_timestamp = start_timestamp - 5 * kStep;
for (uint32_t timestamp = start_timestamp; timestamp != 5 * kStep;
- timestamp += kStep) {
+ timestamp += kStep) {
// Scale to internal timestamp.
EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
// Scale back.
diff --git a/modules/audio_coding/neteq/tools/audio_loop.cc b/modules/audio_coding/neteq/tools/audio_loop.cc
index b5ad881..972921b 100644
--- a/modules/audio_coding/neteq/tools/audio_loop.cc
+++ b/modules/audio_coding/neteq/tools/audio_loop.cc
@@ -21,16 +21,18 @@
size_t max_loop_length_samples,
size_t block_length_samples) {
FILE* fp = fopen(file_name.c_str(), "rb");
- if (!fp) return false;
+ if (!fp)
+ return false;
- audio_array_.reset(new int16_t[max_loop_length_samples +
- block_length_samples]);
- size_t samples_read = fread(audio_array_.get(), sizeof(int16_t),
- max_loop_length_samples, fp);
+ audio_array_.reset(
+ new int16_t[max_loop_length_samples + block_length_samples]);
+ size_t samples_read =
+ fread(audio_array_.get(), sizeof(int16_t), max_loop_length_samples, fp);
fclose(fp);
// Block length must be shorter than the loop length.
- if (block_length_samples > samples_read) return false;
+ if (block_length_samples > samples_read)
+ return false;
// Add an extra block length of samples to the end of the array, starting
// over again from the beginning of the array. This is done to simplify
@@ -54,6 +56,5 @@
return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
}
-
} // namespace test
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/audio_loop.h b/modules/audio_coding/neteq/tools/audio_loop.h
index abb1a36..876c2d7 100644
--- a/modules/audio_coding/neteq/tools/audio_loop.h
+++ b/modules/audio_coding/neteq/tools/audio_loop.h
@@ -26,10 +26,7 @@
class AudioLoop {
public:
AudioLoop()
- : next_index_(0),
- loop_length_samples_(0),
- block_length_samples_(0) {
- }
+ : next_index_(0), loop_length_samples_(0), block_length_samples_(0) {}
virtual ~AudioLoop() {}
@@ -38,7 +35,8 @@
// greater. Otherwise, the loop length is the same as the file length.
// The audio will be delivered in blocks of |block_length_samples|.
// Returns false if the initialization failed, otherwise true.
- bool Init(const std::string file_name, size_t max_loop_length_samples,
+ bool Init(const std::string file_name,
+ size_t max_loop_length_samples,
size_t block_length_samples);
// Returns a (pointer,size) pair for the next block of audio. The size is
diff --git a/modules/audio_coding/neteq/tools/audio_sink.h b/modules/audio_coding/neteq/tools/audio_sink.h
index 18ac6fc..05e6fe8 100644
--- a/modules/audio_coding/neteq/tools/audio_sink.h
+++ b/modules/audio_coding/neteq/tools/audio_sink.h
@@ -32,9 +32,8 @@
// Writes |audio_frame| to the AudioSink. Returns true if successful,
// otherwise false.
bool WriteAudioFrame(const AudioFrame& audio_frame) {
- return WriteArray(
- audio_frame.data(),
- audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+ return WriteArray(audio_frame.data(), audio_frame.samples_per_channel_ *
+ audio_frame.num_channels_);
}
private:
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.cc b/modules/audio_coding/neteq/tools/input_audio_file.cc
index 330a874..6d11064 100644
--- a/modules/audio_coding/neteq/tools/input_audio_file.cc
+++ b/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -20,7 +20,9 @@
fp_ = fopen(file_name.c_str(), "rb");
}
-InputAudioFile::~InputAudioFile() { fclose(fp_); }
+InputAudioFile::~InputAudioFile() {
+ fclose(fp_);
+}
bool InputAudioFile::Read(size_t samples, int16_t* destination) {
if (!fp_) {
@@ -73,7 +75,8 @@
return true;
}
-void InputAudioFile::DuplicateInterleaved(const int16_t* source, size_t samples,
+void InputAudioFile::DuplicateInterleaved(const int16_t* source,
+ size_t samples,
size_t channels,
int16_t* destination) {
// Start from the end of |source| and |destination|, and work towards the
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.h b/modules/audio_coding/neteq/tools/input_audio_file.h
index 6bfa369..db5a944 100644
--- a/modules/audio_coding/neteq/tools/input_audio_file.h
+++ b/modules/audio_coding/neteq/tools/input_audio_file.h
@@ -45,8 +45,10 @@
// channels are identical. The output |destination| must have the capacity to
// hold samples * channels elements. Note that |source| and |destination| can
// be the same array (i.e., point to the same address).
- static void DuplicateInterleaved(const int16_t* source, size_t samples,
- size_t channels, int16_t* destination);
+ static void DuplicateInterleaved(const int16_t* source,
+ size_t samples,
+ size_t channels,
+ int16_t* destination);
private:
FILE* fp_;
diff --git a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 2c23e5c..3bd218b 100644
--- a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
#include "api/audio/audio_frame.h"
@@ -32,9 +31,8 @@
}
void NetEqExternalDecoderTest::Init() {
- ASSERT_EQ(NetEq::kOK,
- neteq_->RegisterExternalDecoder(decoder_, codec_, name_,
- kPayloadType));
+ ASSERT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(decoder_, codec_, name_,
+ kPayloadType));
}
void NetEqExternalDecoderTest::InsertPacket(
diff --git a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index b8670a3..78f0085 100644
--- a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -31,7 +31,7 @@
int sample_rate_hz,
AudioDecoder* decoder);
- virtual ~NetEqExternalDecoderTest() { }
+ virtual ~NetEqExternalDecoderTest() {}
// In Init(), we register the external decoder.
void Init();
diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 80aa809..e0dfebf 100644
--- a/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -95,9 +95,8 @@
}
// Get next packet.
- packet_input_time_ms = rtp_gen.GetRtpHeader(kPayloadType,
- kInputBlockSizeSamples,
- &rtp_header);
+ packet_input_time_ms = rtp_gen.GetRtpHeader(
+ kPayloadType, kInputBlockSizeSamples, &rtp_header);
input_samples = audio_loop.GetNextBlock();
if (input_samples.empty())
return -1;
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 82fa90e..faca895 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -47,7 +47,9 @@
return true;
}
-DEFINE_string(in_filename, DefaultInFilename().c_str(),
+DEFINE_string(
+ in_filename,
+ DefaultInFilename().c_str(),
"Filename for input audio (specify sample rate with --input_sample_rate, "
"and channels with --channels).");
@@ -55,8 +57,9 @@
DEFINE_int(channels, 1, "Number of channels in input audio.");
-DEFINE_string(out_filename, DefaultOutFilename().c_str(),
- "Name of output audio file.");
+DEFINE_string(out_filename,
+ DefaultOutFilename().c_str(),
+ "Name of output audio file.");
DEFINE_int(runtime_ms, 10000, "Simulated runtime (milliseconds).");
@@ -67,8 +70,9 @@
"Random loss mode: 0--no loss, 1--uniform loss, 2--Gilbert Elliot "
"loss, 3--fixed loss.");
-DEFINE_int(burst_length, 30,
- "Burst length in milliseconds, only valid for Gilbert Elliot loss.");
+DEFINE_int(burst_length,
+ 30,
+ "Burst length in milliseconds, only valid for Gilbert Elliot loss.");
DEFINE_float(drift_factor, 0.0, "Time drift factor.");
@@ -85,21 +89,22 @@
// to achieve the target packet loss rate |loss_rate|, when a packet is not
// lost only if all |units| drawings within the duration of the packet result in
// no-loss.
-static double ProbTrans00Solver(int units, double loss_rate,
+static double ProbTrans00Solver(int units,
+ double loss_rate,
double prob_trans_10) {
if (units == 1)
return prob_trans_10 / (1.0f - loss_rate) - prob_trans_10;
-// 0 == prob_trans_00 ^ (units - 1) + (1 - loss_rate) / prob_trans_10 *
-// prob_trans_00 - (1 - loss_rate) * (1 + 1 / prob_trans_10).
-// There is a unique solution between 0.0 and 1.0, due to the monotonicity and
-// an opposite sign at 0.0 and 1.0.
-// For simplicity, we reformulate the equation as
-// f(x) = x ^ (units - 1) + a x + b.
-// Its derivative is
-// f'(x) = (units - 1) x ^ (units - 2) + a.
-// The derivative is strictly greater than 0 when x is between 0 and 1.
-// We use Newton's method to solve the equation, iteration is
-// x(k+1) = x(k) - f(x) / f'(x);
+ // 0 == prob_trans_00 ^ (units - 1) + (1 - loss_rate) / prob_trans_10 *
+ // prob_trans_00 - (1 - loss_rate) * (1 + 1 / prob_trans_10).
+ // There is a unique solution between 0.0 and 1.0, due to the monotonicity and
+ // an opposite sign at 0.0 and 1.0.
+ // For simplicity, we reformulate the equation as
+ // f(x) = x ^ (units - 1) + a x + b.
+ // Its derivative is
+ // f'(x) = (units - 1) x ^ (units - 2) + a.
+ // The derivative is strictly greater than 0 when x is between 0 and 1.
+ // We use Newton's method to solve the equation, iteration is
+ // x(k+1) = x(k) - f(x) / f'(x);
const double kPrecision = 0.001f;
const int kIterations = 100;
const double a = (1.0f - loss_rate) / prob_trans_10;
@@ -117,7 +122,7 @@
x = 0.0f;
}
f = pow(x, units - 1) + a * x + b;
- iter ++;
+ iter++;
}
return x;
}
@@ -210,9 +215,7 @@
return false;
}
-UniformLoss::UniformLoss(double loss_rate)
- : loss_rate_(loss_rate) {
-}
+UniformLoss::UniformLoss(double loss_rate) : loss_rate_(loss_rate) {}
bool UniformLoss::Lost(int now_ms) {
int drop_this = rand();
@@ -223,8 +226,7 @@
: prob_trans_11_(prob_trans_11),
prob_trans_01_(prob_trans_01),
lost_last_(false),
- uniform_loss_model_(new UniformLoss(0)) {
-}
+ uniform_loss_model_(new UniformLoss(0)) {}
GilbertElliotLoss::~GilbertElliotLoss() {}
@@ -277,8 +279,8 @@
// a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
// (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
// 1 - packet_loss_rate.
- double unit_loss_rate = (1.0f - pow(1.0f - 0.01f * packet_loss_rate_,
- 1.0f / units));
+ double unit_loss_rate =
+ (1.0f - pow(1.0f - 0.01f * packet_loss_rate_, 1.0f / units));
loss_model_.reset(new UniformLoss(unit_loss_rate));
break;
}
@@ -304,8 +306,8 @@
double loss_rate = 0.01f * packet_loss_rate_;
double prob_trans_10 = 1.0f * kPacketLossTimeUnitMs / FLAG_burst_length;
double prob_trans_00 = ProbTrans00Solver(units, loss_rate, prob_trans_10);
- loss_model_.reset(new GilbertElliotLoss(1.0f - prob_trans_10,
- 1.0f - prob_trans_00));
+ loss_model_.reset(
+ new GilbertElliotLoss(1.0f - prob_trans_10, 1.0f - prob_trans_00));
break;
}
case kFixedLoss: {
@@ -347,7 +349,7 @@
// The loop is to make sure that codecs with different block lengths share the
// same packet loss profile.
bool lost = false;
- for (int idx = 0; idx < cycles; idx ++) {
+ for (int idx = 0; idx < cycles; idx++) {
if (loss_model_->Lost(decoded_time_ms_)) {
// The packet will be lost if any of the drawings indicates a loss, but
// the loop has to go on to make sure that codecs with different block
@@ -359,14 +361,10 @@
}
int NetEqQualityTest::Transmit() {
- int packet_input_time_ms =
- rtp_generator_->GetRtpHeader(kPayloadType, in_size_samples_,
- &rtp_header_);
- Log() << "Packet of size "
- << payload_size_bytes_
- << " bytes, for frame at "
- << packet_input_time_ms
- << " ms ";
+ int packet_input_time_ms = rtp_generator_->GetRtpHeader(
+ kPayloadType, in_size_samples_, &rtp_header_);
+ Log() << "Packet of size " << payload_size_bytes_ << " bytes, for frame at "
+ << packet_input_time_ms << " ms ";
if (payload_size_bytes_ > 0) {
if (!PacketLost()) {
int ret = neteq_->InsertPacket(
@@ -411,9 +409,8 @@
decoded_time_ms_) {
ASSERT_TRUE(in_file_->Read(in_size_samples_ * channels_, &in_data_[0]));
payload_.Clear();
- payload_size_bytes_ = EncodeBlock(&in_data_[0],
- in_size_samples_, &payload_,
- max_payload_bytes_);
+ payload_size_bytes_ = EncodeBlock(&in_data_[0], in_size_samples_,
+ &payload_, max_payload_bytes_);
total_payload_size_bytes_ += payload_size_bytes_;
decodable_time_ms_ = Transmit() + block_duration_ms_;
}
@@ -423,8 +420,7 @@
}
}
Log() << "Average bit rate was "
- << 8.0f * total_payload_size_bytes_ / FLAG_runtime_ms
- << " kbps"
+ << 8.0f * total_payload_size_bytes_ / FLAG_runtime_ms << " kbps"
<< std::endl;
}
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 2b82b0a..b19460c 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -36,7 +36,7 @@
class LossModel {
public:
- virtual ~LossModel() {};
+ virtual ~LossModel(){};
virtual bool Lost(int now_ms) = 0;
};
@@ -110,8 +110,10 @@
// |block_size_samples| (samples per channel),
// 2. save the bit stream to |payload| of |max_bytes| bytes in size,
// 3. returns the length of the payload (in bytes),
- virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes) = 0;
+ virtual int EncodeBlock(int16_t* in_data,
+ size_t block_size_samples,
+ rtc::Buffer* payload,
+ size_t max_bytes) = 0;
// PacketLost(...) determines weather a packet sent at an indicated time gets
// lost or not.
diff --git a/modules/audio_coding/neteq/tools/neteq_replacement_input.h b/modules/audio_coding/neteq/tools/neteq_replacement_input.h
index 1113001..9ce9b9d 100644
--- a/modules/audio_coding/neteq/tools/neteq_replacement_input.h
+++ b/modules/audio_coding/neteq/tools/neteq_replacement_input.h
@@ -42,7 +42,7 @@
const uint8_t replacement_payload_type_;
const std::set<uint8_t> comfort_noise_types_;
const std::set<uint8_t> forbidden_types_;
- std::unique_ptr<PacketData> packet_; // The next packet to deliver.
+ std::unique_ptr<PacketData> packet_; // The next packet to deliver.
uint32_t last_frame_size_timestamps_ = 960; // Initial guess: 20 ms @ 48 kHz.
};
diff --git a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index d69b1a7..673c8fd 100644
--- a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -10,20 +10,20 @@
#include <errno.h>
#include <inttypes.h>
-#include <iostream>
#include <limits.h> // For ULONG_MAX returned by strtoul.
-#include <memory>
#include <stdio.h>
#include <stdlib.h> // For strtoul.
+#include <iostream>
+#include <memory>
#include <string>
#include "modules/audio_coding/neteq/include/neteq.h"
#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "modules/audio_coding/neteq/tools/input_audio_file.h"
#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
-#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h"
#include "modules/audio_coding/neteq/tools/neteq_test.h"
#include "modules/audio_coding/neteq/tools/output_audio_file.h"
#include "modules/audio_coding/neteq/tools/output_wav_file.h"
@@ -71,7 +71,7 @@
bool ValidateSsrcValue(const std::string& str) {
uint32_t dummy_ssrc;
- if (ParseSsrc(str, &dummy_ssrc)) // Value is ok.
+ if (ParseSsrc(str, &dummy_ssrc)) // Value is ok.
return true;
printf("Invalid SSRC: %s\n", str.c_str());
return false;
@@ -106,10 +106,15 @@
DEFINE_int(cn_wb, 98, "RTP payload type for comfort noise (16 kHz)");
DEFINE_int(cn_swb32, 99, "RTP payload type for comfort noise (32 kHz)");
DEFINE_int(cn_swb48, 100, "RTP payload type for comfort noise (48 kHz)");
-DEFINE_bool(codec_map, false, "Prints the mapping between RTP payload type and "
- "codec");
-DEFINE_string(replacement_audio_file, "",
- "A PCM file that will be used to populate ""dummy"" RTP packets");
+DEFINE_bool(codec_map,
+ false,
+ "Prints the mapping between RTP payload type and "
+ "codec");
+DEFINE_string(replacement_audio_file,
+ "",
+ "A PCM file that will be used to populate "
+ "dummy"
+ " RTP packets");
DEFINE_string(ssrc,
"",
"Only use packets with this SSRC (decimal or hex, the latter "
@@ -240,8 +245,8 @@
NetEq* neteq) override {
if (last_ssrc_ && packet.header.ssrc != *last_ssrc_) {
std::cout << "Changing streams from 0x" << std::hex << *last_ssrc_
- << " to 0x" << std::hex << packet.header.ssrc
- << std::dec << " (payload type "
+ << " to 0x" << std::hex << packet.header.ssrc << std::dec
+ << " (payload type "
<< static_cast<int>(packet.header.payloadType) << ")"
<< std::endl;
}
@@ -258,10 +263,13 @@
int RunTest(int argc, char* argv[]) {
std::string program_name = argv[0];
- std::string usage = "Tool for decoding an RTP dump file using NetEq.\n"
- "Run " + program_name + " --help for usage.\n"
- "Example usage:\n" + program_name +
- " input.rtp output.{pcm, wav}\n";
+ std::string usage =
+ "Tool for decoding an RTP dump file using NetEq.\n"
+ "Run " +
+ program_name +
+ " --help for usage.\n"
+ "Example usage:\n" +
+ program_name + " input.rtp output.{pcm, wav}\n";
if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true)) {
return 1;
}
@@ -406,10 +414,8 @@
{FLAG_g722, std::make_pair(NetEqDecoder::kDecoderG722, "g722")},
{FLAG_avt, std::make_pair(NetEqDecoder::kDecoderAVT, "avt")},
{FLAG_avt_16, std::make_pair(NetEqDecoder::kDecoderAVT16kHz, "avt-16")},
- {FLAG_avt_32,
- std::make_pair(NetEqDecoder::kDecoderAVT32kHz, "avt-32")},
- {FLAG_avt_48,
- std::make_pair(NetEqDecoder::kDecoderAVT48kHz, "avt-48")},
+ {FLAG_avt_32, std::make_pair(NetEqDecoder::kDecoderAVT32kHz, "avt-32")},
+ {FLAG_avt_48, std::make_pair(NetEqDecoder::kDecoderAVT48kHz, "avt-48")},
{FLAG_red, std::make_pair(NetEqDecoder::kDecoderRED, "red")},
{FLAG_cn_nb, std::make_pair(NetEqDecoder::kDecoderCNGnb, "cng-nb")},
{FLAG_cn_wb, std::make_pair(NetEqDecoder::kDecoderCNGwb, "cng-wb")},
@@ -440,9 +446,8 @@
std::set<uint8_t> cn_types = std_set_int32_to_uint8(
{FLAG_cn_nb, FLAG_cn_wb, FLAG_cn_swb32, FLAG_cn_swb48});
- std::set<uint8_t> forbidden_types =
- std_set_int32_to_uint8({FLAG_g722, FLAG_red, FLAG_avt,
- FLAG_avt_16, FLAG_avt_32, FLAG_avt_48});
+ std::set<uint8_t> forbidden_types = std_set_int32_to_uint8(
+ {FLAG_g722, FLAG_red, FLAG_avt, FLAG_avt_16, FLAG_avt_32, FLAG_avt_48});
input.reset(new NetEqReplacementInput(std::move(input), replacement_pt,
cn_types, forbidden_types));
diff --git a/modules/audio_coding/neteq/tools/neteq_stats_getter.cc b/modules/audio_coding/neteq/tools/neteq_stats_getter.cc
index 6474e21..58c9ae4 100644
--- a/modules/audio_coding/neteq/tools/neteq_stats_getter.cc
+++ b/modules/audio_coding/neteq/tools/neteq_stats_getter.cc
@@ -26,8 +26,7 @@
rtc::SimpleStringBuilder ss(ss_buf);
ss << "ConcealmentEvent duration_ms:" << duration_ms
<< " event_number:" << concealment_event_number
- << " time_from_previous_event_end_ms:"
- << time_from_previous_event_end_ms;
+ << " time_from_previous_event_end_ms:" << time_from_previous_event_end_ms;
return ss.str();
}
@@ -115,12 +114,10 @@
a.added_zero_samples += b.added_zero_samples;
a.mean_waiting_time_ms += b.mean_waiting_time_ms;
a.median_waiting_time_ms += b.median_waiting_time_ms;
- a.min_waiting_time_ms =
- std::min(a.min_waiting_time_ms,
- static_cast<double>(b.min_waiting_time_ms));
- a.max_waiting_time_ms =
- std::max(a.max_waiting_time_ms,
- static_cast<double>(b.max_waiting_time_ms));
+ a.min_waiting_time_ms = std::min(
+ a.min_waiting_time_ms, static_cast<double>(b.min_waiting_time_ms));
+ a.max_waiting_time_ms = std::max(
+ a.max_waiting_time_ms, static_cast<double>(b.max_waiting_time_ms));
return a;
});
diff --git a/modules/audio_coding/neteq/tools/neteq_stats_getter.h b/modules/audio_coding/neteq/tools/neteq_stats_getter.h
index dbb396a..975393c 100644
--- a/modules/audio_coding/neteq/tools/neteq_stats_getter.h
+++ b/modules/audio_coding/neteq/tools/neteq_stats_getter.h
@@ -69,9 +69,7 @@
double AverageSpeechExpandRate() const;
- NetEqDelayAnalyzer* delay_analyzer() const {
- return delay_analyzer_.get();
- }
+ NetEqDelayAnalyzer* delay_analyzer() const { return delay_analyzer_.get(); }
const std::vector<ConcealmentEvent>& concealment_events() const {
// Do not account for the last concealment event to avoid potential end
diff --git a/modules/audio_coding/neteq/tools/packet.cc b/modules/audio_coding/neteq/tools/packet.cc
index 9505a29..b1a9b64 100644
--- a/modules/audio_coding/neteq/tools/packet.cc
+++ b/modules/audio_coding/neteq/tools/packet.cc
@@ -158,11 +158,10 @@
destination->paddingLength = header_.paddingLength;
destination->headerLength = header_.headerLength;
destination->payload_type_frequency = header_.payload_type_frequency;
- memcpy(&destination->arrOfCSRCs,
- &header_.arrOfCSRCs,
+ memcpy(&destination->arrOfCSRCs, &header_.arrOfCSRCs,
sizeof(header_.arrOfCSRCs));
- memcpy(
- &destination->extension, &header_.extension, sizeof(header_.extension));
+ memcpy(&destination->extension, &header_.extension,
+ sizeof(header_.extension));
}
} // namespace test
diff --git a/modules/audio_coding/neteq/tools/packet.h b/modules/audio_coding/neteq/tools/packet.h
index 94d45c5..2c9a26f 100644
--- a/modules/audio_coding/neteq/tools/packet.h
+++ b/modules/audio_coding/neteq/tools/packet.h
@@ -15,7 +15,7 @@
#include <memory>
#include "api/rtp_headers.h" // NOLINT(build/include)
-#include "common_types.h" // NOLINT(build/include)
+#include "common_types.h" // NOLINT(build/include)
#include "rtc_base/constructormagic.h"
#include "typedefs.h" // NOLINT(build/include)
diff --git a/modules/audio_coding/neteq/tools/packet_unittest.cc b/modules/audio_coding/neteq/tools/packet_unittest.cc
index ce6a3b9..7f3d663 100644
--- a/modules/audio_coding/neteq/tools/packet_unittest.cc
+++ b/modules/audio_coding/neteq/tools/packet_unittest.cc
@@ -28,7 +28,7 @@
rtp_data[0] = 0x80;
rtp_data[1] = static_cast<uint8_t>(payload_type);
rtp_data[2] = (seq_number >> 8) & 0xFF;
- rtp_data[3] = (seq_number) & 0xFF;
+ rtp_data[3] = (seq_number)&0xFF;
rtp_data[4] = timestamp >> 24;
rtp_data[5] = (timestamp >> 16) & 0xFF;
rtp_data[6] = (timestamp >> 8) & 0xFF;
@@ -47,8 +47,8 @@
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
- MakeRtpHeader(
- kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+ MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory);
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
@@ -75,13 +75,11 @@
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
- MakeRtpHeader(
- kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+ MakeRtpHeader(kPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory);
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
- Packet packet(packet_memory,
- kPacketLengthBytes,
- kVirtualPacketLengthBytes,
+ Packet packet(packet_memory, kPacketLengthBytes, kVirtualPacketLengthBytes,
kPacketTime);
ASSERT_TRUE(packet.valid_header());
EXPECT_EQ(kPayloadType, packet.header().payloadType);
@@ -140,8 +138,8 @@
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
- MakeRtpHeader(
- kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+ MakeRtpHeader(kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc,
+ packet_memory);
// Create four RED headers.
// Payload types are just the same as the block index the offset is 100 times
// the block index.
@@ -154,8 +152,8 @@
uint32_t timestamp_offset = 100 * i;
int block_length = 10 * i;
bool last_block = (i == kRedBlocks - 1) ? true : false;
- payload_ptr += MakeRedHeader(
- payload_type, timestamp_offset, block_length, last_block, payload_ptr);
+ payload_ptr += MakeRedHeader(payload_type, timestamp_offset, block_length,
+ last_block, payload_ptr);
}
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
@@ -178,8 +176,7 @@
EXPECT_EQ(kRedBlocks, static_cast<int>(red_headers.size()));
int block_index = 0;
for (std::list<RTPHeader*>::reverse_iterator it = red_headers.rbegin();
- it != red_headers.rend();
- ++it) {
+ it != red_headers.rend(); ++it) {
// Reading list from the back, since the extraction puts the main payload
// (which is the last one on wire) first.
RTPHeader* red_block = *it;
diff --git a/modules/audio_coding/neteq/tools/rtp_analyze.cc b/modules/audio_coding/neteq/tools/rtp_analyze.cc
index 12721cc..f939038 100644
--- a/modules/audio_coding/neteq/tools/rtp_analyze.cc
+++ b/modules/audio_coding/neteq/tools/rtp_analyze.cc
@@ -20,10 +20,14 @@
// Define command line flags.
DEFINE_int(red, 117, "RTP payload type for RED");
-DEFINE_int(audio_level, -1, "Extension ID for audio level (RFC 6464); "
- "-1 not to print audio level");
-DEFINE_int(abs_send_time, -1, "Extension ID for absolute sender time; "
- "-1 not to print absolute send time");
+DEFINE_int(audio_level,
+ -1,
+ "Extension ID for audio level (RFC 6464); "
+ "-1 not to print audio level");
+DEFINE_int(abs_send_time,
+ -1,
+ "Extension ID for absolute sender time; "
+ "-1 not to print absolute send time");
DEFINE_bool(help, false, "Print this message");
int main(int argc, char* argv[]) {
@@ -37,8 +41,8 @@
program_name + " input.rtp output.txt\n\n" +
"Output is sent to stdout if no output file is given. " +
"Note that this tool can read files with or without payloads.\n";
- if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
- FLAG_help || (argc != 2 && argc != 3)) {
+ if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) || FLAG_help ||
+ (argc != 2 && argc != 3)) {
printf("%s", usage.c_str());
if (FLAG_help) {
rtc::FlagList::Print(nullptr, false);
@@ -47,10 +51,11 @@
return 1;
}
- RTC_CHECK(FLAG_red >= 0 && FLAG_red <= 127); // Payload type
- RTC_CHECK(FLAG_audio_level == -1 || // Default
- (FLAG_audio_level > 0 && FLAG_audio_level <= 255)); // Extension ID
- RTC_CHECK(FLAG_abs_send_time == -1 || // Default
+ RTC_CHECK(FLAG_red >= 0 && FLAG_red <= 127); // Payload type
+ RTC_CHECK(FLAG_audio_level == -1 || // Default
+ (FLAG_audio_level > 0 && FLAG_audio_level <= 255)); // Extension ID
+ RTC_CHECK(
+ FLAG_abs_send_time == -1 || // Default
(FLAG_abs_send_time > 0 && FLAG_abs_send_time <= 255)); // Extension ID
printf("Input file: %s\n", argv[1]);
@@ -104,19 +109,14 @@
}
// Write packet data to file. Use virtual_packet_length_bytes so that the
// correct packet sizes are printed also for RTP header-only dumps.
- fprintf(out_file,
- "%5u %10u %10u %5i %5i %2i %#08X",
- packet->header().sequenceNumber,
- packet->header().timestamp,
+ fprintf(out_file, "%5u %10u %10u %5i %5i %2i %#08X",
+ packet->header().sequenceNumber, packet->header().timestamp,
static_cast<unsigned int>(packet->time_ms()),
static_cast<int>(packet->virtual_packet_length_bytes()),
- packet->header().payloadType,
- packet->header().markerBit,
+ packet->header().payloadType, packet->header().markerBit,
packet->header().ssrc);
if (print_audio_level && packet->header().extension.hasAudioLevel) {
- fprintf(out_file,
- " %5u (%1i)",
- packet->header().extension.audioLevel,
+ fprintf(out_file, " %5u (%1i)", packet->header().extension.audioLevel,
packet->header().extension.voiceActivity);
}
if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
@@ -156,11 +156,8 @@
while (!red_headers.empty()) {
webrtc::RTPHeader* red = red_headers.front();
assert(red);
- fprintf(out_file,
- "* %5u %10u %10u %5i\n",
- red->sequenceNumber,
- red->timestamp,
- static_cast<unsigned int>(packet->time_ms()),
+ fprintf(out_file, "* %5u %10u %10u %5i\n", red->sequenceNumber,
+ red->timestamp, static_cast<unsigned int>(packet->time_ms()),
red->payloadType);
red_headers.pop_front();
delete red;
diff --git a/modules/audio_coding/neteq/tools/rtp_encode.cc b/modules/audio_coding/neteq/tools/rtp_encode.cc
index 66e7a28..1984e3f 100644
--- a/modules/audio_coding/neteq/tools/rtp_encode.cc
+++ b/modules/audio_coding/neteq/tools/rtp_encode.cc
@@ -247,11 +247,16 @@
AudioEncoderCng::Config cng_config;
const auto default_payload_type = [&] {
switch (sample_rate_hz) {
- case 8000: return 13;
- case 16000: return 98;
- case 32000: return 99;
- case 48000: return 100;
- default: RTC_NOTREACHED();
+ case 8000:
+ return 13;
+ case 16000:
+ return 98;
+ case 32000:
+ return 99;
+ case 48000:
+ return 100;
+ default:
+ RTC_NOTREACHED();
}
return 0;
};
diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.cc b/modules/audio_coding/neteq/tools/rtp_file_source.cc
index 0945667..806bba7 100644
--- a/modules/audio_coding/neteq/tools/rtp_file_source.cc
+++ b/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -44,8 +44,7 @@
return !!temp_file;
}
-RtpFileSource::~RtpFileSource() {
-}
+RtpFileSource::~RtpFileSource() {}
bool RtpFileSource::RegisterRtpHeaderExtension(RTPExtensionType type,
uint8_t id) {
@@ -82,8 +81,7 @@
}
RtpFileSource::RtpFileSource()
- : PacketSource(),
- parser_(RtpHeaderParser::Create()) {}
+ : PacketSource(), parser_(RtpHeaderParser::Create()) {}
bool RtpFileSource::OpenFile(const std::string& file_name) {
rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc
index cedd7ae..ab7acdc 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.cc
+++ b/modules/audio_coding/neteq/tools/rtp_generator.cc
@@ -32,8 +32,8 @@
uint32_t this_send_time = next_send_time_ms_;
assert(samples_per_ms_ > 0);
- next_send_time_ms_ += ((1.0 + drift_factor_) * payload_length_samples) /
- samples_per_ms_;
+ next_send_time_ms_ +=
+ ((1.0 + drift_factor_) * payload_length_samples) / samples_per_ms_;
return this_send_time;
}
@@ -46,8 +46,8 @@
uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
size_t payload_length_samples,
RTPHeader* rtp_header) {
- uint32_t ret = RtpGenerator::GetRtpHeader(
- payload_type, payload_length_samples, rtp_header);
+ uint32_t ret = RtpGenerator::GetRtpHeader(payload_type,
+ payload_length_samples, rtp_header);
if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
jump_from_timestamp_ &&
timestamp_ > jump_from_timestamp_) {
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.h b/modules/audio_coding/neteq/tools/rtp_generator.h
index 3b3cca9..04fdbdd 100644
--- a/modules/audio_coding/neteq/tools/rtp_generator.h
+++ b/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -32,8 +32,7 @@
next_send_time_ms_(start_send_time_ms),
ssrc_(ssrc),
samples_per_ms_(samples_per_ms),
- drift_factor_(0.0) {
- }
+ drift_factor_(0.0) {}
virtual ~RtpGenerator() {}