Add RMS computation for the RTP level indicator.
- Compute RMS over a packet's worth of audio to be sent in Channel, rather than the captured audio in TransmitMixer.
- We now use the entire packet rather than the last 10 ms frame.
- Restore functionality to LevelEstimator.
- Fix a bug in the splitting filter.
- Fix a number of bugs in process_test related to a poorly named AudioFrame member.
- Update the unittest protobuf and float reference output.
- Add audioproc unittests.
- Reenable voe_extended_tests, and add a real function test.
- Use correct minimum level of 127.
TEST=audioproc_unittest, audioproc, voe_extended_test, voe_auto_test
Review URL: http://webrtc-codereview.appspot.com/279003
git-svn-id: http://webrtc.googlecode.com/svn/trunk@950 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/src/modules/audio_processing/audio_processing_impl.cc b/src/modules/audio_processing/audio_processing_impl.cc
index da8dcdb..4828ba8 100644
--- a/src/modules/audio_processing/audio_processing_impl.cc
+++ b/src/modules/audio_processing/audio_processing_impl.cc
@@ -271,7 +271,7 @@
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::STREAM);
audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(WebRtc_Word16) *
+ const size_t data_size = sizeof(int16_t) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_input_data(frame->_payloadData, data_size);
@@ -285,12 +285,12 @@
// TODO(ajm): experiment with mixing and AEC placement.
if (num_output_channels_ < num_input_channels_) {
capture_audio_->Mix(num_output_channels_);
-
frame->_audioChannel = num_output_channels_;
}
- if (sample_rate_hz_ == kSampleRate32kHz) {
- for (int i = 0; i < num_input_channels_; i++) {
+ bool data_changed = stream_data_changed();
+ if (analysis_needed(data_changed)) {
+ for (int i = 0; i < num_output_channels_; i++) {
// Split into a low and high band.
SplittingFilterAnalysis(capture_audio_->data(i),
capture_audio_->low_pass_split_data(i),
@@ -340,12 +340,7 @@
return err;
}
- //err = level_estimator_->ProcessCaptureAudio(capture_audio_);
- //if (err != kNoError) {
- // return err;
- //}
-
- if (sample_rate_hz_ == kSampleRate32kHz) {
+ if (synthesis_needed(data_changed)) {
for (int i = 0; i < num_output_channels_; i++) {
// Recombine low and high bands.
SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
@@ -356,11 +351,17 @@
}
}
- capture_audio_->InterleaveTo(frame);
+ // The level estimator operates on the recombined data.
+ err = level_estimator_->ProcessStream(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ capture_audio_->InterleaveTo(frame, data_changed);
if (debug_file_->Open()) {
audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(WebRtc_Word16) *
+ const size_t data_size = sizeof(int16_t) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_output_data(frame->_payloadData, data_size);
@@ -396,7 +397,7 @@
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
- const size_t data_size = sizeof(WebRtc_Word16) *
+ const size_t data_size = sizeof(int16_t) *
frame->_payloadDataLengthInSamples *
frame->_audioChannel;
msg->set_data(frame->_payloadData, data_size);
@@ -436,11 +437,6 @@
return err;
}
- //err = level_estimator_->AnalyzeReverseStream(render_audio_);
- //if (err != kNoError) {
- // return err;
- //}
-
was_stream_delay_set_ = false;
return err; // TODO(ajm): this is for returning warnings; necessary?
}
@@ -648,4 +644,44 @@
return kNoError;
}
+
+bool AudioProcessingImpl::stream_data_changed() const {
+ int enabled_count = 0;
+ std::list<ProcessingComponent*>::const_iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); it++) {
+ if ((*it)->is_component_enabled()) {
+ enabled_count++;
+ }
+ }
+
+ // Data is unchanged if no components are enabled, or if only level_estimator_
+ // or voice_detection_ is enabled.
+ if (enabled_count == 0) {
+ return false;
+ } else if (enabled_count == 1) {
+ if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
+ return false;
+ }
+ } else if (enabled_count == 2) {
+ if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
+ return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
+}
+
+bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
+ if (!stream_data_changed && !voice_detection_->is_enabled()) {
+ // Only level_estimator_ is enabled.
+ return false;
+ } else if (sample_rate_hz_ == kSampleRate32kHz) {
+ // Something besides level_estimator_ is enabled, and we have super-wb.
+ return true;
+ }
+ return false;
+}
} // namespace webrtc