Remove the use of AudioFrame::energy_ from AudioProcessing and VoE.

We want to remove energy_ entirely as we've seen that carrying around
this potentially invalid value is dangerous.

Results in the removal of AudioBuffer::is_muted(), which was no longer
used in practice after the level calculation moved directly into
channel.cc.

Instead, channel.cc now calls RMSLevel::ProcessMuted() to short-circuit
the level computation when the signal is muted.

BUG=3315
TESTED=Muting the channel in voe_cmd_test results in rms=127.
R=bjornv@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/12529004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@6159 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index 024b700..eb9bea5 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -99,7 +99,6 @@
     num_mixed_low_pass_channels_(0),
     reference_copied_(false),
     activity_(AudioFrame::kVadUnknown),
-    is_muted_(false),
     data_(NULL),
     keyboard_data_(NULL),
     channels_(new ChannelBuffer<int16_t>(proc_samples_per_channel_,
@@ -223,7 +222,6 @@
   num_mixed_low_pass_channels_ = 0;
   reference_copied_ = false;
   activity_ = AudioFrame::kVadUnknown;
-  is_muted_ = false;
 }
 
 const int16_t* AudioBuffer::data(int channel) const {
@@ -307,10 +305,6 @@
   return activity_;
 }
 
-bool AudioBuffer::is_muted() const {
-  return is_muted_;
-}
-
 int AudioBuffer::num_channels() const {
   return num_proc_channels_;
 }
@@ -336,9 +330,6 @@
   assert(frame->samples_per_channel_ ==  proc_samples_per_channel_);
   InitForNewData();
   activity_ = frame->vad_activity_;
-  if (frame->energy_ == 0) {
-    is_muted_ = true;
-  }
 
   if (num_proc_channels_ == 1) {
     // We can get away with a pointer assignment in this case.
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index 2b93510..c05ffc9 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -71,8 +71,6 @@
   void set_activity(AudioFrame::VADActivity activity);
   AudioFrame::VADActivity activity() const;
 
-  bool is_muted() const;
-
   // Use for int16 interleaved data.
   void DeinterleaveFrom(AudioFrame* audioFrame);
   void InterleaveTo(AudioFrame* audioFrame) const;
@@ -106,7 +104,6 @@
   int num_mixed_low_pass_channels_;
   bool reference_copied_;
   AudioFrame::VADActivity activity_;
-  bool is_muted_;
 
   // If non-null, use this instead of channels_->channel(0). This is an
   // optimization for the case num_proc_channels_ == 1 that allows us to point
diff --git a/modules/audio_processing/level_estimator_impl.cc b/modules/audio_processing/level_estimator_impl.cc
index d209d40..cfe295a 100644
--- a/modules/audio_processing/level_estimator_impl.cc
+++ b/modules/audio_processing/level_estimator_impl.cc
@@ -12,6 +12,7 @@
 
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/rms_level.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 namespace webrtc {
@@ -29,13 +30,8 @@
   }
 
   RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
-  if (audio->is_muted()) {
-    rms_level->ProcessMuted(audio->samples_per_channel() *
-                            audio->num_channels());
-  } else {
-    for (int i = 0; i < audio->num_channels(); ++i) {
-      rms_level->Process(audio->data(i), audio->samples_per_channel());
-    }
+  for (int i = 0; i < audio->num_channels(); ++i) {
+    rms_level->Process(audio->data(i), audio->samples_per_channel());
   }
 
   return AudioProcessing::kNoError;
diff --git a/modules/audio_processing/rms_level.cc b/modules/audio_processing/rms_level.cc
index 963622b..14136bf 100644
--- a/modules/audio_processing/rms_level.cc
+++ b/modules/audio_processing/rms_level.cc
@@ -15,16 +15,16 @@
 
 namespace webrtc {
 
-static const float kMaxSquaredLevel = 32768.0 * 32768.0;
+static const float kMaxSquaredLevel = 32768 * 32768;
 
 RMSLevel::RMSLevel()
-    : sum_square_(0.0),
+    : sum_square_(0),
       sample_count_(0) {}
 
 RMSLevel::~RMSLevel() {}
 
 void RMSLevel::Reset() {
-  sum_square_ = 0.0;
+  sum_square_ = 0;
   sample_count_ = 0;
 }
 
@@ -40,7 +40,7 @@
 }
 
 int RMSLevel::RMS() {
-  if (sample_count_ == 0 || sum_square_ == 0.0) {
+  if (sample_count_ == 0 || sum_square_ == 0) {
     Reset();
     return kMinLevel;
   }
diff --git a/modules/audio_processing/rms_level.h b/modules/audio_processing/rms_level.h
index 1b19803..055d271 100644
--- a/modules/audio_processing/rms_level.h
+++ b/modules/audio_processing/rms_level.h
@@ -8,6 +8,9 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
 #include "webrtc/typedefs.h"
 
 namespace webrtc {
@@ -49,3 +52,6 @@
 };
 
 }  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
diff --git a/modules/audio_processing/test/audio_processing_unittest.cc b/modules/audio_processing/test/audio_processing_unittest.cc
index 0c5b67d..bfb7766 100644
--- a/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/modules/audio_processing/test/audio_processing_unittest.cc
@@ -1229,15 +1229,6 @@
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(70, apm_->level_estimator()->RMS());
 
-  // Min value if energy_ == 0.
-  SetFrameTo(frame_, 10000);
-  uint32_t energy = frame_->energy_;  // Save default to restore below.
-  frame_->energy_ = 0;
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
-  EXPECT_EQ(127, apm_->level_estimator()->RMS());
-  frame_->energy_ = energy;
-
   // Verify reset after enable/disable.
   SetFrameTo(frame_, 32767);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
diff --git a/modules/utility/source/audio_frame_operations.cc b/modules/utility/source/audio_frame_operations.cc
index 18dba52..e3b0010 100644
--- a/modules/utility/source/audio_frame_operations.cc
+++ b/modules/utility/source/audio_frame_operations.cc
@@ -72,7 +72,6 @@
 void AudioFrameOperations::Mute(AudioFrame& frame) {
   memset(frame.data_, 0, sizeof(int16_t) *
       frame.samples_per_channel_ * frame.num_channels_);
-  frame.energy_ = 0;
 }
 
 int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
diff --git a/modules/utility/source/audio_frame_operations_unittest.cc b/modules/utility/source/audio_frame_operations_unittest.cc
index 34c08a8..f4d881c 100644
--- a/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/modules/utility/source/audio_frame_operations_unittest.cc
@@ -142,17 +142,13 @@
 
 TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
   SetFrameData(&frame_, 1000, 1000);
-  frame_.energy_ = 1000 * 1000 * frame_.samples_per_channel_ *
-      frame_.num_channels_;
   AudioFrameOperations::Mute(frame_);
 
   AudioFrame muted_frame;
   muted_frame.samples_per_channel_ = 320;
   muted_frame.num_channels_ = 2;
   SetFrameData(&muted_frame, 0, 0);
-  muted_frame.energy_ = 0;
   VerifyFramesAreEqual(muted_frame, frame_);
-  EXPECT_EQ(muted_frame.energy_, frame_.energy_);
 }
 
 // TODO(andrew): should not allow negative scales.
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 6bbce78..f919c3d 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -3689,9 +3689,9 @@
         MixOrReplaceAudioWithFile(mixingFrequency);
     }
 
-    if (Mute())
-    {
-        AudioFrameOperations::Mute(_audioFrame);
+    bool is_muted = Mute();  // Cache locally as Mute() takes a lock.
+    if (is_muted) {
+      AudioFrameOperations::Mute(_audioFrame);
     }
 
     if (channel_state_.Get().input_external_media)
@@ -3714,7 +3714,11 @@
 
     if (_includeAudioLevelIndication) {
       int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
-      rms_level_.Process(_audioFrame.data_, length);
+      if (is_muted) {
+        rms_level_.ProcessMuted(length);
+      } else {
+        rms_level_.Process(_audioFrame.data_, length);
+      }
     }
 
     return 0;