blob: 4828ba8641ee5ae2c8408d06c0a5330932c7a60f [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11#include "audio_processing_impl.h"
12
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
15#include "audio_buffer.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000016#include "critical_section_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000017#include "echo_cancellation_impl.h"
18#include "echo_control_mobile_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000019#include "file_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000020#include "high_pass_filter_impl.h"
21#include "gain_control_impl.h"
22#include "level_estimator_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000023#include "module_common_types.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000024#include "noise_suppression_impl.h"
25#include "processing_component.h"
26#include "splitting_filter.h"
27#include "voice_detection_impl.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000028#ifdef WEBRTC_ANDROID
andrew@webrtc.org4d5d5c12011-10-19 01:40:33 +000029#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000030#else
ajm@google.com808e0e02011-08-03 21:08:51 +000031#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000032#endif
niklase@google.com470e71d2011-07-07 08:21:25 +000033
34namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000035AudioProcessing* AudioProcessing::Create(int id) {
36 /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
37 webrtc::kTraceAudioProcessing,
38 id,
39 "AudioProcessing::Create()");*/
40
41 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
// Counterpart to Create(). Deletion is routed through the concrete type;
// presumably the interface's destructor is not directly usable by callers --
// confirm against the AudioProcessing header.
void AudioProcessing::Destroy(AudioProcessing* apm) {
  delete static_cast<AudioProcessingImpl*>(apm);
}
53
// Constructs the implementation and registers all processing components.
// Audio buffers (render_audio_/capture_audio_) are allocated later, in
// InitializeLocked(). Defaults: mono in/out/reverse at 16 kHz, 10 ms frames
// (sample_rate_hz_ / 100 samples per channel).
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      samples_per_channel_(sample_rate_hz_ / 100),
      stream_delay_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {

  // Every component is appended to component_list_, which drives bulk
  // Initialize() (InitializeLocked), Destroy() (destructor) and version
  // queries. Note: the per-frame processing order is hard-coded in
  // ProcessStream(), not taken from this list.
  echo_cancellation_ = new EchoCancellationImpl(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
98
99AudioProcessingImpl::~AudioProcessingImpl() {
100 while (!component_list_.empty()) {
101 ProcessingComponent* component = component_list_.front();
102 component->Destroy();
103 delete component;
104 component_list_.pop_front();
105 }
106
107 if (debug_file_->Open()) {
108 debug_file_->CloseFile();
109 }
110 delete debug_file_;
111 debug_file_ = NULL;
112
ajm@google.com808e0e02011-08-03 21:08:51 +0000113 delete event_msg_;
114 event_msg_ = NULL;
115
niklase@google.com470e71d2011-07-07 08:21:25 +0000116 delete crit_;
117 crit_ = NULL;
118
ajm@google.com808e0e02011-08-03 21:08:51 +0000119 if (render_audio_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000120 delete render_audio_;
121 render_audio_ = NULL;
122 }
123
ajm@google.com808e0e02011-08-03 21:08:51 +0000124 if (capture_audio_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000125 delete capture_audio_;
126 capture_audio_ = NULL;
127 }
128}
129
// Exposes the shared lock so owned components can synchronize with the APM's
// own critical section.
CriticalSectionWrapper* AudioProcessingImpl::crit() const {
  return crit_;
}

// Rate of the band-split processing path: 16 kHz when the input is 32 kHz
// super-wideband, otherwise equal to the input rate (see
// set_sample_rate_hz()).
int AudioProcessingImpl::split_sample_rate_hz() const {
  return split_sample_rate_hz_;
}
137
// Public entry point: acquires the lock and performs a full
// re-initialization of buffers and components.
int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(*crit_);
  return InitializeLocked();
}
142
143int AudioProcessingImpl::InitializeLocked() {
144 if (render_audio_ != NULL) {
145 delete render_audio_;
146 render_audio_ = NULL;
147 }
148
149 if (capture_audio_ != NULL) {
150 delete capture_audio_;
151 capture_audio_ = NULL;
152 }
153
ajm@google.com808e0e02011-08-03 21:08:51 +0000154 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000155 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000156 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000157 samples_per_channel_);
158
159 was_stream_delay_set_ = false;
160
161 // Initialize all components.
162 std::list<ProcessingComponent*>::iterator it;
163 for (it = component_list_.begin(); it != component_list_.end(); it++) {
164 int err = (*it)->Initialize();
165 if (err != kNoError) {
166 return err;
167 }
168 }
169
ajm@google.com808e0e02011-08-03 21:08:51 +0000170 if (debug_file_->Open()) {
171 int err = WriteInitMessage();
172 if (err != kNoError) {
173 return err;
174 }
175 }
176
niklase@google.com470e71d2011-07-07 08:21:25 +0000177 return kNoError;
178}
179
180int AudioProcessingImpl::set_sample_rate_hz(int rate) {
181 CriticalSectionScoped crit_scoped(*crit_);
182 if (rate != kSampleRate8kHz &&
183 rate != kSampleRate16kHz &&
184 rate != kSampleRate32kHz) {
185 return kBadParameterError;
186 }
187
188 sample_rate_hz_ = rate;
189 samples_per_channel_ = rate / 100;
190
191 if (sample_rate_hz_ == kSampleRate32kHz) {
192 split_sample_rate_hz_ = kSampleRate16kHz;
193 } else {
194 split_sample_rate_hz_ = sample_rate_hz_;
195 }
196
197 return InitializeLocked();
198}
199
200int AudioProcessingImpl::sample_rate_hz() const {
201 return sample_rate_hz_;
202}
203
204int AudioProcessingImpl::set_num_reverse_channels(int channels) {
205 CriticalSectionScoped crit_scoped(*crit_);
206 // Only stereo supported currently.
207 if (channels > 2 || channels < 1) {
208 return kBadParameterError;
209 }
210
ajm@google.com808e0e02011-08-03 21:08:51 +0000211 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000212
213 return InitializeLocked();
214}
215
216int AudioProcessingImpl::num_reverse_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000217 return num_reverse_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000218}
219
220int AudioProcessingImpl::set_num_channels(
221 int input_channels,
222 int output_channels) {
223 CriticalSectionScoped crit_scoped(*crit_);
224 if (output_channels > input_channels) {
225 return kBadParameterError;
226 }
227
228 // Only stereo supported currently.
229 if (input_channels > 2 || input_channels < 1) {
230 return kBadParameterError;
231 }
232
233 if (output_channels > 2 || output_channels < 1) {
234 return kBadParameterError;
235 }
236
ajm@google.com808e0e02011-08-03 21:08:51 +0000237 num_input_channels_ = input_channels;
238 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000239
240 return InitializeLocked();
241}
242
243int AudioProcessingImpl::num_input_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000244 return num_input_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000245}
246
247int AudioProcessingImpl::num_output_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000248 return num_output_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000249}
250
// Runs the full capture-side (near-end) processing chain on one 10 ms frame,
// in place. The frame must match the configured sample rate, input channel
// count and samples-per-channel, or a kBad*Error is returned without
// touching the audio.
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(*crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->_frequencyInHz != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->_audioChannel != num_input_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
    return kBadDataLengthError;
  }

  // When a debug recording is active, stage the unprocessed input and the
  // stream parameters (delay, drift, analog level) in a STREAM event. The
  // event is completed with the output and written after processing, below.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->_payloadDataLengthInSamples *
                             frame->_audioChannel;
    msg->set_input_data(frame->_payloadData, data_size);
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
  }

  capture_audio_->DeinterleaveFrom(frame);

  // Mix down to the output channel count up front; the frame's channel
  // count is updated to match so the final InterleaveTo() is consistent.
  // TODO(ajm): experiment with mixing and AEC placement.
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->_audioChannel = num_output_channels_;
  }

  // Band-split only when some enabled component will use or modify the
  // split data (see analysis_needed() / stream_data_changed()).
  bool data_changed = stream_data_changed();
  if (analysis_needed(data_changed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      SplittingFilterAnalysis(capture_audio_->data(i),
                              capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
                              capture_audio_->analysis_filter_state1(i),
                              capture_audio_->analysis_filter_state2(i));
    }
  }

  // Fixed component order: HPF -> AGC analysis -> AEC -> NS -> AECM -> VAD
  // -> AGC. Any component error aborts the chain immediately.
  err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // Keep a pre-NS copy of the low band when both AECM and NS are enabled —
  // presumably AECM uses it as an unsuppressed reference; confirm in
  // EchoControlMobileImpl.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }

  err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = voice_detection_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // Recombine the bands only if something actually modified them at 32 kHz.
  if (synthesis_needed(data_changed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                               capture_audio_->high_pass_split_data(i),
                               capture_audio_->data(i),
                               capture_audio_->synthesis_filter_state1(i),
                               capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  err = level_estimator_->ProcessStream(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // data_changed == false lets InterleaveTo() skip copying untouched audio
  // back into the frame.
  capture_audio_->InterleaveTo(frame, data_changed);

  // Complete the staged STREAM event with the processed output and flush it.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->_payloadDataLengthInSamples *
                             frame->_audioChannel;
    msg->set_output_data(frame->_payloadData, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }

  return kNoError;
}
376
// Analyzes one 10 ms frame of the reverse (render / far-end) stream. The
// audio is only read — it feeds the echo control components and the AGC —
// and the frame is never modified. Frame dimensions must match the
// configured reverse-stream setup.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(*crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->_frequencyInHz != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->_audioChannel != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
    return kBadDataLengthError;
  }

  // Record the reverse frame immediately; unlike ProcessStream() there is
  // no processed output to append, so the event is written up front.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->_payloadDataLengthInSamples *
                             frame->_audioChannel;
    msg->set_data(frame->_payloadData, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }

  render_audio_->DeinterleaveFrom(frame);

  // In super-wideband mode the render audio is band-split for the
  // components, mirroring the capture side.
  // TODO(ajm): turn the splitting filter into a component?
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  // Invalidate the reported delay — presumably so a fresh estimate must be
  // supplied via set_stream_delay_ms() before the next capture frame;
  // confirm with was_stream_delay_set() callers.
  was_stream_delay_set_ = false;
  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
443
// Reports the render-to-capture delay in milliseconds. Negative values are
// rejected; values above 500 ms are clamped to 500 and flagged with a
// warning return code.
// NOTE(review): was_stream_delay_set_ is set to true before validation, so
// even a rejected negative delay counts as "set" — confirm this is
// intentional.
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
  was_stream_delay_set_ = true;
  if (delay < 0) {
    return kBadParameterError;
  }

  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
  if (delay > 500) {
    stream_delay_ms_ = 500;
    return kBadStreamParameterWarning;
  }

  stream_delay_ms_ = delay;
  return kNoError;
}
459
// Last delay accepted (or clamped) by set_stream_delay_ms(), in ms.
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

// True once set_stream_delay_ms() has been called since the last reset
// (AnalyzeReverseStream() and InitializeLocked() both clear the flag).
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
467
468int AudioProcessingImpl::StartDebugRecording(
469 const char filename[AudioProcessing::kMaxFilenameSize]) {
470 CriticalSectionScoped crit_scoped(*crit_);
471 assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);
472
473 if (filename == NULL) {
474 return kNullPointerError;
475 }
476
477 // Stop any ongoing recording.
478 if (debug_file_->Open()) {
479 if (debug_file_->CloseFile() == -1) {
480 return kFileError;
481 }
482 }
483
484 if (debug_file_->OpenFile(filename, false) == -1) {
485 debug_file_->CloseFile();
486 return kFileError;
487 }
488
ajm@google.com808e0e02011-08-03 21:08:51 +0000489 int err = WriteInitMessage();
490 if (err != kNoError) {
491 return err;
niklase@google.com470e71d2011-07-07 08:21:25 +0000492 }
493
494 return kNoError;
495}
496
497int AudioProcessingImpl::StopDebugRecording() {
498 CriticalSectionScoped crit_scoped(*crit_);
499 // We just return if recording hasn't started.
500 if (debug_file_->Open()) {
501 if (debug_file_->CloseFile() == -1) {
502 return kFileError;
503 }
504 }
505
506 return kNoError;
507}
508
// Component accessors. Each returns the interface of a sub-component owned
// by this object; the pointers are created in the constructor and remain
// valid for the lifetime of the APM instance.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
536
// Legacy version query. Writes the APM version string followed by each
// component's version (newline-prefixed) into |version| starting at
// |position|; |bytes_remaining| and |position| are updated in place so the
// caller can chain further writes.
WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
    WebRtc_UWord32& bytes_remaining, WebRtc_UWord32& position) const {
  if (version == NULL) {
    /*WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceAudioProcessing,
                 -1,
                 "Null version pointer");*/
    return kNullPointerError;
  }
  // Zero-fill the remaining buffer up front; this is what supplies the
  // final NUL terminator, since the lengths copied below exclude it.
  memset(&version[position], 0, bytes_remaining);

  char my_version[] = "AudioProcessing 1.0.0";
  // strlen() excludes the terminator; termination comes from the memset
  // above (only as long as the strings don't fill the buffer exactly).
  WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
  if (bytes_remaining < length) {
    /*WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceAudioProcessing,
                 -1,
                 "Buffer of insufficient length");*/
    return kBadParameterError;
  }
  memcpy(&version[position], my_version, length);
  bytes_remaining -= length;
  position += length;

  // Append each component's version, prefixed with a newline. Components
  // reporting an empty string are skipped.
  std::list<ProcessingComponent*>::const_iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); it++) {
    char component_version[256];
    strcpy(component_version, "\n");
    int err = (*it)->get_version(&component_version[1],
                                 sizeof(component_version) - 1);
    if (err != kNoError) {
      return err;
    }
    if (strncmp(&component_version[1], "\0", 1) == 0) {
      // Assume empty if first byte is NULL.
      continue;
    }

    length = static_cast<WebRtc_UWord32>(strlen(component_version));
    if (bytes_remaining < length) {
      /*WEBRTC_TRACE(webrtc::kTraceError,
                   webrtc::kTraceAudioProcessing,
                   -1,
                   "Buffer of insufficient length");*/
      return kBadParameterError;
    }
    memcpy(&version[position], component_version, length);
    bytes_remaining -= length;
    position += length;
  }

  return kNoError;
}
591
592WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
593 CriticalSectionScoped crit_scoped(*crit_);
594 /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
595 webrtc::kTraceAudioProcessing,
596 id_,
597 "ChangeUniqueId(new id = %d)",
598 id);*/
599 id_ = id;
600
601 return kNoError;
602}
ajm@google.com808e0e02011-08-03 21:08:51 +0000603
604int AudioProcessingImpl::WriteMessageToDebugFile() {
605 int32_t size = event_msg_->ByteSize();
606 if (size <= 0) {
607 return kUnspecifiedError;
608 }
609#if defined(WEBRTC_BIG_ENDIAN)
610 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
611 // pretty safe in assuming little-endian.
612#endif
613
614 if (!event_msg_->SerializeToString(&event_str_)) {
615 return kUnspecifiedError;
616 }
617
618 // Write message preceded by its size.
619 if (!debug_file_->Write(&size, sizeof(int32_t))) {
620 return kFileError;
621 }
622 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
623 return kFileError;
624 }
625
626 event_msg_->Clear();
627
628 return 0;
629}
630
631int AudioProcessingImpl::WriteInitMessage() {
632 event_msg_->set_type(audioproc::Event::INIT);
633 audioproc::Init* msg = event_msg_->mutable_init();
634 msg->set_sample_rate(sample_rate_hz_);
635 msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
636 msg->set_num_input_channels(num_input_channels_);
637 msg->set_num_output_channels(num_output_channels_);
638 msg->set_num_reverse_channels(num_reverse_channels_);
639
640 int err = WriteMessageToDebugFile();
641 if (err != kNoError) {
642 return err;
643 }
644
645 return kNoError;
646}
andrew@webrtc.org755b04a2011-11-15 16:57:56 +0000647
648bool AudioProcessingImpl::stream_data_changed() const {
649 int enabled_count = 0;
650 std::list<ProcessingComponent*>::const_iterator it;
651 for (it = component_list_.begin(); it != component_list_.end(); it++) {
652 if ((*it)->is_component_enabled()) {
653 enabled_count++;
654 }
655 }
656
657 // Data is unchanged if no components are enabled, or if only level_estimator_
658 // or voice_detection_ is enabled.
659 if (enabled_count == 0) {
660 return false;
661 } else if (enabled_count == 1) {
662 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
663 return false;
664 }
665 } else if (enabled_count == 2) {
666 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
667 return false;
668 }
669 }
670 return true;
671}
672
673bool AudioProcessingImpl::synthesis_needed(bool stream_data_changed) const {
674 return (stream_data_changed && sample_rate_hz_ == kSampleRate32kHz);
675}
676
// Determines whether the capture audio must be band-split before the
// component chain runs (see ProcessStream()).
bool AudioProcessingImpl::analysis_needed(bool stream_data_changed) const {
  if (!stream_data_changed && !voice_detection_->is_enabled()) {
    // Nothing will modify the data and the voice detector is off, so no
    // consumer of the split bands is active. (This covers both "no
    // components enabled" and "only level_estimator_ enabled".)
    return false;
  } else if (sample_rate_hz_ == kSampleRate32kHz) {
    // A split-band consumer is active and we run super-wideband, so the
    // split must be produced.
    return true;
  }
  // At 8/16 kHz processing is single-band; there is nothing to split.
  return false;
}
niklase@google.com470e71d2011-07-07 08:21:25 +0000687} // namespace webrtc