/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/transmit_mixer.h"

#include "webrtc/modules/utility/interface/audio_frame_operations.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/channel_manager.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"
#include "webrtc/voice_engine/voe_base_impl.h"

#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))

namespace webrtc {
namespace voe {

// TODO(ajm): The thread safety of this is dubious...
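// Called on the monitor module's process thread. Delivers any pending typing
// noise and saturation warnings to the registered VoiceEngineObserver, so
// these callbacks happen outside the real-time audio capture path.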
void
TransmitMixer::OnPeriodicProcess()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::OnPeriodicProcess()");

#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
    if (_typingNoiseWarningPending)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            if (_typingNoiseDetected) {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_WARNING);
            } else {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_OFF_WARNING);
            }
        }
        _typingNoiseWarningPending = false;
    }
#endif

    bool saturationWarning = false;
    {
    // Modify |_saturationWarning| under lock to avoid conflict with write op
    // in ProcessAudio and also ensure that we don't hold the lock during the
    // callback.
    CriticalSectionScoped cs(&_critSect);
    saturationWarning = _saturationWarning;
    if (_saturationWarning)
        _saturationWarning = false;
    }

    if (saturationWarning)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() =>"
                         " CallbackOnError(VE_SATURATION_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
        }
    }
}

void TransmitMixer::PlayNotification(int32_t id,
                                     uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::RecordNotification(int32_t id,
                                       uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::PlayFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded(id=%d)", id);

    assert(id == _filePlayerId);

    CriticalSectionScoped cs(&_critSect);

    _filePlaying = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded() => "
                 "file player module is shutdown");
}

void
TransmitMixer::RecordFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordFileEnded(id=%d)", id);

    if (id == _fileRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileRecorder module"
                     " is shutdown");
    } else if (id == _fileCallRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileCallRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileCallRecorder"
                     " module is shutdown");
    }
}

int32_t
TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                 "TransmitMixer::Create(instanceId=%d)", instanceId);
    mixer = new TransmitMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                     "TransmitMixer::Create() unable to allocate memory"
                     " for mixer");
        return -1;
    }
    return 0;
}

void
TransmitMixer::Destroy(TransmitMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

TransmitMixer::TransmitMixer(uint32_t instanceId) :
    _engineStatisticsPtr(NULL),
    _channelManagerPtr(NULL),
    audioproc_(NULL),
    _voiceEngineObserverPtr(NULL),
    _processThreadPtr(NULL),
    _filePlayerPtr(NULL),
    _fileRecorderPtr(NULL),
    _fileCallRecorderPtr(NULL),
    // Avoid ID conflicts with other channels by adding 1024 - 1026;
    // we will never use as many as 1024 channels.
    _filePlayerId(instanceId + 1024),
    _fileRecorderId(instanceId + 1025),
    _fileCallRecorderId(instanceId + 1026),
    _filePlaying(false),
    _fileRecording(false),
    _fileCallRecording(false),
    _audioLevel(),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    _typingNoiseWarningPending(false),
    _typingNoiseDetected(false),
#endif
    _saturationWarning(false),
    _instanceId(instanceId),
    _mixFileWithMicrophone(false),
    _captureLevel(0),
    external_postproc_ptr_(NULL),
    external_preproc_ptr_(NULL),
    _mute(false),
    _remainingMuteMicTimeMs(0),
    stereo_codec_(false),
    swap_stereo_channels_(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::TransmitMixer() - ctor");
}

TransmitMixer::~TransmitMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::~TransmitMixer() - dtor");
    _monitorModule.DeRegisterObserver();
    if (_processThreadPtr)
    {
        _processThreadPtr->DeRegisterModule(&_monitorModule);
    }
    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_fileRecorderPtr)
        {
            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
            _fileRecorderPtr = NULL;
        }
        if (_fileCallRecorderPtr)
        {
            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileCallRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
            _fileCallRecorderPtr = NULL;
        }
        if (_filePlayerPtr)
        {
            _filePlayerPtr->RegisterModuleFileCallback(NULL);
            _filePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
            _filePlayerPtr = NULL;
        }
    }
    delete &_critSect;
    delete &_callbackCritSect;
}

int32_t
TransmitMixer::SetEngineInformation(ProcessThread& processThread,
                                    Statistics& engineStatistics,
                                    ChannelManager& channelManager)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetEngineInformation()");

    _processThreadPtr = &processThread;
    _engineStatisticsPtr = &engineStatistics;
    _channelManagerPtr = &channelManager;

    if (_processThreadPtr->RegisterModule(&_monitorModule) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::SetEngineInformation() failed to"
                     " register the monitor module");
    } else
    {
        _monitorModule.RegisterObserver(*this);
    }

    return 0;
}

int32_t
TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RegisterVoiceEngineObserver()");
    CriticalSectionScoped cs(&_callbackCritSect);

    if (_voiceEngineObserverPtr)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "RegisterVoiceEngineObserver() observer already enabled");
        return -1;
    }
    _voiceEngineObserverPtr = &observer;
    return 0;
}

int32_t
TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)",
                 audioProcessingModule);
    audioproc_ = audioProcessingModule;
    return 0;
}

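// Scans all sending channels and reports the highest send-codec sample rate
// and channel count. Defaults to 8000 Hz mono when no channel is sending.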
void TransmitMixer::GetSendCodecInfo(int* max_sample_rate, int* max_channels) {
  *max_sample_rate = 8000;
  *max_channels = 1;
  for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
       it.Increment()) {
    Channel* channel = it.GetChannel();
    if (channel->Sending()) {
      CodecInst codec;
      channel->GetSendCodec(codec);
      *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
      *max_channels = std::max(*max_channels, codec.channels);
    }
  }
}

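// Runs the capture-side processing chain on one 10 ms block of microphone
// audio: resample to the send-codec format, apply the external preprocessing
// callback, run the AudioProcessing module, optionally swap stereo channels,
// run typing detection, apply muting, mix with or record to file, apply the
// external postprocessing callback, and finally measure the audio level.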
int32_t
TransmitMixer::PrepareDemux(const void* audioSamples,
                            uint32_t nSamples,
                            uint8_t nChannels,
                            uint32_t samplesPerSec,
                            uint16_t totalDelayMS,
                            int32_t clockDrift,
                            uint16_t currentMicLevel,
                            bool keyPressed)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u, "
                 "samplesPerSec=%u, totalDelayMS=%u, clockDrift=%d, "
                 "currentMicLevel=%u)", nSamples, nChannels, samplesPerSec,
                 totalDelayMS, clockDrift, currentMicLevel);

    // --- Resample input audio and create/store the initial audio frame
    GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
                       nSamples,
                       nChannels,
                       samplesPerSec);

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_preproc_ptr_) {
            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
                                           _audioFrame.data_,
                                           _audioFrame.samples_per_channel_,
                                           _audioFrame.sample_rate_hz_,
                                           _audioFrame.num_channels_ == 2);
        }
    }

    // --- Near-end audio processing.
    ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);

    if (swap_stereo_channels_ && stereo_codec_)
        // Only bother swapping if we're using a stereo codec.
        AudioFrameOperations::SwapStereoChannels(&_audioFrame);

    // --- Annoying typing detection (utilizes the APM/VAD decision)
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    TypingDetection(keyPressed);
#endif

    // --- Mute during DTMF tone if direct feedback is enabled
    if (_remainingMuteMicTimeMs > 0)
    {
        AudioFrameOperations::Mute(_audioFrame);
        _remainingMuteMicTimeMs -= 10;
        if (_remainingMuteMicTimeMs < 0)
        {
            _remainingMuteMicTimeMs = 0;
        }
    }

    // --- Mute signal
    if (_mute)
    {
        AudioFrameOperations::Mute(_audioFrame);
    }

    // --- Mix with file (does not affect the mixing frequency)
    if (_filePlaying)
    {
        MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
    }

    // --- Record to file
    bool file_recording = false;
    {
        CriticalSectionScoped cs(&_critSect);
        file_recording = _fileRecording;
    }
    if (file_recording)
    {
        RecordAudioToFile(_audioFrame.sample_rate_hz_);
    }

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_postproc_ptr_) {
            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
                                            _audioFrame.data_,
                                            _audioFrame.samples_per_channel_,
                                            _audioFrame.sample_rate_hz_,
                                            _audioFrame.num_channels_ == 2);
        }
    }

    // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
    return 0;
}

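// Copies the processed capture frame into every sending channel and lets each
// channel prepare it for encoding. Demultiplex() takes its own copy, so the
// shared _audioFrame is not modified by individual channels.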
int32_t
TransmitMixer::DemuxAndMix()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::DemuxAndMix()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            // Demultiplex makes a copy of its input.
            channelPtr->Demultiplex(_audioFrame);
            channelPtr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
        }
    }
    return 0;
}

void TransmitMixer::DemuxAndMix(const int voe_channels[],
                                int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr) {
      if (channel_ptr->Sending()) {
        // Demultiplex makes a copy of its input.
        channel_ptr->Demultiplex(_audioFrame);
        channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
      }
    }
  }
}

int32_t
TransmitMixer::EncodeAndSend()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::EncodeAndSend()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            channelPtr->EncodeAndSend();
        }
    }
    return 0;
}

void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())
      channel_ptr->EncodeAndSend();
  }
}

uint32_t TransmitMixer::CaptureLevel() const
{
    return _captureLevel;
}

void
TransmitMixer::UpdateMuteMicrophoneTime(uint32_t lengthMs)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
                 lengthMs);
    _remainingMuteMicTimeMs = lengthMs;
}

int32_t
TransmitMixer::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopSend()");
    _audioLevel.Clear();
    return 0;
}

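// Creates a FilePlayer for the given file and starts playout. The file audio
// is later mixed with, or substituted for, the microphone signal in
// PrepareDemux(), depending on SetMixWithMicStatus().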
int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
                                                bool loop,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone("
                 "fileNameUTF8[]=%s, loop=%d, format=%d, volumeScaling=%5.3f,"
                 " startPosition=%d, stopPosition=%d)", fileName, loop,
                 format, volumeScaling, startPosition, stopPosition);

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        fileName,
        loop,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if (stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileAsMicrophone() NULL as input stream");
        return -1;
    }

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        (InStream&) *stream,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }
    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StopPlayingFileAsMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopPlayingFileAsMicrophone()");

    if (!_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceWarning,
            "StopPlayingFileAsMicrophone() is not playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_filePlayerPtr->StopPlayingFile() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_CANNOT_STOP_PLAYOUT, kTraceError,
            "StopPlayingFile() could not stop playing file");
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(NULL);
    FilePlayer::DestroyFilePlayer(_filePlayerPtr);
    _filePlayerPtr = NULL;
    _filePlaying = false;

    return 0;
}

int TransmitMixer::IsPlayingFileAsMicrophone() const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::IsPlayingFileAsMicrophone()");
    return _filePlaying;
}

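// Starts recording the processed microphone signal to a file. L16, PCMU and
// PCMA are written as WAV; a NULL codecInst selects 16 kHz PCM, and any other
// codec is written as a compressed file.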
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
                 fileName);

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL &&
        (codecInst->channels < 0 || codecInst->channels > 2))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
                                                  *codecInst,
                                                  notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }

    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}


int TransmitMixer::StopRecordingMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (!_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingMicrophone() is not recording");
        return 0;
    }

    if (_fileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
    _fileRecorderPtr = NULL;
    _fileRecording = false;

    return 0;
}

int TransmitMixer::StartRecordingCall(const char* fileName,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr
        = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                           (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }
    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingCall(OutStream* stream,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall()");

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                         (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
                                                      *codecInst,
                                                      notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StopRecordingCall()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingCall()");

    if (!_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingCall() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_fileCallRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
    _fileCallRecorderPtr = NULL;
    _fileCallRecording = false;

    return 0;
}

void
TransmitMixer::SetMixWithMicStatus(bool mix)
{
    _mixFileWithMicrophone = mix;
}

int TransmitMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess* object,
    ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::RegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (!object) {
    return -1;
  }

  // Store the callback object according to the processing type.
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = object;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = object;
  } else {
    return -1;
  }
  return 0;
}

int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::DeRegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = NULL;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = NULL;
  } else {
    return -1;
  }
  return 0;
}

int
TransmitMixer::SetMute(bool enable)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetMute(enable=%d)", enable);
    _mute = enable;
    return 0;
}

bool
TransmitMixer::Mute() const
{
    return _mute;
}

int8_t TransmitMixer::AudioLevel() const
{
    // Speech + file level [0,9]
    return _audioLevel.Level();
}

int16_t TransmitMixer::AudioLevelFullRange() const
{
    // Speech + file level [0,32767]
    return _audioLevel.LevelFullRange();
}

bool TransmitMixer::IsRecordingCall()
{
    return _fileCallRecording;
}

bool TransmitMixer::IsRecordingMic()
{
    CriticalSectionScoped cs(&_critSect);
    return _fileRecording;
}

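// Converts the raw capture buffer into _audioFrame at the send-codec sample
// rate and channel count. The rate is capped at the AudioProcessing module's
// maximum native rate, and at 16 kHz when mobile echo control (AECM) is
// enabled.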
void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
                                       int samples_per_channel,
                                       int num_channels,
                                       int sample_rate_hz) {
  int codec_rate;
  int num_codec_channels;
  GetSendCodecInfo(&codec_rate, &num_codec_channels);
  // TODO(ajm): This currently restricts the sample rate to 32 kHz.
  // See: https://code.google.com/p/webrtc/issues/detail?id=3146
  // When 48 kHz is supported natively by AudioProcessing, this will have
  // to be changed to handle 44.1 kHz.
  int max_sample_rate_hz = kAudioProcMaxNativeSampleRateHz;
  if (audioproc_->echo_control_mobile()->is_enabled()) {
    // AECM only supports 8 and 16 kHz.
    max_sample_rate_hz = 16000;
  }
  codec_rate = std::min(codec_rate, max_sample_rate_hz);
  stereo_codec_ = num_codec_channels == 2;

  if (!mono_buffer_.get()) {
    // Temporary space for DownConvertToCodecFormat.
    mono_buffer_.reset(new int16_t[kMaxMonoDataSizeSamples]);
  }
  DownConvertToCodecFormat(audio,
                           samples_per_channel,
                           num_channels,
                           sample_rate_hz,
                           num_codec_channels,
                           codec_rate,
                           mono_buffer_.get(),
                           &resampler_,
                           &_audioFrame);
}

int32_t TransmitMixer::RecordAudioToFile(
    uint32_t mixingFrequency)
{
    CriticalSectionScoped cs(&_critSect);
    if (_fileRecorderPtr == NULL)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() filerecorder does not"
                     " exist");
        return -1;
    }

    if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() file recording"
                     " failed");
        return -1;
    }

    return 0;
}

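// Pulls 10 ms of audio from the file player and either mixes it into, or
// substitutes it for, the current microphone frame depending on
// _mixFileWithMicrophone. The file stream is currently always mono.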
int32_t TransmitMixer::MixOrReplaceAudioWithFile(
    int mixingFrequency)
{
    scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);

    int fileSamples(0);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_filePlayerPtr == NULL)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile()"
                         " fileplayer does not exist");
            return -1;
        }

        if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
                                                 fileSamples,
                                                 mixingFrequency) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile() file"
                         " mixing failed");
            return -1;
        }
    }

    assert(_audioFrame.samples_per_channel_ == fileSamples);

    if (_mixFileWithMicrophone)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        MixWithSat(_audioFrame.data_,
                   _audioFrame.num_channels_,
                   fileBuffer.get(),
                   1,
                   fileSamples);
    } else
    {
        // Replace ACM audio with file.
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        _audioFrame.UpdateFrame(-1,
                                0xFFFFFFFF,
                                fileBuffer.get(),
                                fileSamples,
                                mixingFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadUnknown,
                                1);
    }
    return 0;
}

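// Runs the AudioProcessing module on the capture frame: reports the stream
// delay, analog mic level, clock drift and key-press state, processes the
// stream, and then reads back the recommended capture level and the
// saturation flag used by the analog AGC.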
void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
                                 int current_mic_level, bool key_pressed) {
  if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
    // A redundant warning is reported in AudioDevice, which we've throttled
    // to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid
    // repeating the problem here.
    LOG_FERR1(LS_VERBOSE, set_stream_delay_ms, delay_ms);
  }

  GainControl* agc = audioproc_->gain_control();
  if (agc->set_stream_analog_level(current_mic_level) != 0) {
    LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
    assert(false);
  }

  EchoCancellation* aec = audioproc_->echo_cancellation();
  if (aec->is_drift_compensation_enabled()) {
    aec->set_stream_drift_samples(clock_drift);
  }

  audioproc_->set_stream_key_pressed(key_pressed);

  int err = audioproc_->ProcessStream(&_audioFrame);
  if (err != 0) {
    LOG(LS_ERROR) << "ProcessStream() error: " << err;
    assert(false);
  }

  // Store new capture level. Only updated when analog AGC is enabled.
  _captureLevel = agc->stream_analog_level();

  CriticalSectionScoped cs(&_critSect);
  // Triggers a callback in OnPeriodicProcess().
  _saturationWarning |= agc->stream_is_saturated();
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
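// Combines key-press events with the APM VAD decision to detect typing noise.
// State changes are reported asynchronously to the observer via
// OnPeriodicProcess().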
void TransmitMixer::TypingDetection(bool keyPressed)
{
  // We let the VAD determine if we're using this feature or not.
  if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
    return;
  }

  bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
  if (_typingDetection.Process(keyPressed, vadActive)) {
    _typingNoiseWarningPending = true;
    _typingNoiseDetected = true;
  } else {
    // If there is already a warning pending, do not change the state.
    // Otherwise set a warning pending if last callback was for noise detected.
    if (!_typingNoiseWarningPending && _typingNoiseDetected) {
      _typingNoiseWarningPending = true;
      _typingNoiseDetected = false;
    }
  }
}
#endif

int TransmitMixer::GetMixingFrequency()
{
    assert(_audioFrame.sample_rate_hz_ != 0);
    return _audioFrame.sample_rate_hz_;
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::TimeSinceLastTyping(int &seconds)
{
    // We check in VoEAudioProcessingImpl that this is only called when
    // typing detection is active.
    seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
    return 0;
}
#endif

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
                                                int costPerTyping,
                                                int reportingThreshold,
                                                int penaltyDecay,
                                                int typeEventDelay)
{
    _typingDetection.SetParameters(timeWindow,
                                   costPerTyping,
                                   reportingThreshold,
                                   penaltyDecay,
                                   typeEventDelay,
                                   0);
    return 0;
}
#endif

void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
  swap_stereo_channels_ = enable;
}

bool TransmitMixer::IsStereoChannelSwappingEnabled() {
  return swap_stereo_channels_;
}

}  // namespace voe
}  // namespace webrtc