blob: dfa9ee9bfceb2f3abf2715671aec172170c9ffc6 [file] [log] [blame]
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10
11#include "transmit_mixer.h"
12
13#include "audio_frame_operations.h"
14#include "channel.h"
15#include "channel_manager.h"
16#include "critical_section_wrapper.h"
17#include "event_wrapper.h"
18#include "statistics.h"
19#include "trace.h"
20#include "utility.h"
21#include "voe_base_impl.h"
22#include "voe_external_media.h"
23
24#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))
25
26namespace webrtc {
27
28namespace voe {
29
// Used for downmixing before resampling.
// TODO(andrew): audio_device should advertise the maximum sample rate it can
// provide.
static const int kMaxMonoDeviceDataSizeSamples = 960;  // 10 ms, 96 kHz, mono.
34
// Periodic callback from the monitor module. Forwards any pending warning
// flags (typing noise, saturation, noise) to the registered
// VoiceEngineObserver, then clears each flag. The observer pointer is read
// under _callbackCritSect so it cannot be unregistered mid-callback. The
// channel id -1 means the warning is engine-wide, not per-channel.
void
TransmitMixer::OnPeriodicProcess()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::OnPeriodicProcess()");

#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
    // Typing-noise reporting exists only when typing detection is compiled in.
    if (_typingNoiseWarning > 0)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() => "
                         "CallbackOnError(VE_TYPING_NOISE_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1,
                                                     VE_TYPING_NOISE_WARNING);
        }
        // Flag is cleared even when no observer is registered.
        _typingNoiseWarning = 0;
    }
#endif

    if (_saturationWarning > 0)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() =>"
                         " CallbackOnError(VE_SATURATION_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
        }
        _saturationWarning = 0;
    }

    if (_noiseWarning > 0)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() =>"
                         "CallbackOnError(VE_NOISE_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1, VE_NOISE_WARNING);
        }
        _noiseWarning = 0;
    }
}
83
84
// FileCallback notification for playout progress. Currently a trace-only
// stub; no action is taken.
void TransmitMixer::PlayNotification(const WebRtc_Word32 id,
                                     const WebRtc_UWord32 durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}
94
// FileCallback notification for recording progress. Currently a trace-only
// stub; no action is taken.
void TransmitMixer::RecordNotification(const WebRtc_Word32 id,
                                       const WebRtc_UWord32 durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}
104
// FileCallback: the file being played as microphone input reached its end.
// Clears _filePlaying under _critSect; the player module itself is torn down
// elsewhere (StopPlayingFileAsMicrophone or the destructor).
void TransmitMixer::PlayFileEnded(const WebRtc_Word32 id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded(id=%d)", id);

    // Only one file player exists, so the id must match it.
    assert(id == _filePlayerId);

    CriticalSectionScoped cs(&_critSect);

    _filePlaying = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded() =>"
                 "file player module is shutdown");
}
119
// FileCallback: a recording (microphone-only or full-call) finished. The id
// selects which of the two recorder flags to clear; unknown ids are ignored.
void
TransmitMixer::RecordFileEnded(const WebRtc_Word32 id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordFileEnded(id=%d)", id);

    if (id == _fileRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileRecorder module"
                     "is shutdown");
    } else if (id == _fileCallRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileCallRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileCallRecorder"
                     "module is shutdown");
    }
}
142
// Factory: allocates a TransmitMixer and hands ownership to the caller via
// the reference parameter. Returns 0 on success, -1 on allocation failure.
// NOTE(review): with a standard (throwing) operator new the NULL check is
// dead code — presumably kept for builds where new is nothrow; confirm.
WebRtc_Word32
TransmitMixer::Create(TransmitMixer*& mixer, const WebRtc_UWord32 instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                 "TransmitMixer::Create(instanceId=%d)", instanceId);
    mixer = new TransmitMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                     "TransmitMixer::Create() unable to allocate memory"
                     "for mixer");
        return -1;
    }
    return 0;
}
158
159void
160TransmitMixer::Destroy(TransmitMixer*& mixer)
161{
162 if (mixer)
163 {
164 delete mixer;
165 mixer = NULL;
166 }
167}
168
// Constructor: establishes defaults only; the engine wiring (process thread,
// statistics, channel manager, APM) is injected later via
// SetEngineInformation()/SetAudioProcessingModule(). The initializer order
// must match the member declaration order in the header.
TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) :
    _engineStatisticsPtr(NULL),
    _channelManagerPtr(NULL),
    _audioProcessingModulePtr(NULL),
    _voiceEngineObserverPtr(NULL),
    _processThreadPtr(NULL),
    _filePlayerPtr(NULL),
    _fileRecorderPtr(NULL),
    _fileCallRecorderPtr(NULL),
    // Avoid conflict with other channels by adding 1024 - 1026,
    // won't use as much as 1024 channels.
    _filePlayerId(instanceId + 1024),
    _fileRecorderId(instanceId + 1025),
    _fileCallRecorderId(instanceId + 1026),
    _filePlaying(false),
    _fileRecording(false),
    _fileCallRecording(false),
    _audioLevel(),
    // The critical sections are heap-allocated by the factory and owned by
    // this object; the destructor deletes them via `delete &_critSect`.
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    _timeActive(0),
    _timeSinceLastTyping(0),
    _penaltyCounter(0),
    _typingNoiseWarning(0),
    _timeWindow(10), // 10ms slots accepted to count as a hit
    _costPerTyping(100), // Penalty added for a typing + activity coincide
    _reportingThreshold(300), // Threshold for _penaltyCounter
    _penaltyDecay(1), // how much we reduce _penaltyCounter every 10 ms.
    _typeEventDelay(2), // how "old" event we check for
#endif
    _saturationWarning(0),
    _noiseWarning(0),
    _instanceId(instanceId),
    _mixFileWithMicrophone(false),
    _captureLevel(0),
    external_postproc_ptr_(NULL),
    external_preproc_ptr_(NULL),
    _mute(false),
    _remainingMuteMicTimeMs(0),
    _mixingFrequency(0),
    stereo_codec_(false),
    swap_stereo_channels_(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::TransmitMixer() - ctor");
}
216
// Destructor. Teardown order matters:
//  1. Detach from the monitor module / process thread so no further
//     OnPeriodicProcess() callbacks arrive.
//  2. Drop external media-processing callbacks.
//  3. Under _critSect, stop and destroy any active file recorder/player
//     modules (callbacks unregistered first so no notifications fire during
//     shutdown).
//  4. Finally delete the critical sections themselves (they were allocated
//     in the constructor and are held by reference).
TransmitMixer::~TransmitMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::~TransmitMixer() - dtor");
    _monitorModule.DeRegisterObserver();
    if (_processThreadPtr)
    {
        _processThreadPtr->DeRegisterModule(&_monitorModule);
    }
    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_fileRecorderPtr)
        {
            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
            _fileRecorderPtr = NULL;
        }
        if (_fileCallRecorderPtr)
        {
            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileCallRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
            _fileCallRecorderPtr = NULL;
        }
        if (_filePlayerPtr)
        {
            _filePlayerPtr->RegisterModuleFileCallback(NULL);
            _filePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
            _filePlayerPtr = NULL;
        }
    }
    // The scoped locks above must be released before their critical sections
    // are deleted.
    delete &_critSect;
    delete &_callbackCritSect;
}
255
// Injects the engine-level collaborators (non-owning pointers) and registers
// the monitor module with the process thread so OnPeriodicProcess() starts
// being called. Registration failure is logged but not fatal: the function
// still returns 0 and the mixer works without periodic warning callbacks.
WebRtc_Word32
TransmitMixer::SetEngineInformation(ProcessThread& processThread,
                                    Statistics& engineStatistics,
                                    ChannelManager& channelManager)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetEngineInformation()");

    _processThreadPtr = &processThread;
    _engineStatisticsPtr = &engineStatistics;
    _channelManagerPtr = &channelManager;

    if (_processThreadPtr->RegisterModule(&_monitorModule) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::SetEngineInformation() failed to"
                     "register the monitor module");
    } else
    {
        _monitorModule.RegisterObserver(*this);
    }

    return 0;
}
280
// Registers the (single) observer that receives warning callbacks from
// OnPeriodicProcess(). Fails with VE_INVALID_OPERATION if an observer is
// already installed. The pointer is guarded by _callbackCritSect.
WebRtc_Word32
TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RegisterVoiceEngineObserver()");
    CriticalSectionScoped cs(&_callbackCritSect);

    if (_voiceEngineObserverPtr)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "RegisterVoiceEngineObserver() observer already enabled");
        return -1;
    }
    _voiceEngineObserverPtr = &observer;
    return 0;
}
298
299WebRtc_Word32
300TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
301{
302 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
303 "TransmitMixer::SetAudioProcessingModule("
304 "audioProcessingModule=0x%x)",
305 audioProcessingModule);
306 _audioProcessingModulePtr = audioProcessingModule;
307 return 0;
308}
309
// Scans all channels that are currently sending and derives the mixer
// settings that depend on the send codecs:
//  - stereo_codec_ becomes true if any sending channel uses a 2-channel
//    codec.
//  - _mixingFrequency becomes the highest sending codec rate, capped at
//    32 kHz (defaults to 8 kHz when nothing is sending).
void TransmitMixer::CheckForSendCodecChanges() {
  ScopedChannel sc(*_channelManagerPtr);
  void* iterator = NULL;
  Channel* channel = sc.GetFirstChannel(iterator);
  // Reset to defaults before the scan so stale values do not persist.
  _mixingFrequency = 8000;
  stereo_codec_ = false;
  while (channel != NULL) {
    if (channel->Sending()) {
      CodecInst codec;
      // NOTE(review): GetSendCodec()'s return value is ignored; codec is
      // assumed valid for a sending channel — confirm.
      channel->GetSendCodec(codec);

      if (codec.channels == 2)
        stereo_codec_ = true;

      // TODO(tlegrand): Remove once we have full 48 kHz support in
      // Audio Coding Module.
      if (codec.plfreq > 32000) {
        _mixingFrequency = 32000;
      } else if (codec.plfreq > _mixingFrequency) {
        _mixingFrequency = codec.plfreq;
      }
    }
    channel = sc.GetNextChannel(iterator);
  }
}
335
// Runs the full near-end capture pipeline on one block of device audio and
// leaves the result in _audioFrame, ready for DemuxAndMix(). The stage order
// is significant: resample -> external preprocess -> APM -> channel swap ->
// typing detection -> DTMF/user mute -> file mix -> file record -> external
// postprocess -> level measurement.
// Returns 0 on success, -1 if the initial frame could not be generated.
WebRtc_Word32
TransmitMixer::PrepareDemux(const void* audioSamples,
                            const WebRtc_UWord32 nSamples,
                            const WebRtc_UWord8 nChannels,
                            const WebRtc_UWord32 samplesPerSec,
                            const WebRtc_UWord16 totalDelayMS,
                            const WebRtc_Word32 clockDrift,
                            const WebRtc_UWord16 currentMicLevel)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u,"
                 "samplesPerSec=%u, totalDelayMS=%u, clockDrift=%u,"
                 "currentMicLevel=%u)", nSamples, nChannels, samplesPerSec,
                 totalDelayMS, clockDrift, currentMicLevel);

    // Refresh _mixingFrequency / stereo_codec_ from the active send codecs.
    CheckForSendCodecChanges();

    // --- Resample input audio and create/store the initial audio frame
    if (GenerateAudioFrame(static_cast<const WebRtc_Word16*>(audioSamples),
                           nSamples,
                           nChannels,
                           samplesPerSec) == -1)
    {
        return -1;
    }

    {
        // External preprocessing callback sees the frame before APM.
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_preproc_ptr_) {
            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
                                           _audioFrame.data_,
                                           _audioFrame.samples_per_channel_,
                                           _audioFrame.sample_rate_hz_,
                                           _audioFrame.num_channels_ == 2);
        }
    }

    // --- Near-end Voice Quality Enhancement (APM) processing
    APMProcessStream(totalDelayMS, clockDrift, currentMicLevel);

    if (swap_stereo_channels_ && stereo_codec_)
        // Only bother swapping if we're using a stereo codec.
        AudioFrameOperations::SwapStereoChannels(&_audioFrame);

    // --- Annoying typing detection (utilizes the APM/VAD decision)
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    TypingDetection();
#endif

    // --- Mute during DTMF tone if direct feedback is enabled
    if (_remainingMuteMicTimeMs > 0)
    {
        AudioFrameOperations::Mute(_audioFrame);
        // Each call handles one 10 ms block, so count down in 10 ms steps.
        _remainingMuteMicTimeMs -= 10;
        if (_remainingMuteMicTimeMs < 0)
        {
            _remainingMuteMicTimeMs = 0;
        }
    }

    // --- Mute signal
    if (_mute)
    {
        AudioFrameOperations::Mute(_audioFrame);
    }

    // --- Mix with file (does not affect the mixing frequency)
    if (_filePlaying)
    {
        MixOrReplaceAudioWithFile(_mixingFrequency);
    }

    // --- Record to file
    if (_fileRecording)
    {
        RecordAudioToFile(_mixingFrequency);
    }

    {
        // External postprocessing callback sees the fully processed frame.
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_postproc_ptr_) {
            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
                                            _audioFrame.data_,
                                            _audioFrame.samples_per_channel_,
                                            _audioFrame.sample_rate_hz_,
                                            _audioFrame.num_channels_ == 2);
        }
    }

    // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
    return 0;
}
429
// Distributes the prepared microphone frame (_audioFrame) to every sending
// channel. Channels whose input is on hold only get their local timestamp
// advanced. Each sending channel receives its own copy of the frame so
// per-channel processing cannot corrupt the shared mix. Always returns 0.
WebRtc_Word32
TransmitMixer::DemuxAndMix()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::DemuxAndMix()");

    ScopedChannel sc(*_channelManagerPtr);
    void* iterator(NULL);
    Channel* channelPtr = sc.GetFirstChannel(iterator);
    while (channelPtr != NULL)
    {
        if (channelPtr->InputIsOnHold())
        {
            channelPtr->UpdateLocalTimeStamp();
        } else if (channelPtr->Sending())
        {
            // load temporary audioframe with current (mixed) microphone signal
            AudioFrame tmpAudioFrame = _audioFrame;

            channelPtr->Demultiplex(tmpAudioFrame);
            channelPtr->PrepareEncodeAndSend(_mixingFrequency);
        }
        channelPtr = sc.GetNextChannel(iterator);
    }
    return 0;
}
456
// Triggers encoding and packetization on every channel that is sending and
// not on hold. Must run after DemuxAndMix() has delivered the frame to the
// channels. Always returns 0.
WebRtc_Word32
TransmitMixer::EncodeAndSend()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::EncodeAndSend()");

    ScopedChannel sc(*_channelManagerPtr);
    void* iterator(NULL);
    Channel* channelPtr = sc.GetFirstChannel(iterator);
    while (channelPtr != NULL)
    {
        if (channelPtr->Sending() && !channelPtr->InputIsOnHold())
        {
            channelPtr->EncodeAndSend();
        }
        channelPtr = sc.GetNextChannel(iterator);
    }
    return 0;
}
476
// Returns the most recent microphone capture level (set by the AGC path).
WebRtc_UWord32 TransmitMixer::CaptureLevel() const
{
    return _captureLevel;
}
481
// Arms the temporary microphone mute used during DTMF direct feedback.
// PrepareDemux() mutes the frame and decrements this countdown by 10 ms per
// processed block until it reaches zero.
void
TransmitMixer::UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
               lengthMs);
    _remainingMuteMicTimeMs = lengthMs;
}
490
// Called when sending stops; resets the measured speech level so stale
// values are not reported. Always returns 0.
WebRtc_Word32
TransmitMixer::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopSend()");
    _audioLevel.Clear();
    return 0;
}
499
// Starts playing a file (by name) as the microphone signal. If playout is
// already active this is a no-op returning 0 (with a warning). Any previous
// player instance is destroyed and replaced. Returns 0 on success, -1 on
// bad format/file.
int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
                                                const bool loop,
                                                const FileFormats format,
                                                const int startPosition,
                                                const float volumeScaling,
                                                const int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone("
                 "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
                 " startPosition=%d, stopPosition=%d)", fileName, loop,
                 format, volumeScaling, startPosition, stopPosition);

    // Early-out check is done before taking _critSect.
    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
        return -1;
    }

    // Progress notifications are not used (0 => disabled).
    const WebRtc_UWord32 notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        fileName,
        loop,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }

    // Register for PlayFileEnded() so _filePlaying is cleared automatically.
    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}
570
// Stream overload of StartPlayingFileAsMicrophone(): plays audio from an
// InStream as the microphone signal (no looping for streams). Same replace-
// existing-player semantics as the file-name overload. Returns 0 on
// success, 0 with warning if already playing, -1 on error.
int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
                                                const FileFormats format,
                                                const int startPosition,
                                                const float volumeScaling,
                                                const int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if (stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileAsMicrophone() NULL as input stream");
        return -1;
    }

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
        return -1;
    }

    // Progress notifications are not used (0 => disabled).
    const WebRtc_UWord32 notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        (InStream&) *stream,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }
    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}
645
646int TransmitMixer::StopPlayingFileAsMicrophone()
647{
648 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
649 "TransmitMixer::StopPlayingFileAsMicrophone()");
650
651 if (!_filePlaying)
652 {
653 _engineStatisticsPtr->SetLastError(
654 VE_INVALID_OPERATION, kTraceWarning,
655 "StopPlayingFileAsMicrophone() isnot playing");
656 return 0;
657 }
658
659 CriticalSectionScoped cs(&_critSect);
660
661 if (_filePlayerPtr->StopPlayingFile() != 0)
662 {
663 _engineStatisticsPtr->SetLastError(
664 VE_CANNOT_STOP_PLAYOUT, kTraceError,
665 "StopPlayingFile() couldnot stop playing file");
666 return -1;
667 }
668
669 _filePlayerPtr->RegisterModuleFileCallback(NULL);
670 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
671 _filePlayerPtr = NULL;
672 _filePlaying = false;
673
674 return 0;
675}
676
// Returns non-zero while a file/stream is playing as microphone input.
int TransmitMixer::IsPlayingFileAsMicrophone() const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::IsPlayingFileAsMicrophone()");
    return _filePlaying;
}
683
// Adjusts the volume scaling of the active file-as-microphone playout.
// Fails with VE_INVALID_OPERATION when nothing is playing and VE_BAD_ARGUMENT
// when the player rejects the scale factor.
int TransmitMixer::ScaleFileAsMicrophonePlayout(const float scale)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::ScaleFileAsMicrophonePlayout(scale=%5.3f)",
                 scale);

    CriticalSectionScoped cs(&_critSect);

    if (!_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "ScaleFileAsMicrophonePlayout() isnot playing file");
        return -1;
    }

    // NULL-check guards against the player being torn down concurrently.
    if ((_filePlayerPtr == NULL) ||
        (_filePlayerPtr->SetAudioScaling(scale) != 0))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "SetAudioScaling() failed to scale playout");
        return -1;
    }

    return 0;
}
711
// Starts recording the (processed) microphone signal to a file. A NULL
// codecInst selects 16 kHz L16 PCM; L16/PCMU/PCMA go to WAV, anything else
// to a compressed file. Already-recording is a no-op returning 0. Returns
// -1 on invalid codec or file errors.
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
                 fileName);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    // NOTE(review): unlike the stream overload (channels != 1), this accepts
    // mono or stereo; the `channels < 0` arm is dead if channels is unsigned
    // — confirm against the CodecInst declaration.
    if (codecInst != NULL &&
      (codecInst->channels < 0 || codecInst->channels > 2))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format isnot correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }
    // Register for RecordFileEnded() so _fileRecording is cleared on EOF.
    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}
791
// Stream overload: records the (processed) microphone signal to an
// OutStream. Only mono codecs are accepted here. Format selection and
// replace-existing-recorder semantics match the file-name overload.
int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::StartRecordingMicrophone()");

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format isnot correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
                                                  *codecInst,
                                                  notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }

    // Register for RecordFileEnded() so _fileRecording is cleared on EOF.
    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}
868
869
// Stops microphone recording and destroys the recorder module. Not-recording
// is a no-op returning 0. Returns -1 if the recorder could not be stopped.
// NOTE(review): _fileRecording is read before taking _critSect and
// _fileRecorderPtr is then dereferenced without a NULL check — presumed safe
// because flag and pointer are updated together under the lock; confirm.
int TransmitMixer::StopRecordingMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingMicrophone()");

    if (!_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "StopRecordingMicrophone() isnot recording");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
    _fileRecorderPtr = NULL;
    _fileRecording = false;

    return 0;
}
898
// Starts recording the full call mix to a file. Mono codecs only; NULL
// codecInst selects 16 kHz L16 PCM. Format selection and replace-existing-
// recorder semantics match StartRecordingMicrophone().
int TransmitMixer::StartRecordingCall(const char* fileName,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr
        = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                           (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format isnot correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }
    // Register for RecordFileEnded() so _fileCallRecording clears on EOF.
    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}
976
// Stream overload: records the full call mix to an OutStream. Mono codecs
// only; otherwise identical in behavior to the file-name overload.
int TransmitMixer::StartRecordingCall(OutStream* stream,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall()");

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                         (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format isnot correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
                                                      *codecInst,
                                                      notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }

    // Register for RecordFileEnded() so _fileCallRecording clears on EOF.
    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}
1053
// Stops call recording and destroys the recorder module. Unlike the
// microphone variant, calling this while not recording is an error (-1).
int TransmitMixer::StopRecordingCall()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingCall()");

    if (!_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingCall() file isnot recording");
        return -1;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_fileCallRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
    _fileCallRecorderPtr = NULL;
    _fileCallRecording = false;

    return 0;
}
1083
// Selects whether file playout is mixed with (true) or replaces (false) the
// live microphone signal in MixOrReplaceAudioWithFile().
void
TransmitMixer::SetMixWithMicStatus(bool mix)
{
    _mixFileWithMicrophone = mix;
}
1089
1090int TransmitMixer::RegisterExternalMediaProcessing(
1091 VoEMediaProcess* object,
1092 ProcessingTypes type) {
1093 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1094 "TransmitMixer::RegisterExternalMediaProcessing()");
1095
1096 CriticalSectionScoped cs(&_callbackCritSect);
1097 if (!object) {
1098 return -1;
1099 }
1100
1101 // Store the callback object according to the processing type.
1102 if (type == kRecordingAllChannelsMixed) {
1103 external_postproc_ptr_ = object;
1104 } else if (type == kRecordingPreprocessing) {
1105 external_preproc_ptr_ = object;
1106 } else {
1107 return -1;
1108 }
1109 return 0;
1110}
1111
1112int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
1113 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1114 "TransmitMixer::DeRegisterExternalMediaProcessing()");
1115
1116 CriticalSectionScoped cs(&_callbackCritSect);
1117 if (type == kRecordingAllChannelsMixed) {
1118 external_postproc_ptr_ = NULL;
1119 } else if (type == kRecordingPreprocessing) {
1120 external_preproc_ptr_ = NULL;
1121 } else {
1122 return -1;
1123 }
1124 return 0;
1125}
1126
1127int
1128TransmitMixer::SetMute(bool enable)
1129{
1130 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1131 "TransmitMixer::SetMute(enable=%d)", enable);
1132 _mute = enable;
1133 return 0;
1134}
1135
1136bool
1137TransmitMixer::Mute() const
1138{
1139 return _mute;
1140}
1141
1142WebRtc_Word8 TransmitMixer::AudioLevel() const
1143{
1144 // Speech + file level [0,9]
1145 return _audioLevel.Level();
1146}
1147
1148WebRtc_Word16 TransmitMixer::AudioLevelFullRange() const
1149{
1150 // Speech + file level [0,32767]
1151 return _audioLevel.LevelFullRange();
1152}
1153
1154bool TransmitMixer::IsRecordingCall()
1155{
1156 return _fileCallRecording;
1157}
1158
1159bool TransmitMixer::IsRecordingMic()
1160{
1161
1162 return _fileRecording;
1163}
1164
// TODO(andrew): use RemixAndResample for this.
// Converts one block of raw device audio into _audioFrame at the current
// mixing frequency: optionally downmixes stereo to mono, then resamples.
// On success _audioFrame holds the converted 10 ms frame and 0 is
// returned; -1 on resampler failure (errors are traced).
int TransmitMixer::GenerateAudioFrame(const int16_t audio[],
                                      int samples_per_channel,
                                      int num_channels,
                                      int sample_rate_hz)
{
    const int16_t* audio_ptr = audio;
    // Scratch buffer for the downmixed signal; sized for the largest
    // supported device rate (see kMaxMonoDeviceDataSizeSamples above).
    int16_t mono_audio[kMaxMonoDeviceDataSizeSamples];
    assert(samples_per_channel <= kMaxMonoDeviceDataSizeSamples);
    // If no stereo codecs are in use, we downmix a stereo stream from the
    // device early in the chain, before resampling.
    if (num_channels == 2 && !stereo_codec_) {
        AudioFrameOperations::StereoToMono(audio, samples_per_channel,
                                           mono_audio);
        audio_ptr = mono_audio;
        num_channels = 1;
    }

    // Pick a synchronous (blocking) resampler matching the channel count.
    ResamplerType resampler_type = (num_channels == 1) ?
            kResamplerSynchronous : kResamplerSynchronousStereo;

    // Reconfigure the resampler only when rate or channel layout changed.
    if (_audioResampler.ResetIfNeeded(sample_rate_hz,
                                      _mixingFrequency,
                                      resampler_type) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::GenerateAudioFrame() unable to resample");
        return -1;
    }
    // Push() consumes interleaved samples (hence the * num_channels) and
    // writes the resampled output plus its total sample count.
    if (_audioResampler.Push(audio_ptr,
                             samples_per_channel * num_channels,
                             _audioFrame.data_,
                             AudioFrame::kMaxDataSizeSamples,
                             _audioFrame.samples_per_channel_) == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::GenerateAudioFrame() resampling failed");
        return -1;
    }

    // Push() reported the total (interleaved) count; convert it to a
    // per-channel count before publishing the frame metadata.
    _audioFrame.samples_per_channel_ /= num_channels;
    _audioFrame.id_ = _instanceId;
    _audioFrame.timestamp_ = -1;
    _audioFrame.sample_rate_hz_ = _mixingFrequency;
    _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
    _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
    _audioFrame.num_channels_ = num_channels;

    return 0;
}
1215
1216WebRtc_Word32 TransmitMixer::RecordAudioToFile(
1217 const WebRtc_UWord32 mixingFrequency)
1218{
1219 CriticalSectionScoped cs(&_critSect);
1220 if (_fileRecorderPtr == NULL)
1221 {
1222 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1223 "TransmitMixer::RecordAudioToFile() filerecorder doesnot"
1224 "exist");
1225 return -1;
1226 }
1227
1228 if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
1229 {
1230 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1231 "TransmitMixer::RecordAudioToFile() file recording"
1232 "failed");
1233 return -1;
1234 }
1235
1236 return 0;
1237}
1238
1239WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
1240 const int mixingFrequency)
1241{
1242 scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
1243
1244 int fileSamples(0);
1245 {
1246 CriticalSectionScoped cs(&_critSect);
1247 if (_filePlayerPtr == NULL)
1248 {
1249 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1250 VoEId(_instanceId, -1),
1251 "TransmitMixer::MixOrReplaceAudioWithFile()"
1252 "fileplayer doesnot exist");
1253 return -1;
1254 }
1255
1256 if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
1257 fileSamples,
1258 mixingFrequency) == -1)
1259 {
1260 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1261 "TransmitMixer::MixOrReplaceAudioWithFile() file"
1262 " mixing failed");
1263 return -1;
1264 }
1265 }
1266
1267 assert(_audioFrame.samples_per_channel_ == fileSamples);
1268
1269 if (_mixFileWithMicrophone)
1270 {
1271 // Currently file stream is always mono.
1272 // TODO(xians): Change the code when FilePlayer supports real stereo.
1273 Utility::MixWithSat(_audioFrame.data_,
1274 _audioFrame.num_channels_,
1275 fileBuffer.get(),
1276 1,
1277 fileSamples);
1278 } else
1279 {
1280 // Replace ACM audio with file.
1281 // Currently file stream is always mono.
1282 // TODO(xians): Change the code when FilePlayer supports real stereo.
1283 _audioFrame.UpdateFrame(-1,
1284 -1,
1285 fileBuffer.get(),
1286 fileSamples,
1287 mixingFrequency,
1288 AudioFrame::kNormalSpeech,
1289 AudioFrame::kVadUnknown,
1290 1);
1291 }
1292 return 0;
1293}
1294
// Runs the 10 ms frame in _audioFrame through the audio processing module
// (APM): reconfigures channel count and sample rate if they changed, feeds
// in the stream delay, analog mic level and clock drift, then processes the
// frame in place. APM errors are traced as warnings but never propagated;
// the function always returns 0.
WebRtc_Word32 TransmitMixer::APMProcessStream(
    const WebRtc_UWord16 totalDelayMS,
    const WebRtc_Word32 clockDrift,
    const WebRtc_UWord16 currentMicLevel)
{
    WebRtc_UWord16 captureLevel(currentMicLevel);

    // Check if the number of incoming channels has changed. This has taken
    // both the capture device and send codecs into account.
    if (_audioFrame.num_channels_ !=
        _audioProcessingModulePtr->num_input_channels())
    {
        if (_audioProcessingModulePtr->set_num_channels(
                _audioFrame.num_channels_,
                _audioFrame.num_channels_))
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessing::set_num_channels(%d, %d) => error",
                         _audioFrame.num_channels_,
                         _audioProcessingModulePtr->num_output_channels());
        }
    }

    // If the frequency has changed we need to change APM settings
    // Sending side is "master"
    if (_audioProcessingModulePtr->sample_rate_hz() !=
        _audioFrame.sample_rate_hz_)
    {
        if (_audioProcessingModulePtr->set_sample_rate_hz(
                _audioFrame.sample_rate_hz_))
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessing::set_sample_rate_hz(%u) => error",
                         _audioFrame.sample_rate_hz_);
        }
    }

    // Hand APM the capture-to-render delay; needed by echo cancellation.
    if (_audioProcessingModulePtr->set_stream_delay_ms(totalDelayMS) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::set_stream_delay_ms(%u) => error",
                     totalDelayMS);
    }
    // Hand the analog mic level to the AGC before processing.
    if (_audioProcessingModulePtr->gain_control()->set_stream_analog_level(
        captureLevel) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::set_stream_analog_level(%u) => error",
                     captureLevel);
    }
    // Drift samples are only relevant when AEC drift compensation is on.
    if (_audioProcessingModulePtr->echo_cancellation()->
        is_drift_compensation_enabled())
    {
        if (_audioProcessingModulePtr->echo_cancellation()->
            set_stream_drift_samples(clockDrift) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                "AudioProcessing::set_stream_drift_samples(%u) => error",
                clockDrift);
        }
    }
    // Process the frame in place (AEC/AGC/NS etc. as configured elsewhere).
    if (_audioProcessingModulePtr->ProcessStream(&_audioFrame) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::ProcessStream() => error");
    }
    // Read back the (possibly adjusted) analog level recommended by AGC.
    captureLevel =
        _audioProcessingModulePtr->gain_control()->stream_analog_level();

    // Store new capture level (only updated when analog AGC is enabled)
    _captureLevel = captureLevel;

    // Log notifications
    if (_audioProcessingModulePtr->gain_control()->stream_is_saturated())
    {
        if (_saturationWarning == 1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                       "TransmitMixer::APMProcessStream() pending "
                       "saturation warning exists");
        }
        // _saturationWarning is polled by OnPeriodicProcess() on the module
        // process thread, which then fires the VE_SATURATION_WARNING
        // callback and resets the flag.
        _saturationWarning = 1;  // triggers callback from moduleprocess thread
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "TransmitMixer::APMProcessStream() VE_SATURATION_WARNING "
                   "message has been posted for callback");
    }

    return 0;
}
1384
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
// Heuristic typing-noise detector: accumulates a penalty when key presses
// coincide with voice activity, and posts a VE_TYPING_NOISE_WARNING (via
// _typingNoiseWarning, consumed by OnPeriodicProcess()) once the penalty
// crosses _reportingThreshold. Returns 0 on success, -1 if the key-press
// state cannot be read.
int TransmitMixer::TypingDetection()
{

    // We let the VAD determine if we're using this feature or not.
    if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown)
    {
        return (0);
    }

    // Non-negative: 1 if any key was pressed since the last poll, 0 if not;
    // negative means the platform could not report key state.
    int keyPressed = EventWrapper::KeyPressed();

    if (keyPressed < 0)
    {
        return (-1);
    }

    // Count consecutive active-speech frames; reset on any inactive frame.
    if (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
        _timeActive++;
    else
        _timeActive = 0;

    // Keep track if time since last typing event
    if (keyPressed)
    {
        _timeSinceLastTyping = 0;
    }
    else
    {
        ++_timeSinceLastTyping;
    }

    // Penalize only when a recent key press overlaps the start of a speech
    // burst (activity shorter than _timeWindow frames).
    if ((_timeSinceLastTyping < _typeEventDelay)
        && (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
        && (_timeActive < _timeWindow))
    {
        _penaltyCounter += _costPerTyping;
        if (_penaltyCounter > _reportingThreshold)
        {
            if (_typingNoiseWarning == 1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                             VoEId(_instanceId, -1),
                             "TransmitMixer::TypingDetection() pending "
                             "noise-saturation warning exists");
            }
            // triggers callback from the module process thread
            _typingNoiseWarning = 1;
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::TypingDetection() "
                         "VE_TYPING_NOISE_WARNING message has been posted for"
                         "callback");
        }
    }

    // Decay the penalty over time.
    // NOTE(review): a decay step larger than the remaining counter drives
    // _penaltyCounter below zero — presumably harmless, but confirm that a
    // negative counter cannot delay future warnings.
    if (_penaltyCounter > 0)
        _penaltyCounter-=_penaltyDecay;

    return (0);
}
#endif
1446
1447int TransmitMixer::GetMixingFrequency()
1448{
1449 assert(_mixingFrequency!=0);
1450 return (_mixingFrequency);
1451}
1452
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::TimeSinceLastTyping(int &seconds)
{
    // We check in VoEAudioProcessingImpl that this is only called when
    // typing detection is active.

    // The counter ticks ~100 times per second (once per processed frame);
    // convert to whole seconds with rounding.
    seconds = (_timeSinceLastTyping + 50) / 100;
    return 0;
}
#endif
1464
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
// Updates the typing-detection tuning parameters. A value of zero leaves
// the corresponding parameter unchanged. Always returns 0.
int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
                                                int costPerTyping,
                                                int reportingThreshold,
                                                int penaltyDecay,
                                                int typeEventDelay)
{
    if (timeWindow != 0)
    {
        _timeWindow = timeWindow;
    }
    if (costPerTyping != 0)
    {
        _costPerTyping = costPerTyping;
    }
    if (reportingThreshold != 0)
    {
        _reportingThreshold = reportingThreshold;
    }
    if (penaltyDecay != 0)
    {
        _penaltyDecay = penaltyDecay;
    }
    if (typeEventDelay != 0)
    {
        _typeEventDelay = typeEventDelay;
    }
    return 0;
}
#endif
1487
1488void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
1489 swap_stereo_channels_ = enable;
1490}
1491
1492bool TransmitMixer::IsStereoChannelSwappingEnabled() {
1493 return swap_stereo_channels_;
1494}
1495
1496} // namespace voe
1497
1498} // namespace webrtc