Replace scoped_ptr with unique_ptr in webrtc/modules/audio_device/
BUG=webrtc:5520
Review URL: https://codereview.webrtc.org/1722083002
Cr-Commit-Position: refs/heads/master@{#11740}
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 768047d..7655c82 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -11,6 +11,7 @@
#include <algorithm>
#include <limits>
#include <list>
+#include <memory>
#include <numeric>
#include <string>
#include <vector>
@@ -20,7 +21,6 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/format_macros.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
@@ -144,7 +144,7 @@
private:
size_t file_size_in_bytes_;
int sample_rate_;
- rtc::scoped_ptr<int16_t[]> file_;
+ std::unique_ptr<int16_t[]> file_;
size_t file_pos_;
};
@@ -239,7 +239,7 @@
rtc::CriticalSection lock_;
const size_t frames_per_buffer_;
const size_t bytes_per_buffer_;
- rtc::scoped_ptr<AudioBufferList> fifo_;
+ std::unique_ptr<AudioBufferList> fifo_;
size_t largest_size_;
size_t total_written_elements_;
size_t write_count_;
@@ -491,7 +491,7 @@
size_t play_count_;
size_t rec_count_;
AudioStreamInterface* audio_stream_;
- rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
+ std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
// AudioDeviceTest test fixture.
@@ -688,11 +688,11 @@
return volume;
}
- rtc::scoped_ptr<EventWrapper> test_is_done_;
+ std::unique_ptr<EventWrapper> test_is_done_;
rtc::scoped_refptr<AudioDeviceModule> audio_device_;
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
- rtc::scoped_ptr<BuildInfo> build_info_;
+ std::unique_ptr<BuildInfo> build_info_;
};
TEST_F(AudioDeviceTest, ConstructDestruct) {
@@ -935,7 +935,7 @@
NiceMock<MockAudioTransport> mock(kPlayout);
const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
std::string file_name = GetFileName(playout_sample_rate());
- rtc::scoped_ptr<FileAudioStream> file_audio_stream(
+ std::unique_ptr<FileAudioStream> file_audio_stream(
new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
mock.HandleCallbacks(test_is_done_.get(),
file_audio_stream.get(),
@@ -964,7 +964,7 @@
EXPECT_EQ(record_channels(), playout_channels());
EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
- rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
+ std::unique_ptr<FifoAudioStream> fifo_audio_stream(
new FifoAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(),
fifo_audio_stream.get(),
@@ -994,7 +994,7 @@
EXPECT_EQ(record_channels(), playout_channels());
EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
- rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+ std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(),
latency_audio_stream.get(),
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 1d08a6a..9174a5b 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -16,7 +16,6 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/utility/include/helpers_android.h"
@@ -32,7 +31,7 @@
// AudioManager::JavaAudioManager implementation
AudioManager::JavaAudioManager::JavaAudioManager(
NativeRegistration* native_reg,
- rtc::scoped_ptr<GlobalRef> audio_manager)
+ std::unique_ptr<GlobalRef> audio_manager)
: audio_manager_(std::move(audio_manager)),
init_(native_reg->GetMethodId("init", "()Z")),
dispose_(native_reg->GetMethodId("dispose", "()V")),
@@ -67,7 +66,7 @@
// AudioManager implementation
AudioManager::AudioManager()
- : j_environment_(JVM::GetInstance()->environment()),
+ : j_environment_(rtc::ScopedToUnique(JVM::GetInstance()->environment())),
audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
initialized_(false),
hardware_aec_(false),
@@ -81,14 +80,14 @@
{"nativeCacheAudioParameters",
"(IIZZZZIIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
- j_native_registration_ = j_environment_->RegisterNatives(
+ j_native_registration_ = rtc::ScopedToUnique(j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioManager",
- native_methods, arraysize(native_methods));
+ native_methods, arraysize(native_methods)));
j_audio_manager_.reset(new JavaAudioManager(
j_native_registration_.get(),
- j_native_registration_->NewObject(
+ rtc::ScopedToUnique(j_native_registration_->NewObject(
"<init>", "(Landroid/content/Context;J)V",
- JVM::GetInstance()->context(), PointerTojlong(this))));
+ JVM::GetInstance()->context(), PointerTojlong(this)))));
}
AudioManager::~AudioManager() {
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 26caf61..b4264a6 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -11,9 +11,10 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
+#include <memory>
+
#include <jni.h>
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/audio_device_config.h"
@@ -39,7 +40,7 @@
class JavaAudioManager {
public:
JavaAudioManager(NativeRegistration* native_registration,
- rtc::scoped_ptr<GlobalRef> audio_manager);
+ std::unique_ptr<GlobalRef> audio_manager);
~JavaAudioManager();
bool Init();
@@ -48,7 +49,7 @@
bool IsDeviceBlacklistedForOpenSLESUsage();
private:
- rtc::scoped_ptr<GlobalRef> audio_manager_;
+ std::unique_ptr<GlobalRef> audio_manager_;
jmethodID init_;
jmethodID dispose_;
jmethodID is_communication_mode_enabled_;
@@ -128,13 +129,13 @@
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
- rtc::scoped_ptr<JNIEnvironment> j_environment_;
+ std::unique_ptr<JNIEnvironment> j_environment_;
// Contains factory method for creating the Java object.
- rtc::scoped_ptr<NativeRegistration> j_native_registration_;
+ std::unique_ptr<NativeRegistration> j_native_registration_;
// Wraps the Java specific parts of the AudioManager.
- rtc::scoped_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
+ std::unique_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
AudioDeviceModule::AudioLayer audio_layer_;
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index ddae730..d1107e0 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -8,9 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <memory>
+
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/format_macros.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/build_info.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
@@ -43,7 +44,7 @@
EXPECT_NE(0, audio_manager()->GetDelayEstimateInMilliseconds());
}
- rtc::scoped_ptr<AudioManager> audio_manager_;
+ std::unique_ptr<AudioManager> audio_manager_;
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
};
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index 5dda724..5ff5997 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -31,7 +31,7 @@
// AudioRecordJni::JavaAudioRecord implementation.
AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
NativeRegistration* native_reg,
- rtc::scoped_ptr<GlobalRef> audio_record)
+ std::unique_ptr<GlobalRef> audio_record)
: audio_record_(std::move(audio_record)),
init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
@@ -74,7 +74,7 @@
// AudioRecordJni implementation.
AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
- : j_environment_(JVM::GetInstance()->environment()),
+ : j_environment_(rtc::ScopedToUnique(JVM::GetInstance()->environment())),
audio_manager_(audio_manager),
audio_parameters_(audio_manager->GetRecordAudioParameters()),
total_delay_in_milliseconds_(0),
@@ -93,14 +93,14 @@
&webrtc::AudioRecordJni::CacheDirectBufferAddress)},
{"nativeDataIsRecorded", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioRecordJni::DataIsRecorded)}};
- j_native_registration_ = j_environment_->RegisterNatives(
+ j_native_registration_ = rtc::ScopedToUnique(j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioRecord",
- native_methods, arraysize(native_methods));
+ native_methods, arraysize(native_methods)));
j_audio_record_.reset(new JavaAudioRecord(
j_native_registration_.get(),
- j_native_registration_->NewObject(
+ rtc::ScopedToUnique(j_native_registration_->NewObject(
"<init>", "(Landroid/content/Context;J)V",
- JVM::GetInstance()->context(), PointerTojlong(this))));
+ JVM::GetInstance()->context(), PointerTojlong(this)))));
// Detach from this thread since we want to use the checker to verify calls
// from the Java based audio thread.
thread_checker_java_.DetachFromThread();
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index 766316a..1319493 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_RECORD_JNI_H_
+#include <memory>
+
#include <jni.h>
#include "webrtc/base/thread_checker.h"
@@ -46,7 +48,7 @@
class JavaAudioRecord {
public:
JavaAudioRecord(NativeRegistration* native_registration,
- rtc::scoped_ptr<GlobalRef> audio_track);
+ std::unique_ptr<GlobalRef> audio_track);
~JavaAudioRecord();
int InitRecording(int sample_rate, size_t channels);
@@ -57,7 +59,7 @@
bool EnableBuiltInNS(bool enable);
private:
- rtc::scoped_ptr<GlobalRef> audio_record_;
+ std::unique_ptr<GlobalRef> audio_record_;
jmethodID init_recording_;
jmethodID start_recording_;
jmethodID stop_recording_;
@@ -117,13 +119,13 @@
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
- rtc::scoped_ptr<JNIEnvironment> j_environment_;
+ std::unique_ptr<JNIEnvironment> j_environment_;
// Contains factory method for creating the Java object.
- rtc::scoped_ptr<NativeRegistration> j_native_registration_;
+ std::unique_ptr<NativeRegistration> j_native_registration_;
// Wraps the Java specific parts of the AudioRecordJni class.
- rtc::scoped_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;
+ std::unique_ptr<AudioRecordJni::JavaAudioRecord> j_audio_record_;
// Raw pointer to the audio manger.
const AudioManager* audio_manager_;
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index 057e016..5bf3a5b 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -31,7 +31,7 @@
// AudioTrackJni::JavaAudioTrack implementation.
AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
NativeRegistration* native_reg,
- rtc::scoped_ptr<GlobalRef> audio_track)
+ std::unique_ptr<GlobalRef> audio_track)
: audio_track_(std::move(audio_track)),
init_playout_(native_reg->GetMethodId("initPlayout", "(II)V")),
start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
@@ -69,7 +69,7 @@
// TODO(henrika): possibly extend usage of AudioManager and add it as member.
AudioTrackJni::AudioTrackJni(AudioManager* audio_manager)
- : j_environment_(JVM::GetInstance()->environment()),
+ : j_environment_(rtc::ScopedToUnique(JVM::GetInstance()->environment())),
audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
direct_buffer_address_(nullptr),
direct_buffer_capacity_in_bytes_(0),
@@ -86,14 +86,14 @@
&webrtc::AudioTrackJni::CacheDirectBufferAddress)},
{"nativeGetPlayoutData", "(IJ)V",
reinterpret_cast<void*>(&webrtc::AudioTrackJni::GetPlayoutData)}};
- j_native_registration_ = j_environment_->RegisterNatives(
+ j_native_registration_ = rtc::ScopedToUnique(j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioTrack",
- native_methods, arraysize(native_methods));
+ native_methods, arraysize(native_methods)));
j_audio_track_.reset(new JavaAudioTrack(
j_native_registration_.get(),
- j_native_registration_->NewObject(
+ rtc::ScopedToUnique(j_native_registration_->NewObject(
"<init>", "(Landroid/content/Context;J)V",
- JVM::GetInstance()->context(), PointerTojlong(this))));
+ JVM::GetInstance()->context(), PointerTojlong(this)))));
// Detach from this thread since we want to use the checker to verify calls
// from the Java based audio thread.
thread_checker_java_.DetachFromThread();
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 067dc6c..32b0b77 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_TRACK_JNI_H_
+#include <memory>
+
#include <jni.h>
#include "webrtc/base/thread_checker.h"
@@ -42,7 +44,7 @@
class JavaAudioTrack {
public:
JavaAudioTrack(NativeRegistration* native_registration,
- rtc::scoped_ptr<GlobalRef> audio_track);
+ std::unique_ptr<GlobalRef> audio_track);
~JavaAudioTrack();
void InitPlayout(int sample_rate, int channels);
@@ -53,7 +55,7 @@
int GetStreamVolume();
private:
- rtc::scoped_ptr<GlobalRef> audio_track_;
+ std::unique_ptr<GlobalRef> audio_track_;
jmethodID init_playout_;
jmethodID start_playout_;
jmethodID stop_playout_;
@@ -113,13 +115,13 @@
AttachCurrentThreadIfNeeded attach_thread_if_needed_;
// Wraps the JNI interface pointer and methods associated with it.
- rtc::scoped_ptr<JNIEnvironment> j_environment_;
+ std::unique_ptr<JNIEnvironment> j_environment_;
// Contains factory method for creating the Java object.
- rtc::scoped_ptr<NativeRegistration> j_native_registration_;
+ std::unique_ptr<NativeRegistration> j_native_registration_;
// Wraps the Java specific parts of the AudioTrackJni class.
- rtc::scoped_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;
+ std::unique_ptr<AudioTrackJni::JavaAudioTrack> j_audio_track_;
// Contains audio parameters provided to this class at construction by the
// AudioManager.
diff --git a/webrtc/modules/audio_device/android/build_info.cc b/webrtc/modules/audio_device/android/build_info.cc
index 6289697..c6cecc9 100644
--- a/webrtc/modules/audio_device/android/build_info.cc
+++ b/webrtc/modules/audio_device/android/build_info.cc
@@ -15,7 +15,7 @@
namespace webrtc {
BuildInfo::BuildInfo()
- : j_environment_(JVM::GetInstance()->environment()),
+ : j_environment_(rtc::ScopedToUnique(JVM::GetInstance()->environment())),
j_build_info_(JVM::GetInstance()->GetClass(
"org/webrtc/voiceengine/BuildInfo")) {
}
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
index 1490fa0..4a4c30e 100644
--- a/webrtc/modules/audio_device/android/build_info.h
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -12,6 +12,7 @@
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
#include <jni.h>
+#include <memory>
#include <string>
#include "webrtc/modules/utility/include/jvm_android.h"
@@ -55,7 +56,7 @@
// Provides access to the JNIEnv interface pointer and the JavaToStdString()
// method which is used to translate Java strings to std strings.
- rtc::scoped_ptr<JNIEnvironment> j_environment_;
+ std::unique_ptr<JNIEnvironment> j_environment_;
// Holds the jclass object and provides access to CallStaticObjectMethod().
// Used by GetStringFromJava() during construction only.
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
index fa9e931..084546d 100644
--- a/webrtc/modules/audio_device/android/opensles_player.h
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -11,11 +11,12 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+#include <memory>
+
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
@@ -150,7 +151,7 @@
// Queue of audio buffers to be used by the player object for rendering
// audio. They will be used in a Round-robin way and the size of each buffer
// is given by FineAudioBuffer::RequiredBufferSizeBytes().
- rtc::scoped_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+ std::unique_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
// in chunks of 10ms. It then allows for this data to be pulled in
@@ -162,7 +163,7 @@
// in each callback (one every 5ms). This class can then ask for 240 and the
// FineAudioBuffer will ask WebRTC for new data only every second callback
// and also cache non-utilized audio.
- rtc::scoped_ptr<FineAudioBuffer> fine_buffer_;
+ std::unique_ptr<FineAudioBuffer> fine_buffer_;
// Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
// Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
diff --git a/webrtc/modules/audio_device/audio_device_impl.h b/webrtc/modules/audio_device/audio_device_impl.h
index 5109693..c7312bf 100644
--- a/webrtc/modules/audio_device/audio_device_impl.h
+++ b/webrtc/modules/audio_device/audio_device_impl.h
@@ -13,8 +13,9 @@
#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
+#include <memory>
+
#include "webrtc/base/checks.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/audio_device_buffer.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -216,7 +217,7 @@
AudioDeviceBuffer _audioDeviceBuffer;
#if defined(WEBRTC_ANDROID)
- rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
+ std::unique_ptr<AudioManager> _audioManagerAndroid;
#endif
int32_t _id;
AudioLayer _platformAudioLayer;
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.h b/webrtc/modules/audio_device/dummy/file_audio_device.h
index 7717940..be6fa3d 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.h
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.h
@@ -13,6 +13,7 @@
#include <stdio.h>
+#include <memory>
#include <string>
#include "webrtc/modules/audio_device/audio_device_generic.h"
@@ -182,8 +183,8 @@
size_t _playoutFramesIn10MS;
// TODO(pbos): Make plain members instead of pointers and stop resetting them.
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadRec;
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadPlay;
bool _playing;
bool _recording;
diff --git a/webrtc/modules/audio_device/fine_audio_buffer.h b/webrtc/modules/audio_device/fine_audio_buffer.h
index 4ab5cd2..478e0c6 100644
--- a/webrtc/modules/audio_device/fine_audio_buffer.h
+++ b/webrtc/modules/audio_device/fine_audio_buffer.h
@@ -11,7 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
-#include "webrtc/base/scoped_ptr.h"
+#include <memory>
+
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -86,14 +87,14 @@
// Number of audio bytes per 10ms.
const size_t bytes_per_10_ms_;
// Storage for output samples that are not yet asked for.
- rtc::scoped_ptr<int8_t[]> playout_cache_buffer_;
+ std::unique_ptr<int8_t[]> playout_cache_buffer_;
// Location of first unread output sample.
size_t playout_cached_buffer_start_;
// Number of bytes stored in output (contain samples to be played out) cache.
size_t playout_cached_bytes_;
// Storage for input samples that are about to be delivered to the WebRTC
// ADB or remains from the last successful delivery of a 10ms audio buffer.
- rtc::scoped_ptr<int8_t[]> record_cache_buffer_;
+ std::unique_ptr<int8_t[]> record_cache_buffer_;
// Required (max) size in bytes of the |record_cache_buffer_|.
const size_t required_record_buffer_size_bytes_;
// Number of bytes in input (contains recorded samples) cache.
diff --git a/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc b/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc
index 6666364..ef189d1 100644
--- a/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc
+++ b/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -15,7 +15,6 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/mock_audio_device_buffer.h"
using ::testing::_;
@@ -118,9 +117,9 @@
FineAudioBuffer fine_buffer(&audio_device_buffer, kFrameSizeBytes,
sample_rate);
- rtc::scoped_ptr<int8_t[]> out_buffer;
+ std::unique_ptr<int8_t[]> out_buffer;
out_buffer.reset(new int8_t[fine_buffer.RequiredPlayoutBufferSizeBytes()]);
- rtc::scoped_ptr<int8_t[]> in_buffer;
+ std::unique_ptr<int8_t[]> in_buffer;
in_buffer.reset(new int8_t[kFrameSizeBytes]);
for (int i = 0; i < kNumberOfFrames; ++i) {
fine_buffer.GetPlayoutData(out_buffer.get());
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h
index c4eb0d6..7320886 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -11,9 +11,10 @@
#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+#include <memory>
+
#include <AudioUnit/AudioUnit.h>
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
@@ -256,11 +257,11 @@
// can provide audio data frames of size 128 and these are accumulated until
// enough data to supply one 10ms call exists. This 10ms chunk is then sent
// to WebRTC and the remaining part is stored.
- rtc::scoped_ptr<FineAudioBuffer> fine_audio_buffer_;
+ std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
// Extra audio buffer to be used by the playout side for rendering audio.
// The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes().
- rtc::scoped_ptr<SInt8[]> playout_audio_buffer_;
+ std::unique_ptr<SInt8[]> playout_audio_buffer_;
// Provides a mechanism for encapsulating one or more buffers of audio data.
// Only used on the recording side.
@@ -268,7 +269,7 @@
// Temporary storage for recorded data. AudioUnitRender() renders into this
// array as soon as a frame of the desired buffer size has been recorded.
- rtc::scoped_ptr<SInt8[]> record_audio_buffer_;
+ std::unique_ptr<SInt8[]> record_audio_buffer_;
// Set to 1 when recording is active and 0 otherwise.
volatile int recording_;
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index 076a674..4dfb073 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -11,6 +11,7 @@
#include <algorithm>
#include <limits>
#include <list>
+#include <memory>
#include <numeric>
#include <string>
#include <vector>
@@ -21,7 +22,6 @@
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
-#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -145,7 +145,7 @@
private:
size_t file_size_in_bytes_;
int sample_rate_;
- rtc::scoped_ptr<int16_t[]> file_;
+ std::unique_ptr<int16_t[]> file_;
size_t file_pos_;
};
@@ -233,7 +233,7 @@
rtc::CriticalSection lock_;
const size_t frames_per_buffer_;
const size_t bytes_per_buffer_;
- rtc::scoped_ptr<AudioBufferList> fifo_;
+ std::unique_ptr<AudioBufferList> fifo_;
size_t largest_size_;
size_t total_written_elements_;
size_t write_count_;
@@ -593,7 +593,7 @@
EXPECT_FALSE(audio_device()->Recording());
}
- rtc::scoped_ptr<EventWrapper> test_is_done_;
+ std::unique_ptr<EventWrapper> test_is_done_;
rtc::scoped_refptr<AudioDeviceModule> audio_device_;
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
@@ -761,7 +761,7 @@
NiceMock<MockAudioTransport> mock(kPlayout);
const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
std::string file_name = GetFileName(playout_sample_rate());
- rtc::scoped_ptr<FileAudioStream> file_audio_stream(
+ std::unique_ptr<FileAudioStream> file_audio_stream(
new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
num_callbacks);
@@ -795,7 +795,7 @@
EXPECT_EQ(record_channels(), playout_channels());
EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
- rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
+ std::unique_ptr<FifoAudioStream> fifo_audio_stream(
new FifoAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
kFullDuplexTimeInSec * kNumCallbacksPerSecond);
@@ -824,7 +824,7 @@
EXPECT_EQ(record_channels(), playout_channels());
EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
- rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+ std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
diff --git a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
index 4a1a519..340e963 100644
--- a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
+#include <memory>
+
#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
@@ -187,8 +189,8 @@
// TODO(pbos): Make plain members and start/stop instead of resetting these
// pointers. A thread can be reused.
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadRec;
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadPlay;
int32_t _id;
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
index de8df0b..a0a15e5 100644
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
+#include <memory>
+
#include "webrtc/base/platform_thread.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
@@ -284,9 +286,9 @@
EventWrapper& _recStartEvent;
EventWrapper& _playStartEvent;
- // TODO(pbos): Remove scoped_ptr and use directly without resetting.
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
+ // TODO(pbos): Remove unique_ptr and use directly without resetting.
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadPlay;
+ std::unique_ptr<rtc::PlatformThread> _ptrThreadRec;
int32_t _id;
AudioMixerManagerLinuxPulse _mixerManager;
diff --git a/webrtc/modules/audio_device/mac/audio_device_mac.h b/webrtc/modules/audio_device/mac/audio_device_mac.h
index ca3a519..102c67e 100644
--- a/webrtc/modules/audio_device/mac/audio_device_mac.h
+++ b/webrtc/modules/audio_device/mac/audio_device_mac.h
@@ -11,7 +11,8 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_MAC_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_MAC_H
-#include "webrtc/base/scoped_ptr.h"
+#include <memory>
+
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h"
@@ -290,10 +291,10 @@
// TODO(pbos): Replace with direct members, just start/stop, no need to
// recreate the thread.
// Only valid/running between calls to StartRecording and StopRecording.
- rtc::scoped_ptr<rtc::PlatformThread> capture_worker_thread_;
+ std::unique_ptr<rtc::PlatformThread> capture_worker_thread_;
// Only valid/running between calls to StartPlayout and StopPlayout.
- rtc::scoped_ptr<rtc::PlatformThread> render_worker_thread_;
+ std::unique_ptr<rtc::PlatformThread> render_worker_thread_;
int32_t _id;
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index f2861ec..a564e35 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -12,6 +12,8 @@
#include <stdio.h>
#include <string.h>
+#include <memory>
+
#include "webrtc/modules/audio_device/test/audio_device_test_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -140,7 +142,8 @@
virtual ~AudioDeviceAPITest() {}
static void SetUpTestCase() {
- process_thread_ = ProcessThread::Create("ProcessThread");
+ process_thread_ =
+ rtc::ScopedToUnique(ProcessThread::Create("ProcessThread"));
process_thread_->Start();
// Windows:
@@ -300,7 +303,7 @@
// TODO(henrika): Get rid of globals.
static bool linux_alsa_;
- static rtc::scoped_ptr<ProcessThread> process_thread_;
+ static std::unique_ptr<ProcessThread> process_thread_;
static AudioDeviceModule* audio_device_;
static AudioTransportAPI* audio_transport_;
static AudioEventObserverAPI* event_observer_;
@@ -308,7 +311,7 @@
// Must be initialized like this to handle static SetUpTestCase() above.
bool AudioDeviceAPITest::linux_alsa_ = false;
-rtc::scoped_ptr<ProcessThread> AudioDeviceAPITest::process_thread_;
+std::unique_ptr<ProcessThread> AudioDeviceAPITest::process_thread_;
AudioDeviceModule* AudioDeviceAPITest::audio_device_ = NULL;
AudioTransportAPI* AudioDeviceAPITest::audio_transport_ = NULL;
AudioEventObserverAPI* AudioDeviceAPITest::event_observer_ = NULL;
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index 0a2963e..bb7686c 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -594,8 +594,8 @@
int32_t FuncTestManager::Init()
{
- EXPECT_TRUE((_processThread = ProcessThread::Create("ProcessThread")) !=
- NULL);
+ EXPECT_TRUE((_processThread = rtc::ScopedToUnique(
+ ProcessThread::Create("ProcessThread"))) != NULL);
if (_processThread == NULL)
{
return -1;
@@ -832,8 +832,8 @@
// ==================================================
// Next, try to make fresh start with new audio layer
- EXPECT_TRUE((_processThread = ProcessThread::Create("ProcessThread")) !=
- NULL);
+ EXPECT_TRUE((_processThread = rtc::ScopedToUnique(
+ ProcessThread::Create("ProcessThread"))) != NULL);
if (_processThread == NULL)
{
return -1;
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index b7cc81c..2b8a19b 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -12,6 +12,7 @@
#define WEBRTC_AUDIO_DEVICE_FUNC_TEST_MANAGER_H
#include <list>
+#include <memory>
#include <string>
#include "webrtc/common_audio/resampler/include/resampler.h"
@@ -206,7 +207,7 @@
std::string _playoutFile16;
std::string _playoutFile8;
- rtc::scoped_ptr<ProcessThread> _processThread;
+ std::unique_ptr<ProcessThread> _processThread;
AudioDeviceModule* _audioDevice;
AudioEventObserver* _audioEventObserver;
AudioTransportImpl* _audioTransport;
diff --git a/webrtc/modules/audio_device/win/audio_device_wave_win.h b/webrtc/modules/audio_device/win/audio_device_wave_win.h
index a1cfc6a..402d575 100644
--- a/webrtc/modules/audio_device/win/audio_device_wave_win.h
+++ b/webrtc/modules/audio_device/win/audio_device_wave_win.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
+#include <memory>
+
#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/win/audio_mixer_manager_win.h"
@@ -222,8 +224,8 @@
HANDLE _hShutdownSetVolumeEvent;
HANDLE _hSetCaptureVolumeEvent;
- // TODO(pbos): Remove scoped_ptr usage and use PlatformThread directly
- rtc::scoped_ptr<rtc::PlatformThread> _ptrThread;
+ // TODO(pbos): Remove unique_ptr usage and use PlatformThread directly
+ std::unique_ptr<rtc::PlatformThread> _ptrThread;
CriticalSectionWrapper& _critSectCb;