Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2009 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | package android.media.cts; |
| 18 | |
Jason Parks | 62563bc | 2014-11-07 14:52:56 -0600 | [diff] [blame] | 19 | import android.content.pm.PackageManager; |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 20 | import android.cts.util.CtsAndroidTestCase; |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 21 | import android.media.AudioAttributes; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 22 | import android.media.AudioFormat; |
| 23 | import android.media.AudioManager; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 24 | import android.media.AudioTimestamp; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 25 | import android.media.AudioTrack; |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 26 | import android.media.PlaybackParams; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 27 | import android.util.Log; |
Jean-Michel Trivi | b164742 | 2015-04-03 11:17:31 -0700 | [diff] [blame] | 28 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 29 | import com.android.cts.util.ReportLog; |
| 30 | import com.android.cts.util.ResultType; |
| 31 | import com.android.cts.util.ResultUnit; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 32 | |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 33 | import java.nio.ByteOrder; |
Keun young Park | 88c3175 | 2012-06-26 11:46:02 -0700 | [diff] [blame] | 34 | import java.nio.ByteBuffer; |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 35 | import java.nio.ShortBuffer; |
| 36 | import java.nio.FloatBuffer; |
Keun young Park | 88c3175 | 2012-06-26 11:46:02 -0700 | [diff] [blame] | 37 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 38 | public class AudioTrackTest extends CtsAndroidTestCase { |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 39 | private String TAG = "AudioTrackTest"; |
Eunice | d26929a | 2011-01-05 23:35:19 -0800 | [diff] [blame] | 40 | private final long WAIT_MSEC = 200; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 41 | private final int OFFSET_DEFAULT = 0; |
| 42 | private final int OFFSET_NEGATIVE = -10; |
| 43 | |
| 44 | private void log(String testName, String message) { |
| 45 | Log.v(TAG, "[" + testName + "] " + message); |
| 46 | } |
| 47 | |
| 48 | private void loge(String testName, String message) { |
| 49 | Log.e(TAG, "[" + testName + "] " + message); |
| 50 | } |
| 51 | |
| 52 | // ----------------------------------------------------------------- |
| 53 | // private class to hold test results |
| 54 | private static class TestResults { |
| 55 | public boolean mResult = false; |
| 56 | public String mResultLog = ""; |
| 57 | |
| 58 | public TestResults(boolean b, String s) { |
| 59 | mResult = b; |
| 60 | mResultLog = s; |
| 61 | } |
| 62 | } |
| 63 | |
| 64 | // ----------------------------------------------------------------- |
| 65 | // generic test methods |
| 66 | public TestResults constructorTestMultiSampleRate( |
| 67 | // parameters tested by this method |
| 68 | int _inTest_streamType, int _inTest_mode, int _inTest_config, int _inTest_format, |
| 69 | // parameter-dependent expected results |
| 70 | int _expected_stateForMode) { |
| 71 | |
| 72 | int[] testSampleRates = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; |
| 73 | String failedRates = "Failure for rate(s): "; |
| 74 | boolean localRes, finalRes = true; |
| 75 | |
| 76 | for (int i = 0; i < testSampleRates.length; i++) { |
| 77 | AudioTrack track = null; |
| 78 | try { |
| 79 | track = new AudioTrack(_inTest_streamType, testSampleRates[i], _inTest_config, |
| 80 | _inTest_format, AudioTrack.getMinBufferSize(testSampleRates[i], |
| 81 | _inTest_config, _inTest_format), _inTest_mode); |
| 82 | } catch (IllegalArgumentException iae) { |
| 83 | Log.e("MediaAudioTrackTest", "[ constructorTestMultiSampleRate ] exception at SR " |
| 84 | + testSampleRates[i] + ": \n" + iae); |
| 85 | localRes = false; |
| 86 | } |
| 87 | if (track != null) { |
| 88 | localRes = (track.getState() == _expected_stateForMode); |
| 89 | track.release(); |
| 90 | } else { |
| 91 | localRes = false; |
| 92 | } |
| 93 | |
| 94 | if (!localRes) { |
| 95 | // log the error for the test runner |
| 96 | failedRates += Integer.toString(testSampleRates[i]) + "Hz "; |
| 97 | // log the error for logcat |
| 98 | log("constructorTestMultiSampleRate", "failed to construct " |
| 99 | + "AudioTrack(streamType=" |
| 100 | + _inTest_streamType |
| 101 | + ", sampleRateInHz=" |
| 102 | + testSampleRates[i] |
| 103 | + ", channelConfig=" |
| 104 | + _inTest_config |
| 105 | + ", audioFormat=" |
| 106 | + _inTest_format |
| 107 | + ", bufferSizeInBytes=" |
| 108 | + AudioTrack.getMinBufferSize(testSampleRates[i], _inTest_config, |
| 109 | AudioFormat.ENCODING_PCM_16BIT) + ", mode=" + _inTest_mode); |
| 110 | // mark test as failed |
| 111 | finalRes = false; |
| 112 | } |
| 113 | } |
| 114 | return new TestResults(finalRes, failedRates); |
| 115 | } |
| 116 | |
| 117 | // ----------------------------------------------------------------- |
| 118 | // AUDIOTRACK TESTS: |
| 119 | // ---------------------------------- |
| 120 | |
| 121 | // ----------------------------------------------------------------- |
| 122 | // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 16bit PCM |
| 123 | // ---------------------------------- |
| 124 | |
| 125 | // Test case 1: constructor for streaming AudioTrack, mono, 16bit at misc |
| 126 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 127 | public void testConstructorMono16MusicStream() throws Exception { |
| 128 | |
| 129 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 130 | AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO, |
| 131 | AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED); |
| 132 | |
| 133 | assertTrue("testConstructorMono16MusicStream: " + res.mResultLog, res.mResult); |
| 134 | } |
| 135 | |
| 136 | // Test case 2: constructor for streaming AudioTrack, stereo, 16bit at misc |
| 137 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 138 | public void testConstructorStereo16MusicStream() throws Exception { |
| 139 | |
| 140 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 141 | AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO, |
| 142 | AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED); |
| 143 | |
| 144 | assertTrue("testConstructorStereo16MusicStream: " + res.mResultLog, res.mResult); |
| 145 | } |
| 146 | |
| 147 | // Test case 3: constructor for static AudioTrack, mono, 16bit at misc valid |
| 148 | // sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 149 | public void testConstructorMono16MusicStatic() throws Exception { |
| 150 | |
| 151 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 152 | AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO, |
| 153 | AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA); |
| 154 | |
| 155 | assertTrue("testConstructorMono16MusicStatic: " + res.mResultLog, res.mResult); |
| 156 | } |
| 157 | |
| 158 | // Test case 4: constructor for static AudioTrack, stereo, 16bit at misc |
| 159 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 160 | public void testConstructorStereo16MusicStatic() throws Exception { |
| 161 | |
| 162 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 163 | AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO, |
| 164 | AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA); |
| 165 | |
| 166 | assertTrue("testConstructorStereo16MusicStatic: " + res.mResultLog, res.mResult); |
| 167 | } |
| 168 | |
| 169 | // ----------------------------------------------------------------- |
| 170 | // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 8bit PCM |
| 171 | // ---------------------------------- |
| 172 | |
| 173 | // Test case 1: constructor for streaming AudioTrack, mono, 8bit at misc |
| 174 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 175 | public void testConstructorMono8MusicStream() throws Exception { |
| 176 | |
| 177 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 178 | AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO, |
| 179 | AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED); |
| 180 | |
| 181 | assertTrue("testConstructorMono8MusicStream: " + res.mResultLog, res.mResult); |
| 182 | } |
| 183 | |
| 184 | // Test case 2: constructor for streaming AudioTrack, stereo, 8bit at misc |
| 185 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 186 | public void testConstructorStereo8MusicStream() throws Exception { |
| 187 | |
| 188 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 189 | AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO, |
| 190 | AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED); |
| 191 | |
| 192 | assertTrue("testConstructorStereo8MusicStream: " + res.mResultLog, res.mResult); |
| 193 | } |
| 194 | |
| 195 | // Test case 3: constructor for static AudioTrack, mono, 8bit at misc valid |
| 196 | // sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 197 | public void testConstructorMono8MusicStatic() throws Exception { |
| 198 | |
| 199 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 200 | AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO, |
| 201 | AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA); |
| 202 | |
| 203 | assertTrue("testConstructorMono8MusicStatic: " + res.mResultLog, res.mResult); |
| 204 | } |
| 205 | |
| 206 | // Test case 4: constructor for static AudioTrack, stereo, 8bit at misc |
| 207 | // valid sample rates |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 208 | public void testConstructorStereo8MusicStatic() throws Exception { |
| 209 | |
| 210 | TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC, |
| 211 | AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO, |
| 212 | AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA); |
| 213 | |
| 214 | assertTrue("testConstructorStereo8MusicStatic: " + res.mResultLog, res.mResult); |
| 215 | } |
| 216 | |
| 217 | // ----------------------------------------------------------------- |
| 218 | // AudioTrack constructor for all stream types |
| 219 | // ---------------------------------- |
| 220 | |
| 221 | // Test case 1: constructor for all stream types |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 222 | public void testConstructorStreamType() throws Exception { |
| 223 | // constants for test |
| 224 | final int TYPE_TEST_SR = 22050; |
| 225 | final int TYPE_TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 226 | final int TYPE_TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 227 | final int TYPE_TEST_MODE = AudioTrack.MODE_STREAM; |
| 228 | final int[] STREAM_TYPES = { AudioManager.STREAM_ALARM, AudioManager.STREAM_MUSIC, |
| 229 | AudioManager.STREAM_NOTIFICATION, AudioManager.STREAM_RING, |
| 230 | AudioManager.STREAM_SYSTEM, AudioManager.STREAM_VOICE_CALL }; |
| 231 | final String[] STREAM_NAMES = { "STREAM_ALARM", "STREAM_MUSIC", "STREAM_NOTIFICATION", |
| 232 | "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL" }; |
| 233 | |
| 234 | boolean localTestRes = true; |
| 235 | AudioTrack track = null; |
| 236 | // test: loop constructor on all stream types |
| 237 | for (int i = 0; i < STREAM_TYPES.length; i++) { |
| 238 | try { |
| 239 | // -------- initialization -------------- |
| 240 | track = new AudioTrack(STREAM_TYPES[i], TYPE_TEST_SR, TYPE_TEST_CONF, |
| 241 | TYPE_TEST_FORMAT, AudioTrack.getMinBufferSize(TYPE_TEST_SR, TYPE_TEST_CONF, |
| 242 | TYPE_TEST_FORMAT), TYPE_TEST_MODE); |
| 243 | } catch (IllegalArgumentException iae) { |
| 244 | loge("testConstructorStreamType", "exception for stream type " + STREAM_NAMES[i] |
| 245 | + ": " + iae); |
| 246 | localTestRes = false; |
| 247 | } |
| 248 | // -------- test -------------- |
| 249 | if (track != null) { |
| 250 | if (track.getState() != AudioTrack.STATE_INITIALIZED) { |
| 251 | localTestRes = false; |
| 252 | Log.e("MediaAudioTrackTest", |
| 253 | "[ testConstructorStreamType ] failed for stream type " |
| 254 | + STREAM_NAMES[i]); |
| 255 | } |
| 256 | // -------- tear down -------------- |
| 257 | track.release(); |
| 258 | } else { |
| 259 | localTestRes = false; |
| 260 | } |
| 261 | } |
| 262 | |
| 263 | assertTrue("testConstructorStreamType", localTestRes); |
| 264 | } |
| 265 | |
| 266 | // ----------------------------------------------------------------- |
Jean-Michel Trivi | b164742 | 2015-04-03 11:17:31 -0700 | [diff] [blame] | 267 | // AudioTrack construction with Builder |
| 268 | // ---------------------------------- |
| 269 | |
| 270 | // Test case 1: build AudioTrack with default parameters, test documented default params |
| 271 | public void testBuilderDefault() throws Exception { |
| 272 | // constants for test |
| 273 | final String TEST_NAME = "testBuilderDefault"; |
| 274 | final int expectedDefaultEncoding = AudioFormat.ENCODING_PCM_16BIT; |
| 275 | final int expectedDefaultRate = |
| 276 | AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC); |
| 277 | final int expectedDefaultChannels = AudioFormat.CHANNEL_OUT_STEREO; |
| 278 | // use Builder |
| 279 | final int buffSizeInBytes = AudioTrack.getMinBufferSize( |
| 280 | expectedDefaultRate, expectedDefaultChannels, expectedDefaultEncoding); |
| 281 | final AudioTrack track = new AudioTrack.Builder() |
| 282 | .setBufferSizeInBytes(buffSizeInBytes) |
| 283 | .build(); |
| 284 | // save results |
| 285 | final int observedState = track.getState(); |
| 286 | final int observedFormat = track.getAudioFormat(); |
| 287 | final int observedChannelConf = track.getChannelConfiguration(); |
| 288 | final int observedRate = track.getSampleRate(); |
| 289 | // release track before the test exits (either successfully or with an exception) |
| 290 | track.release(); |
| 291 | // compare results |
| 292 | assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED, |
| 293 | observedState); |
| 294 | assertEquals(TEST_NAME + ": Default track encoding", expectedDefaultEncoding, |
| 295 | observedFormat); |
| 296 | assertEquals(TEST_NAME + ": Default track channels", expectedDefaultChannels, |
| 297 | observedChannelConf); |
| 298 | assertEquals(TEST_NAME + ": Default track sample rate", expectedDefaultRate, |
| 299 | observedRate); |
| 300 | } |
| 301 | |
| 302 | // Test case 2: build AudioTrack with AudioFormat, test it's used |
| 303 | public void testBuilderFormat() throws Exception { |
| 304 | // constants for test |
| 305 | final String TEST_NAME = "testBuilderFormat"; |
| 306 | final int TEST_RATE = 32000; |
| 307 | final int TEST_CHANNELS = AudioFormat.CHANNEL_OUT_STEREO; |
| 308 | // use Builder |
| 309 | final int buffSizeInBytes = AudioTrack.getMinBufferSize( |
| 310 | TEST_RATE, TEST_CHANNELS, AudioFormat.ENCODING_PCM_16BIT); |
| 311 | final AudioTrack track = new AudioTrack.Builder() |
| 312 | .setAudioAttributes(new AudioAttributes.Builder().build()) |
| 313 | .setBufferSizeInBytes(buffSizeInBytes) |
| 314 | .setAudioFormat(new AudioFormat.Builder() |
| 315 | .setChannelMask(TEST_CHANNELS).setSampleRate(TEST_RATE).build()) |
| 316 | .build(); |
| 317 | // save results |
| 318 | final int observedState = track.getState(); |
| 319 | final int observedChannelConf = track.getChannelConfiguration(); |
| 320 | final int observedRate = track.getSampleRate(); |
| 321 | // release track before the test exits (either successfully or with an exception) |
| 322 | track.release(); |
| 323 | // compare results |
| 324 | assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED, |
| 325 | observedState); |
| 326 | assertEquals(TEST_NAME + ": Track channels", TEST_CHANNELS, observedChannelConf); |
| 327 | assertEquals(TEST_NAME + ": Track sample rate", TEST_RATE, observedRate); |
| 328 | } |
| 329 | |
| 330 | // Test case 3: build AudioTrack with session ID, test it's used |
| 331 | public void testBuilderSession() throws Exception { |
| 332 | // constants for test |
| 333 | final String TEST_NAME = "testBuilderSession"; |
| 334 | // generate a session ID |
| 335 | final int expectedSessionId = new AudioManager(getContext()).generateAudioSessionId(); |
| 336 | // use builder |
| 337 | final AudioTrack track = new AudioTrack.Builder() |
| 338 | .setSessionId(expectedSessionId) |
| 339 | .build(); |
| 340 | // save results |
| 341 | final int observedSessionId = track.getAudioSessionId(); |
| 342 | // release track before the test exits (either successfully or with an exception) |
| 343 | track.release(); |
| 344 | // compare results |
| 345 | assertEquals(TEST_NAME + ": Assigned track session ID", expectedSessionId, |
| 346 | observedSessionId); |
| 347 | } |
| 348 | |
| 349 | // Test case 4: build AudioTrack with AudioAttributes built from stream type, test it's used |
| 350 | public void testBuilderAttributesStream() throws Exception { |
| 351 | // constants for test |
| 352 | final String TEST_NAME = "testBuilderAttributesStream"; |
| 353 | // use a stream type documented in AudioAttributes.Builder.setLegacyStreamType(int) |
| 354 | final int expectedStreamType = AudioManager.STREAM_ALARM; |
| 355 | final int expectedContentType = AudioAttributes.CONTENT_TYPE_SPEECH; |
| 356 | final AudioAttributes aa = new AudioAttributes.Builder() |
| 357 | .setLegacyStreamType(expectedStreamType) |
| 358 | .setContentType(expectedContentType) |
| 359 | .build(); |
| 360 | // use builder |
| 361 | final AudioTrack track = new AudioTrack.Builder() |
| 362 | .setAudioAttributes(aa) |
| 363 | .build(); |
| 364 | // save results |
| 365 | final int observedStreamType = track.getStreamType(); |
| 366 | // release track before the test exits (either successfully or with an exception) |
| 367 | track.release(); |
| 368 | // compare results |
| 369 | assertEquals(TEST_NAME + ": track stream type", expectedStreamType, observedStreamType); |
| 370 | // also test content type was preserved in the attributes even though they |
| 371 | // were first configured with a legacy stream type |
| 372 | assertEquals(TEST_NAME + ": attributes content type", expectedContentType, |
| 373 | aa.getContentType()); |
| 374 | } |
| 375 | |
| 376 | // ----------------------------------------------------------------- |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 377 | // Playback head position |
| 378 | // ---------------------------------- |
| 379 | |
| 380 | // Test case 1: getPlaybackHeadPosition() at 0 after initialization |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 381 | public void testPlaybackHeadPositionAfterInit() throws Exception { |
| 382 | // constants for test |
| 383 | final String TEST_NAME = "testPlaybackHeadPositionAfterInit"; |
| 384 | final int TEST_SR = 22050; |
| 385 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 386 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 387 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 388 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 389 | |
| 390 | // -------- initialization -------------- |
| 391 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 392 | AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT), TEST_MODE); |
| 393 | // -------- test -------------- |
| 394 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 395 | assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0); |
| 396 | // -------- tear down -------------- |
| 397 | track.release(); |
| 398 | } |
| 399 | |
| 400 | // Test case 2: getPlaybackHeadPosition() increases after play() |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 401 | public void testPlaybackHeadPositionIncrease() throws Exception { |
| 402 | // constants for test |
| 403 | final String TEST_NAME = "testPlaybackHeadPositionIncrease"; |
| 404 | final int TEST_SR = 22050; |
| 405 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 406 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 407 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 408 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 409 | |
| 410 | // -------- initialization -------------- |
| 411 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 412 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 413 | 2 * minBuffSize, TEST_MODE); |
| 414 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 415 | // -------- test -------------- |
| 416 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 417 | track.write(data, OFFSET_DEFAULT, data.length); |
| 418 | track.write(data, OFFSET_DEFAULT, data.length); |
| 419 | track.play(); |
| 420 | Thread.sleep(100); |
| 421 | log(TEST_NAME, "position =" + track.getPlaybackHeadPosition()); |
| 422 | assertTrue(TEST_NAME, track.getPlaybackHeadPosition() > 0); |
| 423 | // -------- tear down -------------- |
| 424 | track.release(); |
| 425 | } |
| 426 | |
| 427 | // Test case 3: getPlaybackHeadPosition() is 0 after flush(); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 428 | public void testPlaybackHeadPositionAfterFlush() throws Exception { |
| 429 | // constants for test |
| 430 | final String TEST_NAME = "testPlaybackHeadPositionAfterFlush"; |
| 431 | final int TEST_SR = 22050; |
| 432 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 433 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 434 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 435 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 436 | |
| 437 | // -------- initialization -------------- |
| 438 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 439 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 440 | 2 * minBuffSize, TEST_MODE); |
| 441 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 442 | // -------- test -------------- |
| 443 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 444 | track.write(data, OFFSET_DEFAULT, data.length); |
| 445 | track.write(data, OFFSET_DEFAULT, data.length); |
| 446 | track.play(); |
| 447 | Thread.sleep(WAIT_MSEC); |
| 448 | track.stop(); |
| 449 | track.flush(); |
| 450 | log(TEST_NAME, "position =" + track.getPlaybackHeadPosition()); |
| 451 | assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0); |
| 452 | // -------- tear down -------------- |
| 453 | track.release(); |
| 454 | } |
| 455 | |
| 456 | // Test case 3: getPlaybackHeadPosition() is 0 after stop(); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 457 | public void testPlaybackHeadPositionAfterStop() throws Exception { |
| 458 | // constants for test |
| 459 | final String TEST_NAME = "testPlaybackHeadPositionAfterStop"; |
| 460 | final int TEST_SR = 22050; |
| 461 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 462 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 463 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 464 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
Eric Laurent | 2a1e521 | 2012-06-13 11:44:19 -0700 | [diff] [blame] | 465 | final int TEST_LOOP_CNT = 10; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 466 | |
| 467 | // -------- initialization -------------- |
| 468 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 469 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 470 | 2 * minBuffSize, TEST_MODE); |
| 471 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 472 | // -------- test -------------- |
| 473 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 474 | track.write(data, OFFSET_DEFAULT, data.length); |
| 475 | track.write(data, OFFSET_DEFAULT, data.length); |
| 476 | track.play(); |
| 477 | Thread.sleep(WAIT_MSEC); |
| 478 | track.stop(); |
Eric Laurent | 2a1e521 | 2012-06-13 11:44:19 -0700 | [diff] [blame] | 479 | int count = 0; |
| 480 | int pos; |
| 481 | do { |
| 482 | Thread.sleep(WAIT_MSEC); |
| 483 | pos = track.getPlaybackHeadPosition(); |
| 484 | count++; |
| 485 | } while((pos != 0) && (count < TEST_LOOP_CNT)); |
| 486 | log(TEST_NAME, "position =" + pos + ", read count ="+count); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 487 | assertTrue(TEST_NAME, pos == 0); |
| 488 | // -------- tear down -------------- |
| 489 | track.release(); |
| 490 | } |
| 491 | |
| 492 | // Test case 4: getPlaybackHeadPosition() is > 0 after play(); pause(); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 493 | public void testPlaybackHeadPositionAfterPause() throws Exception { |
| 494 | // constants for test |
| 495 | final String TEST_NAME = "testPlaybackHeadPositionAfterPause"; |
| 496 | final int TEST_SR = 22050; |
| 497 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 498 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 499 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 500 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 501 | |
| 502 | // -------- initialization -------------- |
| 503 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 504 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 505 | 2 * minBuffSize, TEST_MODE); |
| 506 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 507 | // -------- test -------------- |
| 508 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 509 | track.write(data, OFFSET_DEFAULT, data.length); |
| 510 | track.write(data, OFFSET_DEFAULT, data.length); |
| 511 | track.play(); |
| 512 | Thread.sleep(100); |
| 513 | track.pause(); |
| 514 | int pos = track.getPlaybackHeadPosition(); |
| 515 | log(TEST_NAME, "position =" + pos); |
| 516 | assertTrue(TEST_NAME, pos > 0); |
| 517 | // -------- tear down -------------- |
| 518 | track.release(); |
| 519 | } |
| 520 | |
Vytautas Vaitukaitis | ba339d9 | 2013-07-05 16:54:56 +0100 | [diff] [blame] | 521 | // Test case 5: getPlaybackHeadPosition() remains 0 after pause(); flush(); play(); |
| 522 | public void testPlaybackHeadPositionAfterFlushAndPlay() throws Exception { |
| 523 | // constants for test |
| 524 | final String TEST_NAME = "testPlaybackHeadPositionAfterFlushAndPlay"; |
Vytautas Vaitukaitis | ba339d9 | 2013-07-05 16:54:56 +0100 | [diff] [blame] | 525 | final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO; |
| 526 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 527 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 528 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
Glenn Kasten | ea60bad | 2013-10-18 16:17:39 -0700 | [diff] [blame] | 529 | final int TEST_SR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE); |
Vytautas Vaitukaitis | ba339d9 | 2013-07-05 16:54:56 +0100 | [diff] [blame] | 530 | |
| 531 | // -------- initialization -------------- |
| 532 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 533 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 534 | 2 * minBuffSize, TEST_MODE); |
| 535 | byte data[] = new byte[minBuffSize]; |
| 536 | // -------- test -------------- |
| 537 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 538 | track.write(data, OFFSET_DEFAULT, data.length); |
| 539 | track.write(data, OFFSET_DEFAULT, data.length); |
| 540 | track.play(); |
| 541 | Thread.sleep(100); |
| 542 | track.pause(); |
| 543 | |
| 544 | int pos = track.getPlaybackHeadPosition(); |
| 545 | log(TEST_NAME, "position after pause =" + pos); |
| 546 | assertTrue(TEST_NAME, pos > 0); |
| 547 | |
| 548 | track.flush(); |
| 549 | pos = track.getPlaybackHeadPosition(); |
| 550 | log(TEST_NAME, "position after flush =" + pos); |
| 551 | assertTrue(TEST_NAME, pos == 0); |
| 552 | |
| 553 | track.play(); |
| 554 | pos = track.getPlaybackHeadPosition(); |
| 555 | log(TEST_NAME, "position after play =" + pos); |
| 556 | assertTrue(TEST_NAME, pos == 0); |
| 557 | |
| 558 | Thread.sleep(100); |
| 559 | pos = track.getPlaybackHeadPosition(); |
| 560 | log(TEST_NAME, "position after 100 ms sleep =" + pos); |
| 561 | assertTrue(TEST_NAME, pos == 0); |
| 562 | // -------- tear down -------------- |
| 563 | track.release(); |
| 564 | } |
| 565 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 566 | // ----------------------------------------------------------------- |
| 567 | // Playback properties |
| 568 | // ---------------------------------- |
| 569 | |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 570 | // Common code for the testSetStereoVolume* and testSetVolume* tests |
| 571 | private void testSetVolumeCommon(String testName, float vol, boolean isStereo) throws Exception { |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 572 | // constants for test |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 573 | final int TEST_SR = 22050; |
| 574 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 575 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 576 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 577 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 578 | |
| 579 | // -------- initialization -------------- |
| 580 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 581 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 582 | 2 * minBuffSize, TEST_MODE); |
| 583 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 584 | // -------- test -------------- |
| 585 | track.write(data, OFFSET_DEFAULT, data.length); |
| 586 | track.write(data, OFFSET_DEFAULT, data.length); |
| 587 | track.play(); |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 588 | if (isStereo) { |
| 589 | // TODO to really test this, do a pan instead of using same value for left and right |
| 590 | assertTrue(testName, track.setStereoVolume(vol, vol) == AudioTrack.SUCCESS); |
| 591 | } else { |
| 592 | assertTrue(testName, track.setVolume(vol) == AudioTrack.SUCCESS); |
| 593 | } |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 594 | // -------- tear down -------------- |
| 595 | track.release(); |
| 596 | } |
| 597 | |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 598 | // Test case 1: setStereoVolume() with max volume returns SUCCESS |
| 599 | public void testSetStereoVolumeMax() throws Exception { |
| 600 | final String TEST_NAME = "testSetStereoVolumeMax"; |
| 601 | float maxVol = AudioTrack.getMaxVolume(); |
| 602 | testSetVolumeCommon(TEST_NAME, maxVol, true /*isStereo*/); |
| 603 | } |
| 604 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 605 | // Test case 2: setStereoVolume() with min volume returns SUCCESS |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 606 | public void testSetStereoVolumeMin() throws Exception { |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 607 | final String TEST_NAME = "testSetStereoVolumeMin"; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 608 | float minVol = AudioTrack.getMinVolume(); |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 609 | testSetVolumeCommon(TEST_NAME, minVol, true /*isStereo*/); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 610 | } |
| 611 | |
| 612 | // Test case 3: setStereoVolume() with mid volume returns SUCCESS |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 613 | public void testSetStereoVolumeMid() throws Exception { |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 614 | final String TEST_NAME = "testSetStereoVolumeMid"; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 615 | float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2; |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 616 | testSetVolumeCommon(TEST_NAME, midVol, true /*isStereo*/); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 617 | } |
| 618 | |
| 619 | // Test case 4: setPlaybackRate() with half the content rate returns SUCCESS |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 620 | public void testSetPlaybackRate() throws Exception { |
| 621 | // constants for test |
| 622 | final String TEST_NAME = "testSetPlaybackRate"; |
| 623 | final int TEST_SR = 22050; |
| 624 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 625 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 626 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 627 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 628 | |
| 629 | // -------- initialization -------------- |
| 630 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 631 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 632 | 2 * minBuffSize, TEST_MODE); |
| 633 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 634 | // -------- test -------------- |
| 635 | track.write(data, OFFSET_DEFAULT, data.length); |
| 636 | track.write(data, OFFSET_DEFAULT, data.length); |
| 637 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 638 | track.play(); |
| 639 | assertTrue(TEST_NAME, track.setPlaybackRate((int) (TEST_SR / 2)) == AudioTrack.SUCCESS); |
| 640 | // -------- tear down -------------- |
| 641 | track.release(); |
| 642 | } |
| 643 | |
| 644 | // Test case 5: setPlaybackRate(0) returns bad value error |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 645 | public void testSetPlaybackRateZero() throws Exception { |
| 646 | // constants for test |
| 647 | final String TEST_NAME = "testSetPlaybackRateZero"; |
| 648 | final int TEST_SR = 22050; |
| 649 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 650 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 651 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 652 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 653 | |
| 654 | // -------- initialization -------------- |
| 655 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 656 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 657 | minBuffSize, TEST_MODE); |
| 658 | // -------- test -------------- |
| 659 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 660 | assertTrue(TEST_NAME, track.setPlaybackRate(0) == AudioTrack.ERROR_BAD_VALUE); |
| 661 | // -------- tear down -------------- |
| 662 | track.release(); |
| 663 | } |
| 664 | |
| 665 | // Test case 6: setPlaybackRate() accepts values twice the output sample |
| 666 | // rate |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 667 | public void testSetPlaybackRateTwiceOutputSR() throws Exception { |
| 668 | // constants for test |
| 669 | final String TEST_NAME = "testSetPlaybackRateTwiceOutputSR"; |
| 670 | final int TEST_SR = 22050; |
| 671 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 672 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 673 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 674 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 675 | |
| 676 | // -------- initialization -------------- |
| 677 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 678 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 679 | 2 * minBuffSize, TEST_MODE); |
| 680 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 681 | int outputSR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE); |
| 682 | // -------- test -------------- |
| 683 | track.write(data, OFFSET_DEFAULT, data.length); |
| 684 | track.write(data, OFFSET_DEFAULT, data.length); |
| 685 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 686 | track.play(); |
| 687 | assertTrue(TEST_NAME, track.setPlaybackRate(2 * outputSR) == AudioTrack.SUCCESS); |
| 688 | // -------- tear down -------------- |
| 689 | track.release(); |
| 690 | } |
| 691 | |
| 692 | // Test case 7: setPlaybackRate() and retrieve value, should be the same for |
| 693 | // half the content SR |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 694 | public void testSetGetPlaybackRate() throws Exception { |
| 695 | // constants for test |
| 696 | final String TEST_NAME = "testSetGetPlaybackRate"; |
| 697 | final int TEST_SR = 22050; |
| 698 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO; |
| 699 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 700 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 701 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 702 | |
| 703 | // -------- initialization -------------- |
| 704 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 705 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
karimuddin sayed | 4d12521 | 2011-03-16 06:26:20 -0500 | [diff] [blame] | 706 | 2 * minBuffSize, TEST_MODE); |
| 707 | byte data[] = new byte[minBuffSize]; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 708 | // -------- test -------------- |
| 709 | track.write(data, OFFSET_DEFAULT, data.length); |
| 710 | track.write(data, OFFSET_DEFAULT, data.length); |
| 711 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 712 | track.play(); |
| 713 | track.setPlaybackRate((int) (TEST_SR / 2)); |
| 714 | assertTrue(TEST_NAME, track.getPlaybackRate() == (int) (TEST_SR / 2)); |
| 715 | // -------- tear down -------------- |
| 716 | track.release(); |
| 717 | } |
| 718 | |
| 719 | // Test case 8: setPlaybackRate() invalid operation if track not initialized |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 720 | public void testSetPlaybackRateUninit() throws Exception { |
| 721 | // constants for test |
| 722 | final String TEST_NAME = "testSetPlaybackRateUninit"; |
| 723 | final int TEST_SR = 22050; |
| 724 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 725 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 726 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 727 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 728 | |
| 729 | // -------- initialization -------------- |
| 730 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 731 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 732 | minBuffSize, TEST_MODE); |
| 733 | // -------- test -------------- |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 734 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
| 735 | assertEquals(TEST_NAME, AudioTrack.ERROR_INVALID_OPERATION, |
| 736 | track.setPlaybackRate(TEST_SR / 2)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 737 | // -------- tear down -------------- |
| 738 | track.release(); |
| 739 | } |
| 740 | |
Glenn Kasten | 1c56173 | 2014-01-14 13:57:38 -0800 | [diff] [blame] | 741 | // Test case 9: setVolume() with max volume returns SUCCESS |
| 742 | public void testSetVolumeMax() throws Exception { |
| 743 | final String TEST_NAME = "testSetVolumeMax"; |
| 744 | float maxVol = AudioTrack.getMaxVolume(); |
| 745 | testSetVolumeCommon(TEST_NAME, maxVol, false /*isStereo*/); |
| 746 | } |
| 747 | |
| 748 | // Test case 10: setVolume() with min volume returns SUCCESS |
| 749 | public void testSetVolumeMin() throws Exception { |
| 750 | final String TEST_NAME = "testSetVolumeMin"; |
| 751 | float minVol = AudioTrack.getMinVolume(); |
| 752 | testSetVolumeCommon(TEST_NAME, minVol, false /*isStereo*/); |
| 753 | } |
| 754 | |
| 755 | // Test case 11: setVolume() with mid volume returns SUCCESS |
| 756 | public void testSetVolumeMid() throws Exception { |
| 757 | final String TEST_NAME = "testSetVolumeMid"; |
| 758 | float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2; |
| 759 | testSetVolumeCommon(TEST_NAME, midVol, false /*isStereo*/); |
| 760 | } |
| 761 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 762 | // ----------------------------------------------------------------- |
| 763 | // Playback progress |
| 764 | // ---------------------------------- |
| 765 | |
| 766 | // Test case 1: setPlaybackHeadPosition() on playing track |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 767 | public void testSetPlaybackHeadPositionPlaying() throws Exception { |
| 768 | // constants for test |
| 769 | final String TEST_NAME = "testSetPlaybackHeadPositionPlaying"; |
| 770 | final int TEST_SR = 22050; |
| 771 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 772 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 773 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 774 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 775 | |
| 776 | // -------- initialization -------------- |
| 777 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 778 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 779 | 2 * minBuffSize, TEST_MODE); |
| 780 | byte data[] = new byte[minBuffSize]; |
| 781 | // -------- test -------------- |
| 782 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 783 | track.write(data, OFFSET_DEFAULT, data.length); |
| 784 | track.write(data, OFFSET_DEFAULT, data.length); |
| 785 | track.play(); |
| 786 | assertTrue(TEST_NAME, |
| 787 | track.setPlaybackHeadPosition(10) == AudioTrack.ERROR_INVALID_OPERATION); |
| 788 | // -------- tear down -------------- |
| 789 | track.release(); |
| 790 | } |
| 791 | |
| 792 | // Test case 2: setPlaybackHeadPosition() on stopped track |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 793 | public void testSetPlaybackHeadPositionStopped() throws Exception { |
| 794 | // constants for test |
| 795 | final String TEST_NAME = "testSetPlaybackHeadPositionStopped"; |
| 796 | final int TEST_SR = 22050; |
| 797 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 798 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 799 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 800 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 801 | |
| 802 | // -------- initialization -------------- |
| 803 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 804 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 805 | 2 * minBuffSize, TEST_MODE); |
| 806 | byte data[] = new byte[minBuffSize]; |
| 807 | // -------- test -------------- |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 808 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 809 | track.write(data, OFFSET_DEFAULT, data.length); |
| 810 | track.write(data, OFFSET_DEFAULT, data.length); |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 811 | assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 812 | track.play(); |
| 813 | track.stop(); |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 814 | assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState()); |
| 815 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 816 | // -------- tear down -------------- |
| 817 | track.release(); |
| 818 | } |
| 819 | |
| 820 | // Test case 3: setPlaybackHeadPosition() on paused track |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 821 | public void testSetPlaybackHeadPositionPaused() throws Exception { |
| 822 | // constants for test |
| 823 | final String TEST_NAME = "testSetPlaybackHeadPositionPaused"; |
| 824 | final int TEST_SR = 22050; |
| 825 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 826 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 827 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 828 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 829 | |
| 830 | // -------- initialization -------------- |
| 831 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 832 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 833 | 2 * minBuffSize, TEST_MODE); |
| 834 | byte data[] = new byte[minBuffSize]; |
| 835 | // -------- test -------------- |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 836 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 837 | track.write(data, OFFSET_DEFAULT, data.length); |
| 838 | track.write(data, OFFSET_DEFAULT, data.length); |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 839 | assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 840 | track.play(); |
| 841 | track.pause(); |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 842 | assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_PAUSED, track.getPlayState()); |
| 843 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 844 | // -------- tear down -------------- |
| 845 | track.release(); |
| 846 | } |
| 847 | |
| 848 | // Test case 4: setPlaybackHeadPosition() beyond what has been written |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 849 | public void testSetPlaybackHeadPositionTooFar() throws Exception { |
| 850 | // constants for test |
| 851 | final String TEST_NAME = "testSetPlaybackHeadPositionTooFar"; |
| 852 | final int TEST_SR = 22050; |
| 853 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 854 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 855 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 856 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 857 | |
| 858 | // -------- initialization -------------- |
| 859 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 860 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 861 | 2 * minBuffSize, TEST_MODE); |
| 862 | byte data[] = new byte[minBuffSize]; |
| 863 | // make up a frame index that's beyond what has been written: go from |
| 864 | // buffer size to frame |
| 865 | // count (given the audio track properties), and add 77. |
| 866 | int frameIndexTooFar = (2 * minBuffSize / 2) + 77; |
| 867 | // -------- test -------------- |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 868 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 869 | track.write(data, OFFSET_DEFAULT, data.length); |
| 870 | track.write(data, OFFSET_DEFAULT, data.length); |
Glenn Kasten | 8391e6f | 2013-04-03 17:19:41 -0700 | [diff] [blame] | 871 | assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 872 | track.play(); |
| 873 | track.stop(); |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 874 | assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState()); |
| 875 | assertEquals(TEST_NAME, AudioTrack.ERROR_BAD_VALUE, |
| 876 | track.setPlaybackHeadPosition(frameIndexTooFar)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 877 | // -------- tear down -------------- |
| 878 | track.release(); |
| 879 | } |
| 880 | |
| 881 | // Test case 5: setLoopPoints() fails for MODE_STREAM |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 882 | public void testSetLoopPointsStream() throws Exception { |
| 883 | // constants for test |
| 884 | final String TEST_NAME = "testSetLoopPointsStream"; |
| 885 | final int TEST_SR = 22050; |
| 886 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 887 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 888 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 889 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 890 | |
| 891 | // -------- initialization -------------- |
| 892 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 893 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 894 | 2 * minBuffSize, TEST_MODE); |
| 895 | byte data[] = new byte[minBuffSize]; |
| 896 | // -------- test -------------- |
| 897 | track.write(data, OFFSET_DEFAULT, data.length); |
| 898 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 899 | assertTrue(TEST_NAME, track.setLoopPoints(2, 50, 2) == AudioTrack.ERROR_INVALID_OPERATION); |
| 900 | // -------- tear down -------------- |
| 901 | track.release(); |
| 902 | } |
| 903 | |
| 904 | // Test case 6: setLoopPoints() fails start > end |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 905 | public void testSetLoopPointsStartAfterEnd() throws Exception { |
| 906 | // constants for test |
| 907 | final String TEST_NAME = "testSetLoopPointsStartAfterEnd"; |
| 908 | final int TEST_SR = 22050; |
| 909 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 910 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 911 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 912 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 913 | |
| 914 | // -------- initialization -------------- |
| 915 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 916 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 917 | minBuffSize, TEST_MODE); |
| 918 | byte data[] = new byte[minBuffSize]; |
| 919 | // -------- test -------------- |
| 920 | track.write(data, OFFSET_DEFAULT, data.length); |
| 921 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 922 | assertTrue(TEST_NAME, track.setLoopPoints(50, 0, 2) == AudioTrack.ERROR_BAD_VALUE); |
| 923 | // -------- tear down -------------- |
| 924 | track.release(); |
| 925 | } |
| 926 | |
| 927 | // Test case 6: setLoopPoints() success |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 928 | public void testSetLoopPointsSuccess() throws Exception { |
| 929 | // constants for test |
| 930 | final String TEST_NAME = "testSetLoopPointsSuccess"; |
| 931 | final int TEST_SR = 22050; |
| 932 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 933 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 934 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 935 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 936 | |
| 937 | // -------- initialization -------------- |
| 938 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 939 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 940 | minBuffSize, TEST_MODE); |
| 941 | byte data[] = new byte[minBuffSize]; |
| 942 | // -------- test -------------- |
| 943 | track.write(data, OFFSET_DEFAULT, data.length); |
| 944 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 945 | assertTrue(TEST_NAME, track.setLoopPoints(0, 50, 2) == AudioTrack.SUCCESS); |
| 946 | // -------- tear down -------------- |
| 947 | track.release(); |
| 948 | } |
| 949 | |
| 950 | // Test case 7: setLoopPoints() fails with loop length bigger than content |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 951 | public void testSetLoopPointsLoopTooLong() throws Exception { |
| 952 | // constants for test |
| 953 | final String TEST_NAME = "testSetLoopPointsLoopTooLong"; |
| 954 | final int TEST_SR = 22050; |
| 955 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 956 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 957 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 958 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 959 | |
| 960 | // -------- initialization -------------- |
| 961 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 962 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 963 | minBuffSize, TEST_MODE); |
| 964 | byte data[] = new byte[minBuffSize]; |
| 965 | int dataSizeInFrames = minBuffSize / 2; |
| 966 | // -------- test -------------- |
| 967 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA); |
| 968 | track.write(data, OFFSET_DEFAULT, data.length); |
| 969 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 970 | assertTrue(TEST_NAME, track.setLoopPoints(10, dataSizeInFrames + 20, 2) == |
| 971 | AudioTrack.ERROR_BAD_VALUE); |
| 972 | // -------- tear down -------------- |
| 973 | track.release(); |
| 974 | } |
| 975 | |
| 976 | // Test case 8: setLoopPoints() fails with start beyond what can be written |
| 977 | // for the track |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 978 | public void testSetLoopPointsStartTooFar() throws Exception { |
| 979 | // constants for test |
| 980 | final String TEST_NAME = "testSetLoopPointsStartTooFar"; |
| 981 | final int TEST_SR = 22050; |
| 982 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 983 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 984 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 985 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 986 | |
| 987 | // -------- initialization -------------- |
| 988 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 989 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 990 | minBuffSize, TEST_MODE); |
| 991 | byte data[] = new byte[minBuffSize]; |
| 992 | int dataSizeInFrames = minBuffSize / 2;// 16bit data |
| 993 | // -------- test -------------- |
| 994 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA); |
| 995 | track.write(data, OFFSET_DEFAULT, data.length); |
| 996 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 997 | assertTrue(TEST_NAME, |
| 998 | track.setLoopPoints(dataSizeInFrames + 20, dataSizeInFrames + 50, 2) == |
| 999 | AudioTrack.ERROR_BAD_VALUE); |
| 1000 | // -------- tear down -------------- |
| 1001 | track.release(); |
| 1002 | } |
| 1003 | |
| 1004 | // Test case 9: setLoopPoints() fails with end beyond what can be written |
| 1005 | // for the track |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1006 | public void testSetLoopPointsEndTooFar() throws Exception { |
| 1007 | // constants for test |
| 1008 | final String TEST_NAME = "testSetLoopPointsEndTooFar"; |
| 1009 | final int TEST_SR = 22050; |
| 1010 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1011 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1012 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 1013 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1014 | |
| 1015 | // -------- initialization -------------- |
| 1016 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1017 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1018 | minBuffSize, TEST_MODE); |
| 1019 | byte data[] = new byte[minBuffSize]; |
| 1020 | int dataSizeInFrames = minBuffSize / 2;// 16bit data |
| 1021 | // -------- test -------------- |
| 1022 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA); |
| 1023 | track.write(data, OFFSET_DEFAULT, data.length); |
| 1024 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1025 | int loopCount = 2; |
| 1026 | assertTrue(TEST_NAME, |
| 1027 | track.setLoopPoints(dataSizeInFrames - 10, dataSizeInFrames + 50, loopCount) == |
| 1028 | AudioTrack.ERROR_BAD_VALUE); |
| 1029 | // -------- tear down -------------- |
| 1030 | track.release(); |
| 1031 | } |
| 1032 | |
| 1033 | // ----------------------------------------------------------------- |
| 1034 | // Audio data supply |
| 1035 | // ---------------------------------- |
| 1036 | |
| 1037 | // Test case 1: write() fails when supplying less data (bytes) than declared |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1038 | public void testWriteByteOffsetTooBig() throws Exception { |
| 1039 | // constants for test |
| 1040 | final String TEST_NAME = "testWriteByteOffsetTooBig"; |
| 1041 | final int TEST_SR = 22050; |
| 1042 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1043 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1044 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1045 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1046 | |
| 1047 | // -------- initialization -------------- |
| 1048 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1049 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1050 | 2 * minBuffSize, TEST_MODE); |
| 1051 | byte data[] = new byte[minBuffSize]; |
| 1052 | // -------- test -------------- |
| 1053 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1054 | int offset = 10; |
| 1055 | assertTrue(TEST_NAME, track.write(data, offset, data.length) == AudioTrack.ERROR_BAD_VALUE); |
| 1056 | // -------- tear down -------------- |
| 1057 | track.release(); |
| 1058 | } |
| 1059 | |
| 1060 | // Test case 2: write() fails when supplying less data (shorts) than |
| 1061 | // declared |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1062 | public void testWriteShortOffsetTooBig() throws Exception { |
| 1063 | // constants for test |
| 1064 | final String TEST_NAME = "testWriteShortOffsetTooBig"; |
| 1065 | final int TEST_SR = 22050; |
| 1066 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1067 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1068 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1069 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1070 | |
| 1071 | // -------- initialization -------------- |
| 1072 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1073 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1074 | 2 * minBuffSize, TEST_MODE); |
| 1075 | short data[] = new short[minBuffSize / 2]; |
| 1076 | // -------- test -------------- |
| 1077 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1078 | int offset = 10; |
| 1079 | assertTrue(TEST_NAME, track.write(data, offset, data.length) |
| 1080 | == AudioTrack.ERROR_BAD_VALUE); |
| 1081 | // -------- tear down -------------- |
| 1082 | track.release(); |
| 1083 | } |
| 1084 | |
| 1085 | // Test case 3: write() fails when supplying less data (bytes) than declared |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1086 | public void testWriteByteSizeTooBig() throws Exception { |
| 1087 | // constants for test |
| 1088 | final String TEST_NAME = "testWriteByteSizeTooBig"; |
| 1089 | final int TEST_SR = 22050; |
| 1090 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1091 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1092 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1093 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1094 | |
| 1095 | // -------- initialization -------------- |
| 1096 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1097 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1098 | 2 * minBuffSize, TEST_MODE); |
| 1099 | byte data[] = new byte[minBuffSize]; |
| 1100 | // -------- test -------------- |
| 1101 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1102 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10) |
| 1103 | == AudioTrack.ERROR_BAD_VALUE); |
| 1104 | // -------- tear down -------------- |
| 1105 | track.release(); |
| 1106 | } |
| 1107 | |
| 1108 | // Test case 4: write() fails when supplying less data (shorts) than |
| 1109 | // declared |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1110 | public void testWriteShortSizeTooBig() throws Exception { |
| 1111 | // constants for test |
| 1112 | final String TEST_NAME = "testWriteShortSizeTooBig"; |
| 1113 | final int TEST_SR = 22050; |
| 1114 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1115 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1116 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1117 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1118 | |
| 1119 | // -------- initialization -------------- |
| 1120 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1121 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1122 | 2 * minBuffSize, TEST_MODE); |
| 1123 | short data[] = new short[minBuffSize / 2]; |
| 1124 | // -------- test -------------- |
| 1125 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1126 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10) |
| 1127 | == AudioTrack.ERROR_BAD_VALUE); |
| 1128 | // -------- tear down -------------- |
| 1129 | track.release(); |
| 1130 | } |
| 1131 | |
| 1132 | // Test case 5: write() fails with negative offset |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1133 | public void testWriteByteNegativeOffset() throws Exception { |
| 1134 | // constants for test |
| 1135 | final String TEST_NAME = "testWriteByteNegativeOffset"; |
| 1136 | final int TEST_SR = 22050; |
| 1137 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1138 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1139 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1140 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1141 | |
| 1142 | // -------- initialization -------------- |
| 1143 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1144 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1145 | 2 * minBuffSize, TEST_MODE); |
| 1146 | byte data[] = new byte[minBuffSize]; |
| 1147 | // -------- test -------------- |
| 1148 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1149 | assertTrue(TEST_NAME, track.write(data, OFFSET_NEGATIVE, data.length - 10) == |
| 1150 | AudioTrack.ERROR_BAD_VALUE); |
| 1151 | // -------- tear down -------------- |
| 1152 | track.release(); |
| 1153 | } |
| 1154 | |
| 1155 | // Test case 6: write() fails with negative offset |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1156 | public void testWriteShortNegativeOffset() throws Exception { |
| 1157 | // constants for test |
| 1158 | final String TEST_NAME = "testWriteShortNegativeOffset"; |
| 1159 | final int TEST_SR = 22050; |
| 1160 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1161 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1162 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1163 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1164 | |
| 1165 | // -------- initialization -------------- |
| 1166 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1167 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1168 | 2 * minBuffSize, TEST_MODE); |
| 1169 | short data[] = new short[minBuffSize / 2]; |
| 1170 | // -------- test -------------- |
| 1171 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1172 | assertTrue(TEST_NAME, |
| 1173 | track.write(data, OFFSET_NEGATIVE, data.length - 10) == AudioTrack.ERROR_BAD_VALUE); |
| 1174 | // -------- tear down -------------- |
| 1175 | track.release(); |
| 1176 | } |
| 1177 | |
| 1178 | // Test case 7: write() fails with negative size |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1179 | public void testWriteByteNegativeSize() throws Exception { |
| 1180 | // constants for test |
| 1181 | final String TEST_NAME = "testWriteByteNegativeSize"; |
| 1182 | final int TEST_SR = 22050; |
| 1183 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1184 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1185 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1186 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1187 | |
| 1188 | // -------- initialization -------------- |
| 1189 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1190 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1191 | 2 * minBuffSize, TEST_MODE); |
| 1192 | byte data[] = new byte[minBuffSize]; |
| 1193 | // -------- test -------------- |
| 1194 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1195 | int dataLength = -10; |
| 1196 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength) |
| 1197 | == AudioTrack.ERROR_BAD_VALUE); |
| 1198 | // -------- tear down -------------- |
| 1199 | track.release(); |
| 1200 | } |
| 1201 | |
| 1202 | // Test case 8: write() fails with negative size |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1203 | public void testWriteShortNegativeSize() throws Exception { |
| 1204 | // constants for test |
| 1205 | final String TEST_NAME = "testWriteShortNegativeSize"; |
| 1206 | final int TEST_SR = 22050; |
| 1207 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1208 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1209 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1210 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1211 | |
| 1212 | // -------- initialization -------------- |
| 1213 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1214 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1215 | 2 * minBuffSize, TEST_MODE); |
| 1216 | short data[] = new short[minBuffSize / 2]; |
| 1217 | // -------- test -------------- |
| 1218 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1219 | int dataLength = -10; |
| 1220 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength) |
| 1221 | == AudioTrack.ERROR_BAD_VALUE); |
| 1222 | // -------- tear down -------------- |
| 1223 | track.release(); |
| 1224 | } |
| 1225 | |
| 1226 | // Test case 9: write() succeeds and returns the size that was written for |
| 1227 | // 16bit |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1228 | public void testWriteByte() throws Exception { |
| 1229 | // constants for test |
| 1230 | final String TEST_NAME = "testWriteByte"; |
| 1231 | final int TEST_SR = 22050; |
| 1232 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1233 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1234 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1235 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1236 | |
| 1237 | // -------- initialization -------------- |
| 1238 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1239 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1240 | 2 * minBuffSize, TEST_MODE); |
| 1241 | byte data[] = new byte[minBuffSize]; |
| 1242 | // -------- test -------------- |
| 1243 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1244 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length); |
| 1245 | // -------- tear down -------------- |
| 1246 | track.release(); |
| 1247 | } |
| 1248 | |
| 1249 | // Test case 10: write() succeeds and returns the size that was written for |
| 1250 | // 16bit |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1251 | public void testWriteShort() throws Exception { |
| 1252 | // constants for test |
| 1253 | final String TEST_NAME = "testWriteShort"; |
| 1254 | final int TEST_SR = 22050; |
| 1255 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1256 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1257 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1258 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1259 | |
| 1260 | // -------- initialization -------------- |
| 1261 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1262 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1263 | 2 * minBuffSize, TEST_MODE); |
| 1264 | short data[] = new short[minBuffSize / 2]; |
| 1265 | // -------- test -------------- |
| 1266 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1267 | assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length); |
| 1268 | track.flush(); |
| 1269 | // -------- tear down -------------- |
| 1270 | track.release(); |
| 1271 | } |
| 1272 | |
| 1273 | // Test case 11: write() succeeds and returns the size that was written for |
| 1274 | // 8bit |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1275 | public void testWriteByte8bit() throws Exception { |
| 1276 | // constants for test |
| 1277 | final String TEST_NAME = "testWriteByte8bit"; |
| 1278 | final int TEST_SR = 22050; |
| 1279 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1280 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1281 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1282 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1283 | |
| 1284 | // -------- initialization -------------- |
| 1285 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1286 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1287 | 2 * minBuffSize, TEST_MODE); |
| 1288 | byte data[] = new byte[minBuffSize]; |
| 1289 | // -------- test -------------- |
| 1290 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 1291 | assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1292 | // -------- tear down -------------- |
| 1293 | track.release(); |
| 1294 | } |
| 1295 | |
| 1296 | // Test case 12: write() succeeds and returns the size that was written for |
| 1297 | // 8bit |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1298 | public void testWriteShort8bit() throws Exception { |
| 1299 | // constants for test |
| 1300 | final String TEST_NAME = "testWriteShort8bit"; |
| 1301 | final int TEST_SR = 22050; |
| 1302 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1303 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1304 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1305 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1306 | |
| 1307 | // -------- initialization -------------- |
| 1308 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1309 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1310 | 2 * minBuffSize, TEST_MODE); |
| 1311 | short data[] = new short[minBuffSize / 2]; |
| 1312 | // -------- test -------------- |
| 1313 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
Glenn Kasten | c19c9a9 | 2013-04-03 17:08:43 -0700 | [diff] [blame] | 1314 | assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length)); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1315 | // -------- tear down -------------- |
| 1316 | track.release(); |
| 1317 | } |
| 1318 | |
| 1319 | // ----------------------------------------------------------------- |
| 1320 | // Getters |
| 1321 | // ---------------------------------- |
| 1322 | |
| 1323 | // Test case 1: getMinBufferSize() return ERROR_BAD_VALUE if SR < 4000 |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1324 | public void testGetMinBufferSizeTooLowSR() throws Exception { |
| 1325 | // constant for test |
| 1326 | final String TEST_NAME = "testGetMinBufferSizeTooLowSR"; |
| 1327 | final int TEST_SR = 3999; |
| 1328 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1329 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1330 | |
| 1331 | // -------- initialization & test -------------- |
| 1332 | assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) == |
| 1333 | AudioTrack.ERROR_BAD_VALUE); |
| 1334 | } |
| 1335 | |
Glenn Kasten | ce4552f | 2014-09-02 09:31:33 -0700 | [diff] [blame] | 1336 | // Test case 2: getMinBufferSize() return ERROR_BAD_VALUE if sample rate too high |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1337 | public void testGetMinBufferSizeTooHighSR() throws Exception { |
| 1338 | // constant for test |
| 1339 | final String TEST_NAME = "testGetMinBufferSizeTooHighSR"; |
Glenn Kasten | ce4552f | 2014-09-02 09:31:33 -0700 | [diff] [blame] | 1340 | // FIXME need an API to retrieve AudioTrack.SAMPLE_RATE_HZ_MAX |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1341 | final int TEST_SR = 192001; |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1342 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1343 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1344 | |
| 1345 | // -------- initialization & test -------------- |
| 1346 | assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) == |
| 1347 | AudioTrack.ERROR_BAD_VALUE); |
| 1348 | } |
| 1349 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1350 | public void testAudioTrackProperties() throws Exception { |
| 1351 | // constants for test |
| 1352 | final String TEST_NAME = "testAudioTrackProperties"; |
| 1353 | final int TEST_SR = 22050; |
| 1354 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1355 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1356 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1357 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1358 | |
| 1359 | // -------- initialization -------------- |
| 1360 | int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 1361 | MockAudioTrack track = new MockAudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, |
| 1362 | TEST_FORMAT, 2 * minBuffSize, TEST_MODE); |
Eric Laurent | 9ab726d | 2010-02-12 09:10:56 -0800 | [diff] [blame] | 1363 | assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1364 | assertEquals(TEST_NAME, TEST_FORMAT, track.getAudioFormat()); |
| 1365 | assertEquals(TEST_NAME, TEST_CONF, track.getChannelConfiguration()); |
| 1366 | assertEquals(TEST_NAME, TEST_SR, track.getSampleRate()); |
| 1367 | assertEquals(TEST_NAME, TEST_STREAM_TYPE, track.getStreamType()); |
| 1368 | final int hannelCount = 1; |
| 1369 | assertEquals(hannelCount, track.getChannelCount()); |
| 1370 | final int notificationMarkerPosition = 0; |
| 1371 | assertEquals(TEST_NAME, notificationMarkerPosition, track.getNotificationMarkerPosition()); |
| 1372 | final int markerInFrames = 2; |
| 1373 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, |
| 1374 | track.setNotificationMarkerPosition(markerInFrames)); |
| 1375 | assertEquals(TEST_NAME, markerInFrames, track.getNotificationMarkerPosition()); |
| 1376 | final int positionNotificationPeriod = 0; |
| 1377 | assertEquals(TEST_NAME, positionNotificationPeriod, track.getPositionNotificationPeriod()); |
| 1378 | final int periodInFrames = 2; |
| 1379 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, |
| 1380 | track.setPositionNotificationPeriod(periodInFrames)); |
| 1381 | assertEquals(TEST_NAME, periodInFrames, track.getPositionNotificationPeriod()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1382 | track.setState(AudioTrack.STATE_NO_STATIC_DATA); |
| 1383 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
| 1384 | track.setState(AudioTrack.STATE_UNINITIALIZED); |
| 1385 | assertEquals(TEST_NAME, AudioTrack.STATE_UNINITIALIZED, track.getState()); |
Eric Laurent | 9ab726d | 2010-02-12 09:10:56 -0800 | [diff] [blame] | 1386 | int frameCount = 2 * minBuffSize; |
| 1387 | if (TEST_CONF == AudioFormat.CHANNEL_CONFIGURATION_STEREO) { |
| 1388 | frameCount /= 2; |
| 1389 | } |
| 1390 | if (TEST_FORMAT == AudioFormat.ENCODING_PCM_16BIT) { |
| 1391 | frameCount /= 2; |
| 1392 | } |
| 1393 | assertTrue(TEST_NAME, track.getNativeFrameCount() >= frameCount); |
Andy Hung | 7405d52 | 2015-05-19 22:38:23 -0700 | [diff] [blame] | 1394 | assertEquals(TEST_NAME, track.getNativeFrameCount(), track.getBufferSizeInFrames()); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1395 | } |
| 1396 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1397 | public void testReloadStaticData() throws Exception { |
| 1398 | // constants for test |
| 1399 | final String TEST_NAME = "testReloadStaticData"; |
| 1400 | final int TEST_SR = 22050; |
| 1401 | final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; |
| 1402 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; |
| 1403 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 1404 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1405 | |
| 1406 | // -------- initialization -------------- |
| 1407 | int bufferSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1408 | byte data[] = AudioHelper.createSoundDataInByteArray( |
| 1409 | bufferSize, TEST_SR, 1024 /* frequency */, 0 /* sweep */); |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1410 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
| 1411 | bufferSize, TEST_MODE); |
| 1412 | // -------- test -------------- |
| 1413 | track.write(data, OFFSET_DEFAULT, bufferSize); |
| 1414 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1415 | track.play(); |
| 1416 | Thread.sleep(WAIT_MSEC); |
| 1417 | track.stop(); |
| 1418 | Thread.sleep(WAIT_MSEC); |
| 1419 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.reloadStaticData()); |
| 1420 | track.play(); |
| 1421 | Thread.sleep(WAIT_MSEC); |
| 1422 | track.stop(); |
| 1423 | // -------- tear down -------------- |
| 1424 | track.release(); |
| 1425 | } |
| 1426 | |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1427 | public void testPlayStaticData() throws Exception { |
Ricardo Garcia | 39f3207 | 2015-02-23 15:06:50 -0800 | [diff] [blame] | 1428 | if (!hasAudioOutput()) { |
| 1429 | Log.w(TAG,"AUDIO_OUTPUT feature not found. This system might not have a valid " |
| 1430 | + "audio output HAL"); |
| 1431 | return; |
| 1432 | } |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1433 | // constants for test |
| 1434 | final String TEST_NAME = "testPlayStaticData"; |
| 1435 | final int TEST_FORMAT_ARRAY[] = { // 6 chirps repeated (TEST_LOOPS+1) times, 3 times |
| 1436 | AudioFormat.ENCODING_PCM_8BIT, |
| 1437 | AudioFormat.ENCODING_PCM_16BIT, |
| 1438 | AudioFormat.ENCODING_PCM_FLOAT, |
| 1439 | }; |
| 1440 | final int TEST_SR_ARRAY[] = { |
| 1441 | 12055, // Note multichannel tracks will sound very short at low sample rates |
| 1442 | 48000, |
| 1443 | }; |
| 1444 | final int TEST_CONF_ARRAY[] = { |
| 1445 | AudioFormat.CHANNEL_OUT_MONO, // 1.0 |
| 1446 | AudioFormat.CHANNEL_OUT_STEREO, // 2.0 |
| 1447 | AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1 |
| 1448 | }; |
| 1449 | final int TEST_MODE = AudioTrack.MODE_STATIC; |
| 1450 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1451 | final double TEST_SWEEP = 100; |
| 1452 | final int TEST_LOOPS = 1; |
| 1453 | |
| 1454 | for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { |
| 1455 | double frequency = 400; // frequency changes for each test |
| 1456 | for (int TEST_SR : TEST_SR_ARRAY) { |
| 1457 | for (int TEST_CONF : TEST_CONF_ARRAY) { |
| 1458 | // -------- initialization -------------- |
| 1459 | final int seconds = 1; |
| 1460 | final int channelCount = Integer.bitCount(TEST_CONF); |
| 1461 | final int bufferFrames = seconds * TEST_SR; |
| 1462 | final int bufferSamples = bufferFrames * channelCount; |
| 1463 | final int bufferSize = bufferSamples |
| 1464 | * AudioFormat.getBytesPerSample(TEST_FORMAT); |
| 1465 | final double testFrequency = frequency / channelCount; |
| 1466 | final long MILLISECONDS_PER_SECOND = 1000; |
| 1467 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, |
| 1468 | TEST_CONF, TEST_FORMAT, bufferSize, TEST_MODE); |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1469 | assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1470 | |
| 1471 | // -------- test -------------- |
| 1472 | |
| 1473 | // test setLoopPoints and setPosition can be called here. |
| 1474 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1475 | android.media.AudioTrack.SUCCESS, |
| 1476 | track.setPlaybackHeadPosition(bufferFrames/2)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1477 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1478 | android.media.AudioTrack.SUCCESS, |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1479 | track.setLoopPoints( |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1480 | 0 /*startInFrames*/, bufferFrames, 10 /*loopCount*/)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1481 | // only need to write once to the static track |
| 1482 | switch (TEST_FORMAT) { |
| 1483 | case AudioFormat.ENCODING_PCM_8BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1484 | byte data[] = AudioHelper.createSoundDataInByteArray( |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1485 | bufferSamples, TEST_SR, |
| 1486 | testFrequency, TEST_SWEEP); |
| 1487 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1488 | bufferSamples, |
| 1489 | track.write(data, 0 /*offsetInBytes*/, data.length)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1490 | } break; |
| 1491 | case AudioFormat.ENCODING_PCM_16BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1492 | short data[] = AudioHelper.createSoundDataInShortArray( |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1493 | bufferSamples, TEST_SR, |
| 1494 | testFrequency, TEST_SWEEP); |
| 1495 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1496 | bufferSamples, |
| 1497 | track.write(data, 0 /*offsetInBytes*/, data.length)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1498 | } break; |
| 1499 | case AudioFormat.ENCODING_PCM_FLOAT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1500 | float data[] = AudioHelper.createSoundDataInFloatArray( |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1501 | bufferSamples, TEST_SR, |
| 1502 | testFrequency, TEST_SWEEP); |
| 1503 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1504 | bufferSamples, |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1505 | track.write(data, 0 /*offsetInBytes*/, data.length, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1506 | AudioTrack.WRITE_BLOCKING)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1507 | } break; |
| 1508 | } |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1509 | assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState()); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1510 | // test setLoopPoints and setPosition can be called here. |
| 1511 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1512 | android.media.AudioTrack.SUCCESS, |
| 1513 | track.setPlaybackHeadPosition(0 /*positionInFrames*/)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1514 | assertEquals(TEST_NAME, |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1515 | android.media.AudioTrack.SUCCESS, |
| 1516 | track.setLoopPoints(0 /*startInFrames*/, bufferFrames, TEST_LOOPS)); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1517 | |
| 1518 | track.play(); |
| 1519 | Thread.sleep(seconds * MILLISECONDS_PER_SECOND * (TEST_LOOPS + 1)); |
| 1520 | Thread.sleep(WAIT_MSEC); |
| 1521 | |
| 1522 | // Check position after looping. AudioTrack.getPlaybackHeadPosition() returns |
| 1523 | // the running count of frames played, not the actual static buffer position. |
| 1524 | int position = track.getPlaybackHeadPosition(); |
Andy Hung | 60fad0d | 2014-12-29 12:27:21 -0800 | [diff] [blame] | 1525 | assertEquals(TEST_NAME, bufferFrames * (TEST_LOOPS + 1), position); |
Andy Hung | d0697c1 | 2014-11-18 18:42:34 -0800 | [diff] [blame] | 1526 | |
| 1527 | track.stop(); |
| 1528 | Thread.sleep(WAIT_MSEC); |
| 1529 | // -------- tear down -------------- |
| 1530 | track.release(); |
| 1531 | frequency += 70; // increment test tone frequency |
| 1532 | } |
| 1533 | } |
| 1534 | } |
| 1535 | } |
| 1536 | |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 1537 | public void testPlayStreamData() throws Exception { |
| 1538 | // constants for test |
| 1539 | final String TEST_NAME = "testPlayStreamData"; |
Andy Hung | a560619 | 2014-08-15 18:27:56 -0700 | [diff] [blame] | 1540 | final int TEST_FORMAT_ARRAY[] = { // should hear 40 increasing frequency tones, 3 times |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1541 | AudioFormat.ENCODING_PCM_8BIT, |
| 1542 | AudioFormat.ENCODING_PCM_16BIT, |
| 1543 | AudioFormat.ENCODING_PCM_FLOAT, |
| 1544 | }; |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1545 | // due to downmixer algorithmic latency, source channels greater than 2 may |
| 1546 | // sound shorter in duration at 4kHz sampling rate. |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1547 | final int TEST_SR_ARRAY[] = { |
Andy Hung | caef8b1 | 2014-08-07 11:12:16 -0700 | [diff] [blame] | 1548 | 4000, |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1549 | 44100, |
| 1550 | 48000, |
Andy Hung | a560619 | 2014-08-15 18:27:56 -0700 | [diff] [blame] | 1551 | 96000, |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1552 | 192000, |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1553 | }; |
| 1554 | final int TEST_CONF_ARRAY[] = { |
Andy Hung | 15679e5 | 2014-08-11 14:11:57 -0700 | [diff] [blame] | 1555 | AudioFormat.CHANNEL_OUT_MONO, // 1.0 |
| 1556 | AudioFormat.CHANNEL_OUT_STEREO, // 2.0 |
| 1557 | AudioFormat.CHANNEL_OUT_STEREO | AudioFormat.CHANNEL_OUT_FRONT_CENTER, // 3.0 |
| 1558 | AudioFormat.CHANNEL_OUT_QUAD, // 4.0 |
| 1559 | AudioFormat.CHANNEL_OUT_QUAD | AudioFormat.CHANNEL_OUT_FRONT_CENTER, // 5.0 |
| 1560 | AudioFormat.CHANNEL_OUT_5POINT1, // 5.1 |
| 1561 | AudioFormat.CHANNEL_OUT_5POINT1 | AudioFormat.CHANNEL_OUT_BACK_CENTER, // 6.1 |
Andy Hung | a560619 | 2014-08-15 18:27:56 -0700 | [diff] [blame] | 1562 | AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1 |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1563 | }; |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1564 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1565 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1566 | final float TEST_SWEEP = 0; // sine wave only |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1567 | |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1568 | for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { |
Andy Hung | caef8b1 | 2014-08-07 11:12:16 -0700 | [diff] [blame] | 1569 | double frequency = 400; // frequency changes for each test |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1570 | for (int TEST_SR : TEST_SR_ARRAY) { |
| 1571 | for (int TEST_CONF : TEST_CONF_ARRAY) { |
| 1572 | // -------- initialization -------------- |
Andy Hung | caef8b1 | 2014-08-07 11:12:16 -0700 | [diff] [blame] | 1573 | final int minBufferSize = AudioTrack.getMinBufferSize(TEST_SR, |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1574 | TEST_CONF, TEST_FORMAT); // in bytes |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1575 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, |
| 1576 | TEST_CONF, TEST_FORMAT, minBufferSize, TEST_MODE); |
| 1577 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1578 | |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1579 | // compute parameters for the source signal data. |
| 1580 | final int channelCount = Integer.bitCount(TEST_CONF); |
| 1581 | AudioFormat format = track.getFormat(); |
| 1582 | assertEquals(TEST_NAME, TEST_SR, format.getSampleRate()); |
| 1583 | assertEquals(TEST_NAME, TEST_CONF, format.getChannelMask()); |
| 1584 | assertEquals(TEST_NAME, channelCount, format.getChannelCount()); |
| 1585 | assertEquals(TEST_NAME, TEST_FORMAT, format.getEncoding()); |
| 1586 | final int sourceSamples = channelCount |
| 1587 | * AudioHelper.frameCountFromMsec(500, |
| 1588 | format); // duration of test tones |
| 1589 | final double testFrequency = frequency / channelCount; |
| 1590 | |
| 1591 | int written = 0; |
| 1592 | // For streaming tracks, it's ok to issue the play() command |
| 1593 | // before any audio is written. |
| 1594 | track.play(); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1595 | // -------- test -------------- |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1596 | |
| 1597 | // samplesPerWrite can be any positive value. |
| 1598 | // We prefer this to be a multiple of channelCount so write() |
| 1599 | // does not return a short count. |
| 1600 | // If samplesPerWrite is very large, it is limited to the data length |
| 1601 | // and we simply write (blocking) the entire source data and not even loop. |
| 1602 | // We choose a value here which simulates double buffer writes. |
| 1603 | final int buffers = 2; // double buffering mode |
| 1604 | final int samplesPerWrite = |
Andy Hung | 7405d52 | 2015-05-19 22:38:23 -0700 | [diff] [blame] | 1605 | (track.getBufferSizeInFrames() / buffers) * channelCount; |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1606 | switch (TEST_FORMAT) { |
| 1607 | case AudioFormat.ENCODING_PCM_8BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1608 | byte data[] = AudioHelper.createSoundDataInByteArray( |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1609 | sourceSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1610 | testFrequency, TEST_SWEEP); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1611 | while (written < data.length) { |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1612 | int samples = Math.min(data.length - written, samplesPerWrite); |
| 1613 | int ret = track.write(data, written, samples); |
| 1614 | assertEquals(TEST_NAME, samples, ret); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1615 | written += ret; |
Andy Hung | 869ddce | 2014-06-18 09:57:15 -0700 | [diff] [blame] | 1616 | } |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1617 | } break; |
| 1618 | case AudioFormat.ENCODING_PCM_16BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1619 | short data[] = AudioHelper.createSoundDataInShortArray( |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1620 | sourceSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1621 | testFrequency, TEST_SWEEP); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1622 | while (written < data.length) { |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1623 | int samples = Math.min(data.length - written, samplesPerWrite); |
| 1624 | int ret = track.write(data, written, samples); |
| 1625 | assertEquals(TEST_NAME, samples, ret); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1626 | written += ret; |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1627 | } |
| 1628 | } break; |
| 1629 | case AudioFormat.ENCODING_PCM_FLOAT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1630 | float data[] = AudioHelper.createSoundDataInFloatArray( |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1631 | sourceSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1632 | testFrequency, TEST_SWEEP); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1633 | while (written < data.length) { |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1634 | int samples = Math.min(data.length - written, samplesPerWrite); |
| 1635 | int ret = track.write(data, written, samples, |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1636 | AudioTrack.WRITE_BLOCKING); |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1637 | assertEquals(TEST_NAME, samples, ret); |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1638 | written += ret; |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1639 | } |
| 1640 | } break; |
| 1641 | } |
| 1642 | |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1643 | // For streaming tracks, AudioTrack.stop() doesn't immediately stop playback. |
| 1644 | // Rather, it allows the remaining data in the internal buffer to drain. |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1645 | track.stop(); |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1646 | Thread.sleep(WAIT_MSEC); // wait for the data to drain. |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1647 | // -------- tear down -------------- |
| 1648 | track.release(); |
Andy Hung | c41a7db | 2015-04-25 23:30:24 -0700 | [diff] [blame] | 1649 | frequency += 50; // increment test tone frequency |
Andy Hung | 4045c90 | 2014-07-08 15:30:20 -0700 | [diff] [blame] | 1650 | } |
| 1651 | } |
| 1652 | } |
| 1653 | } |
| 1654 | |
    /**
     * Streams generated sine-tone data to an {@link AudioTrack} through the
     * ByteBuffer write API and verifies every write is accepted.
     *
     * Iterates over each PCM encoding (8-bit, 16-bit, float), each write mode
     * (blocking, non-blocking) and both ByteBuffer allocation styles (heap and
     * direct). For each combination it creates a MODE_STREAM track, fills a
     * ByteBuffer with 12x the minimum buffer size of tone data, writes it in
     * minBufferSize-sized slices, then stops and lets the track drain.
     * Tones are played audibly; correctness of the rendered audio is verified
     * by ear, the asserts only check API state and write return values.
     *
     * @throws Exception if interrupted while waiting for the track to drain
     */
    public void testPlayStreamByteBuffer() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlayStreamByteBuffer";
        final int TEST_FORMAT_ARRAY[] = { // should hear 4 tones played 3 times
                AudioFormat.ENCODING_PCM_8BIT,
                AudioFormat.ENCODING_PCM_16BIT,
                AudioFormat.ENCODING_PCM_FLOAT,
        };
        final int TEST_SR_ARRAY[] = {
                48000,
        };
        final int TEST_CONF_ARRAY[] = {
                AudioFormat.CHANNEL_OUT_STEREO,
        };
        final int TEST_WRITE_MODE_ARRAY[] = {
                AudioTrack.WRITE_BLOCKING,
                AudioTrack.WRITE_NON_BLOCKING,
        };
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final float TEST_SWEEP = 0; // sine wave only

        for (int TEST_FORMAT : TEST_FORMAT_ARRAY) {
            double frequency = 800; // frequency changes for each test
            for (int TEST_SR : TEST_SR_ARRAY) {
                for (int TEST_CONF : TEST_CONF_ARRAY) {
                    for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) {
                        for (int useDirect = 0; useDirect < 2; ++useDirect) {
                            // -------- initialization --------------
                            int minBufferSize = AudioTrack.getMinBufferSize(TEST_SR,
                                    TEST_CONF, TEST_FORMAT); // in bytes
                            // Source data is 12x the minimum buffer size; the track
                            // itself is created with only minBufferSize, so the data
                            // must be streamed in slices (see the write loop below).
                            int bufferSize = 12 * minBufferSize;
                            int bufferSamples = bufferSize
                                    / AudioFormat.getBytesPerSample(TEST_FORMAT);

                            // create audio track and confirm settings
                            AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR,
                                    TEST_CONF, TEST_FORMAT, minBufferSize, TEST_MODE);
                            assertEquals(TEST_NAME + ": state",
                                    AudioTrack.STATE_INITIALIZED, track.getState());
                            assertEquals(TEST_NAME + ": sample rate",
                                    TEST_SR, track.getSampleRate());
                            assertEquals(TEST_NAME + ": channel mask",
                                    TEST_CONF, track.getChannelConfiguration());
                            assertEquals(TEST_NAME + ": encoding",
                                    TEST_FORMAT, track.getAudioFormat());

                            // Alternate between direct and heap ByteBuffers; both must
                            // be accepted by AudioTrack.write(ByteBuffer, ...).
                            ByteBuffer bb = (useDirect == 1)
                                    ? ByteBuffer.allocateDirect(bufferSize)
                                    : ByteBuffer.allocate(bufferSize);
                            // native order required so multi-byte samples are laid out
                            // as the audio HAL expects
                            bb.order(java.nio.ByteOrder.nativeOrder());

                            // -------- test --------------
                            // Fill the ByteBuffer with tone data in the encoding under
                            // test, then set position/limit to span exactly the data.
                            switch (TEST_FORMAT) {
                                case AudioFormat.ENCODING_PCM_8BIT: {
                                    byte data[] = AudioHelper.createSoundDataInByteArray(
                                            bufferSamples, TEST_SR,
                                            frequency, TEST_SWEEP);
                                    bb.put(data);
                                    bb.flip();
                                } break;
                                case AudioFormat.ENCODING_PCM_16BIT: {
                                    short data[] = AudioHelper.createSoundDataInShortArray(
                                            bufferSamples, TEST_SR,
                                            frequency, TEST_SWEEP);
                                    ShortBuffer sb = bb.asShortBuffer();
                                    sb.put(data);
                                    bb.limit(sb.limit() * 2); // 2 bytes per short sample
                                } break;
                                case AudioFormat.ENCODING_PCM_FLOAT: {
                                    float data[] = AudioHelper.createSoundDataInFloatArray(
                                            bufferSamples, TEST_SR,
                                            frequency, TEST_SWEEP);
                                    FloatBuffer fb = bb.asFloatBuffer();
                                    fb.put(data);
                                    bb.limit(fb.limit() * 4); // 4 bytes per float sample
                                } break;
                            }

                            boolean hasPlayed = false;
                            int written = 0;
                            // Stream the buffer in minBufferSize slices. play() is
                            // issued after the first write has queued some data.
                            // In WRITE_NON_BLOCKING mode write() may return a short
                            // count (possibly 0), so only ret >= 0 is asserted.
                            while (written < bufferSize) {
                                int ret = track.write(bb,
                                        Math.min(bufferSize - written, minBufferSize),
                                        TEST_WRITE_MODE);
                                assertTrue(TEST_NAME, ret >= 0);
                                written += ret;
                                if (!hasPlayed) {
                                    track.play();
                                    hasPlayed = true;
                                }
                            }

                            // stop() lets the queued data drain; sleep to hear it out.
                            track.stop();
                            Thread.sleep(WAIT_MSEC);
                            // -------- tear down --------------
                            track.release();
                            frequency += 200; // increment test tone frequency
                        }
                    }
                }
            }
        }
    }
| 1759 | |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1760 | public void testPlayChannelIndexStreamBuffer() throws Exception { |
| 1761 | // should hear 4 tones played 3 or 4 times depending |
| 1762 | // on the device output capabilities (e.g. stereo or 5.1 or otherwise) |
| 1763 | final String TEST_NAME = "testPlayChannelIndexStreamBuffer"; |
| 1764 | final int TEST_FORMAT_ARRAY[] = { |
| 1765 | AudioFormat.ENCODING_PCM_8BIT, |
| 1766 | //AudioFormat.ENCODING_PCM_16BIT, |
| 1767 | //AudioFormat.ENCODING_PCM_FLOAT, |
| 1768 | }; |
| 1769 | final int TEST_SR_ARRAY[] = { |
| 1770 | 48000, |
| 1771 | }; |
| 1772 | // The following channel index masks are iterated over and route |
| 1773 | // the AudioTrack channels to the output sink channels based on |
| 1774 | // the set bits in counting order (lsb to msb). |
| 1775 | // |
| 1776 | // For a stereo output sink, the sound may come from L and R, L only, none, or R only. |
| 1777 | // For a 5.1 output sink, the sound may come from a variety of outputs |
| 1778 | // as commented below. |
| 1779 | final int TEST_CONF_ARRAY[] = { // matches output sink channels: |
| 1780 | (1 << 0) | (1 << 1), // Stereo(L, R) 5.1(FL, FR) |
| 1781 | (1 << 0) | (1 << 2), // Stereo(L) 5.1(FL, FC) |
| 1782 | (1 << 4) | (1 << 5), // Stereo(None) 5.1(BL, BR) |
| 1783 | (1 << 1) | (1 << 2), // Stereo(R) 5.1(FR, FC) |
| 1784 | }; |
| 1785 | final int TEST_WRITE_MODE_ARRAY[] = { |
| 1786 | AudioTrack.WRITE_BLOCKING, |
| 1787 | AudioTrack.WRITE_NON_BLOCKING, |
| 1788 | }; |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1789 | final float TEST_SWEEP = 0; |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1790 | |
| 1791 | for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { |
| 1792 | for (int TEST_CONF : TEST_CONF_ARRAY) { |
| 1793 | double frequency = 800; // frequency changes for each test |
| 1794 | for (int TEST_SR : TEST_SR_ARRAY) { |
| 1795 | for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) { |
| 1796 | for (int useDirect = 0; useDirect < 2; ++useDirect) { |
| 1797 | AudioFormat format = new AudioFormat.Builder() |
| 1798 | .setEncoding(TEST_FORMAT) |
| 1799 | .setSampleRate(TEST_SR) |
| 1800 | .setChannelIndexMask(TEST_CONF) |
| 1801 | .build(); |
| 1802 | AudioTrack track = new AudioTrack.Builder() |
| 1803 | .setAudioFormat(format) |
| 1804 | .build(); |
| 1805 | assertEquals(TEST_NAME, |
| 1806 | AudioTrack.STATE_INITIALIZED, track.getState()); |
| 1807 | |
| 1808 | // create the byte buffer and fill with test data |
| 1809 | final int frameSize = AudioHelper.frameSizeFromFormat(format); |
| 1810 | final int frameCount = |
| 1811 | AudioHelper.frameCountFromMsec(300 /* ms */, format); |
| 1812 | final int bufferSize = frameCount * frameSize; |
| 1813 | final int bufferSamples = frameCount * format.getChannelCount(); |
| 1814 | ByteBuffer bb = (useDirect == 1) |
| 1815 | ? ByteBuffer.allocateDirect(bufferSize) |
| 1816 | : ByteBuffer.allocate(bufferSize); |
| 1817 | bb.order(java.nio.ByteOrder.nativeOrder()); |
| 1818 | |
| 1819 | switch (TEST_FORMAT) { |
| 1820 | case AudioFormat.ENCODING_PCM_8BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1821 | byte data[] = AudioHelper.createSoundDataInByteArray( |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1822 | bufferSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1823 | frequency, TEST_SWEEP); |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1824 | bb.put(data); |
| 1825 | bb.flip(); |
| 1826 | } break; |
| 1827 | case AudioFormat.ENCODING_PCM_16BIT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1828 | short data[] = AudioHelper.createSoundDataInShortArray( |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1829 | bufferSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1830 | frequency, TEST_SWEEP); |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1831 | ShortBuffer sb = bb.asShortBuffer(); |
| 1832 | sb.put(data); |
| 1833 | bb.limit(sb.limit() * 2); |
| 1834 | } break; |
| 1835 | case AudioFormat.ENCODING_PCM_FLOAT: { |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1836 | float data[] = AudioHelper.createSoundDataInFloatArray( |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1837 | bufferSamples, TEST_SR, |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 1838 | frequency, TEST_SWEEP); |
Andy Hung | d85d98e | 2015-01-28 19:35:44 -0800 | [diff] [blame] | 1839 | FloatBuffer fb = bb.asFloatBuffer(); |
| 1840 | fb.put(data); |
| 1841 | bb.limit(fb.limit() * 4); |
| 1842 | } break; |
| 1843 | } |
| 1844 | |
| 1845 | // start the AudioTrack |
| 1846 | // This can be done before or after the first write. |
| 1847 | // Current behavior for streaming tracks is that |
| 1848 | // actual playback does not begin before the internal |
| 1849 | // data buffer is completely full. |
| 1850 | track.play(); |
| 1851 | |
| 1852 | // write data |
| 1853 | final long startTime = System.currentTimeMillis(); |
| 1854 | final long maxDuration = frameCount * 1000 / TEST_SR + 1000; |
| 1855 | for (int written = 0; written < bufferSize; ) { |
| 1856 | // ret may return a short count if write |
| 1857 | // is non blocking or even if write is blocking |
| 1858 | // when a stop/pause/flush is issued from another thread. |
| 1859 | final int kBatchFrames = 1000; |
| 1860 | int ret = track.write(bb, |
| 1861 | Math.min(bufferSize - written, frameSize * kBatchFrames), |
| 1862 | TEST_WRITE_MODE); |
| 1863 | // for non-blocking mode, this loop may spin quickly |
| 1864 | assertTrue(TEST_NAME + ": write error " + ret, ret >= 0); |
| 1865 | assertTrue(TEST_NAME + ": write timeout", |
| 1866 | (System.currentTimeMillis() - startTime) <= maxDuration); |
| 1867 | written += ret; |
| 1868 | } |
| 1869 | |
| 1870 | // for streaming tracks, stop will allow the rest of the data to |
| 1871 | // drain out, but we don't know how long to wait unless |
| 1872 | // we check the position before stop. if we check position |
| 1873 | // after we stop, we read 0. |
| 1874 | final int position = track.getPlaybackHeadPosition(); |
| 1875 | final int remainingTimeMs = (int)((double)(frameCount - position) |
| 1876 | * 1000 / TEST_SR); |
| 1877 | track.stop(); |
| 1878 | Thread.sleep(remainingTimeMs); |
| 1879 | // tear down |
| 1880 | track.release(); |
| 1881 | // add a gap to make tones distinct |
| 1882 | Thread.sleep(100 /* millis */); |
| 1883 | frequency += 200; // increment test tone frequency |
| 1884 | } |
| 1885 | } |
| 1886 | } |
| 1887 | } |
| 1888 | } |
| 1889 | } |
| 1890 | |
Jason Parks | 62563bc | 2014-11-07 14:52:56 -0600 | [diff] [blame] | 1891 | private boolean hasAudioOutput() { |
| 1892 | return getContext().getPackageManager() |
| 1893 | .hasSystemFeature(PackageManager.FEATURE_AUDIO_OUTPUT); |
| 1894 | } |
| 1895 | |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1896 | public void testGetTimestamp() throws Exception { |
Jason Parks | 62563bc | 2014-11-07 14:52:56 -0600 | [diff] [blame] | 1897 | if (!hasAudioOutput()) { |
Ricardo Garcia | 39f3207 | 2015-02-23 15:06:50 -0800 | [diff] [blame] | 1898 | Log.w(TAG,"AUDIO_OUTPUT feature not found. This system might not have a valid " |
| 1899 | + "audio output HAL"); |
Jason Parks | 62563bc | 2014-11-07 14:52:56 -0600 | [diff] [blame] | 1900 | return; |
| 1901 | } |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1902 | // constants for test |
| 1903 | final String TEST_NAME = "testGetTimestamp"; |
| 1904 | final int TEST_SR = 22050; |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1905 | final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1906 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; |
| 1907 | final int TEST_MODE = AudioTrack.MODE_STREAM; |
| 1908 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 1909 | final int TEST_LOOP_CNT = 10; |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1910 | // For jitter we allow 30 msec in frames. This is a large margin. |
| 1911 | // Often this is just 0 or 1 frames, but that can depend on hardware. |
| 1912 | final int TEST_JITTER_FRAMES_ALLOWED = TEST_SR * 30 / 1000; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1913 | |
| 1914 | // -------- initialization -------------- |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1915 | final int bytesPerFrame = |
| 1916 | AudioFormat.getBytesPerSample(TEST_FORMAT) |
| 1917 | * AudioFormat.channelCountFromOutChannelMask(TEST_CONF); |
| 1918 | final int minBufferSizeInBytes = |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1919 | AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1920 | final int bufferSizeInBytes = minBufferSizeInBytes * 3; |
| 1921 | byte[] data = new byte[bufferSizeInBytes]; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1922 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1923 | minBufferSizeInBytes, TEST_MODE); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1924 | // -------- test -------------- |
| 1925 | assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); |
| 1926 | |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1927 | AudioTimestamp timestamp = new AudioTimestamp(); |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1928 | boolean hasPlayed = false; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1929 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1930 | long framesWritten = 0, lastFramesPresented = 0, lastFramesPresentedAt = 0; |
| 1931 | int cumulativeJitterCount = 0; |
| 1932 | float cumulativeJitter = 0; |
| 1933 | float maxJitter = 0; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1934 | for (int i = 0; i < TEST_LOOP_CNT; i++) { |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1935 | final long writeTime = System.nanoTime(); |
| 1936 | |
| 1937 | for (int written = 0; written < data.length;) { |
| 1938 | int ret = track.write(data, written, |
| 1939 | Math.min(data.length - written, minBufferSizeInBytes)); |
| 1940 | assertTrue(TEST_NAME, ret >= 0); |
| 1941 | written += ret; |
| 1942 | if (!hasPlayed) { |
| 1943 | track.play(); |
| 1944 | hasPlayed = true; |
| 1945 | } |
| 1946 | } |
| 1947 | framesWritten += data.length / bytesPerFrame; |
| 1948 | |
| 1949 | // track.getTimestamp may return false if there are no physical HAL outputs. |
| 1950 | // This may occur on TV devices without connecting an HDMI monitor. |
| 1951 | // It may also be true immediately after start-up, as the mixing thread could |
| 1952 | // be idle, but since we've already pushed much more than the minimum buffer size, |
| 1953 | // that is unlikely. |
| 1954 | // Nevertheless, we don't want to have unnecessary failures, so we ignore the |
| 1955 | // first iteration if we don't get a timestamp. |
| 1956 | final boolean result = track.getTimestamp(timestamp); |
| 1957 | assertTrue(TEST_NAME, result || i == 0); |
| 1958 | if (!result) { |
| 1959 | continue; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1960 | } |
| 1961 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1962 | final long framesPresented = timestamp.framePosition; |
| 1963 | final long framesPresentedAt = timestamp.nanoTime; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1964 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1965 | // We read timestamp here to ensure that seen is greater than presented. |
| 1966 | // This is an "on-the-fly" read without pausing because pausing may cause the |
| 1967 | // timestamp to become stale and affect our jitter measurements. |
| 1968 | final int framesSeen = track.getPlaybackHeadPosition(); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1969 | assertTrue(TEST_NAME, framesWritten >= framesSeen); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1970 | assertTrue(TEST_NAME, framesSeen >= framesPresented); |
| 1971 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1972 | if (i > 1) { // need delta info from previous iteration (skipping first) |
| 1973 | final long deltaFrames = framesPresented - lastFramesPresented; |
| 1974 | final long deltaTime = framesPresentedAt - lastFramesPresentedAt; |
| 1975 | final long NANOSECONDS_PER_SECOND = 1000000000; |
| 1976 | final long expectedFrames = deltaTime * TEST_SR / NANOSECONDS_PER_SECOND; |
| 1977 | final long jitterFrames = Math.abs(deltaFrames - expectedFrames); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1978 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1979 | //Log.d(TAG, "framesWritten(" + framesWritten |
| 1980 | // + ") framesSeen(" + framesSeen |
| 1981 | // + ") framesPresented(" + framesPresented |
| 1982 | // + ") jitter(" + jitterFrames + ")"); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 1983 | |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 1984 | // We check that the timestamp position is reasonably accurate. |
| 1985 | assertTrue(TEST_NAME, deltaTime >= 0); |
| 1986 | assertTrue(TEST_NAME, deltaFrames >= 0); |
| 1987 | if (i > 2) { |
| 1988 | // The first two periods may have inherent jitter as the audio pipe |
| 1989 | // is filling up. We check jitter only after that. |
| 1990 | assertTrue(TEST_NAME, jitterFrames < TEST_JITTER_FRAMES_ALLOWED); |
| 1991 | cumulativeJitter += jitterFrames; |
| 1992 | cumulativeJitterCount++; |
| 1993 | if (jitterFrames > maxJitter) { |
| 1994 | maxJitter = jitterFrames; |
| 1995 | } |
| 1996 | } |
| 1997 | |
| 1998 | //Log.d(TAG, "lastFramesPresentedAt(" + lastFramesPresentedAt |
| 1999 | // + ") writeTime(" + writeTime |
| 2000 | // + ") framesPresentedAt(" + framesPresentedAt + ")"); |
| 2001 | |
| 2002 | // We check that the timestamp time is reasonably current. |
| 2003 | assertTrue(TEST_NAME, framesPresentedAt >= writeTime); |
| 2004 | assertTrue(TEST_NAME, writeTime >= lastFramesPresentedAt); |
| 2005 | } |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 2006 | lastFramesPresented = framesPresented; |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 2007 | lastFramesPresentedAt = framesPresentedAt; |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 2008 | } |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 2009 | // Full drain. |
| 2010 | Thread.sleep(WAIT_MSEC); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 2011 | track.stop(); |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 2012 | Thread.sleep(WAIT_MSEC); |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 2013 | track.release(); |
Andy Hung | 07ed4e6 | 2014-09-19 11:16:42 -0700 | [diff] [blame] | 2014 | // Log the average jitter |
| 2015 | if (cumulativeJitterCount > 0) { |
| 2016 | ReportLog log = getReportLog(); |
| 2017 | final float averageJitterInFrames = cumulativeJitter / cumulativeJitterCount; |
| 2018 | final float averageJitterInMs = averageJitterInFrames * 1000 / TEST_SR; |
| 2019 | final float maxJitterInMs = maxJitter * 1000 / TEST_SR; |
| 2020 | // ReportLog needs at least one Value and Summary. |
| 2021 | log.printValue("Maximum Jitter", maxJitterInMs, |
| 2022 | ResultType.LOWER_BETTER, ResultUnit.MS); |
| 2023 | log.printSummary("Average Jitter", averageJitterInMs, |
| 2024 | ResultType.LOWER_BETTER, ResultUnit.MS); |
| 2025 | } |
Robert Shih | 1cf9253 | 2014-03-24 16:00:23 -0700 | [diff] [blame] | 2026 | } |
| 2027 | |
Andy Hung | 88e2723 | 2015-01-09 16:14:28 -0800 | [diff] [blame] | 2028 | public void testVariableRatePlayback() throws Exception { |
| 2029 | final String TEST_NAME = "testVariableRatePlayback"; |
| 2030 | final int TEST_SR = 24000; |
| 2031 | final int TEST_FINAL_SR = 96000; |
| 2032 | final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO; |
| 2033 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; // required for test |
| 2034 | final int TEST_MODE = AudioTrack.MODE_STATIC; // required for test |
| 2035 | final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; |
| 2036 | |
| 2037 | final int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); |
| 2038 | final int bufferSizeInBytes = minBuffSize * 100; |
| 2039 | final int numChannels = AudioFormat.channelCountFromOutChannelMask(TEST_CONF); |
| 2040 | final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT); |
| 2041 | final int bytesPerFrame = numChannels * bytesPerSample; |
| 2042 | final int frameCount = bufferSizeInBytes / bytesPerFrame; |
| 2043 | |
| 2044 | AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, |
| 2045 | TEST_FORMAT, bufferSizeInBytes, TEST_MODE); |
| 2046 | |
| 2047 | // create byte array and write it |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 2048 | byte[] vai = AudioHelper.createSoundDataInByteArray(bufferSizeInBytes, TEST_SR, |
| 2049 | 600 /* frequency */, 0 /* sweep */); |
Andy Hung | 88e2723 | 2015-01-09 16:14:28 -0800 | [diff] [blame] | 2050 | assertEquals(vai.length, track.write(vai, 0 /* offsetInBytes */, vai.length)); |
| 2051 | |
| 2052 | // sweep up test and sweep down test |
| 2053 | int[] sampleRates = {TEST_SR, TEST_FINAL_SR}; |
| 2054 | int[] deltaMss = {10, 10}; |
| 2055 | int[] deltaFreqs = {200, -200}; |
| 2056 | |
| 2057 | for (int i = 0; i < 2; ++i) { |
| 2058 | int remainingTime; |
| 2059 | int sampleRate = sampleRates[i]; |
| 2060 | final int deltaMs = deltaMss[i]; |
| 2061 | final int deltaFreq = deltaFreqs[i]; |
| 2062 | final int lastCheckMs = 500; // check the last 500 ms |
| 2063 | |
| 2064 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate)); |
| 2065 | track.play(); |
| 2066 | do { |
| 2067 | Thread.sleep(deltaMs); |
| 2068 | final int position = track.getPlaybackHeadPosition(); |
| 2069 | sampleRate += deltaFreq; |
| 2070 | sampleRate = Math.min(TEST_FINAL_SR, Math.max(TEST_SR, sampleRate)); |
| 2071 | assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate)); |
| 2072 | remainingTime = (int)((double)(frameCount - position) * 1000 |
| 2073 | / sampleRate / bytesPerFrame); |
| 2074 | } while (remainingTime >= lastCheckMs + deltaMs); |
| 2075 | |
| 2076 | // ensure the final frequency set is constant and plays frames as expected |
| 2077 | final int position1 = track.getPlaybackHeadPosition(); |
| 2078 | Thread.sleep(lastCheckMs); |
| 2079 | final int position2 = track.getPlaybackHeadPosition(); |
| 2080 | |
| 2081 | final int tolerance60MsInFrames = sampleRate * 60 / 1000; |
| 2082 | final int expected = lastCheckMs * sampleRate / 1000; |
| 2083 | final int actual = position2 - position1; |
| 2084 | |
| 2085 | // Log.d(TAG, "Variable Playback: expected(" + expected + ") actual(" + actual |
| 2086 | // + ") diff(" + (expected - actual) + ")"); |
| 2087 | assertEquals(expected, actual, tolerance60MsInFrames); |
| 2088 | track.stop(); |
| 2089 | } |
| 2090 | track.release(); |
| 2091 | } |
| 2092 | |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2093 | public void testVariableSpeedPlayback() throws Exception { |
| 2094 | final String TEST_NAME = "testVariableSpeedPlayback"; |
| 2095 | final int TEST_FORMAT = AudioFormat.ENCODING_PCM_FLOAT; // required for test |
| 2096 | final int TEST_MODE = AudioTrack.MODE_STATIC; // required for test |
| 2097 | final int TEST_SR = 48000; |
| 2098 | |
| 2099 | AudioFormat format = new AudioFormat.Builder() |
| 2100 | //.setChannelIndexMask((1 << 0)) // output to first channel, FL |
| 2101 | .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) |
| 2102 | .setEncoding(TEST_FORMAT) |
| 2103 | .setSampleRate(TEST_SR) |
| 2104 | .build(); |
| 2105 | |
| 2106 | // create track |
| 2107 | final int frameCount = AudioHelper.frameCountFromMsec(100 /*ms*/, format); |
| 2108 | final int frameSize = AudioHelper.frameSizeFromFormat(format); |
| 2109 | AudioTrack track = new AudioTrack.Builder() |
| 2110 | .setAudioFormat(format) |
| 2111 | .setBufferSizeInBytes(frameCount * frameSize) |
| 2112 | .setTransferMode(TEST_MODE) |
| 2113 | .build(); |
| 2114 | |
| 2115 | // create float array and write it |
| 2116 | final int sampleCount = frameCount * format.getChannelCount(); |
Andy Hung | 781c7b5 | 2015-05-14 22:44:52 -0700 | [diff] [blame] | 2117 | float[] vaf = AudioHelper.createSoundDataInFloatArray( |
| 2118 | sampleCount, TEST_SR, 600 /* frequency */, 0 /* sweep */); |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2119 | assertEquals(vaf.length, track.write(vaf, 0 /* offsetInFloats */, vaf.length, |
| 2120 | AudioTrack.WRITE_NON_BLOCKING)); |
| 2121 | |
| 2122 | // sweep speed and pitch |
| 2123 | final float[][][] speedAndPitch = { |
| 2124 | // { {speedStart, pitchStart} {speedEnd, pitchEnd} } |
| 2125 | { {0.5f, 0.5f}, {2.0f, 2.0f} }, // speed by SR conversion (chirp) |
| 2126 | { {0.5f, 1.0f}, {2.0f, 1.0f} }, // speed by time stretch (constant pitch) |
| 2127 | { {1.0f, 0.5f}, {1.0f, 2.0f} }, // pitch by SR conversion (chirp) |
| 2128 | }; |
| 2129 | |
Andy Hung | 581ea15 | 2015-05-14 18:45:04 -0700 | [diff] [blame] | 2130 | // sanity test that playback params works as expected |
| 2131 | PlaybackParams params = new PlaybackParams().allowDefaults(); |
| 2132 | assertEquals(TEST_NAME, 1.0f, params.getSpeed()); |
| 2133 | assertEquals(TEST_NAME, 1.0f, params.getPitch()); |
| 2134 | assertEquals(TEST_NAME, |
| 2135 | params.AUDIO_FALLBACK_MODE_DEFAULT, |
| 2136 | params.getAudioFallbackMode()); |
| 2137 | track.setPlaybackParams(params); // OK |
| 2138 | params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_FAIL); |
| 2139 | assertEquals(TEST_NAME, |
| 2140 | params.AUDIO_FALLBACK_MODE_FAIL, params.getAudioFallbackMode()); |
| 2141 | params.setPitch(0.0f); |
| 2142 | try { |
| 2143 | track.setPlaybackParams(params); |
| 2144 | fail("IllegalArgumentException should be thrown on out of range data"); |
| 2145 | } catch (IllegalArgumentException e) { |
| 2146 | ; // expect this is invalid |
| 2147 | } |
| 2148 | // on failure, the AudioTrack params should not change. |
| 2149 | PlaybackParams paramCheck = track.getPlaybackParams(); |
| 2150 | assertEquals(TEST_NAME, |
| 2151 | paramCheck.AUDIO_FALLBACK_MODE_DEFAULT, paramCheck.getAudioFallbackMode()); |
| 2152 | assertEquals(TEST_NAME, |
| 2153 | 1.0f, paramCheck.getPitch()); |
| 2154 | |
| 2155 | // now try to see if we can do extreme pitch correction that should probably be muted. |
| 2156 | params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_MUTE); |
| 2157 | assertEquals(TEST_NAME, |
| 2158 | params.AUDIO_FALLBACK_MODE_MUTE, params.getAudioFallbackMode()); |
| 2159 | params.setPitch(0.1f); |
| 2160 | track.setPlaybackParams(params); // OK |
| 2161 | |
| 2162 | // now do our actual playback |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2163 | final int TEST_TIME_MS = 2000; |
| 2164 | final int TEST_DELTA_MS = 100; |
| 2165 | final int testSteps = TEST_TIME_MS / TEST_DELTA_MS; |
| 2166 | |
| 2167 | for (int i = 0; i < speedAndPitch.length; ++i) { |
| 2168 | final float speedStart = speedAndPitch[i][0][0]; |
| 2169 | final float pitchStart = speedAndPitch[i][0][1]; |
| 2170 | final float speedEnd = speedAndPitch[i][1][0]; |
| 2171 | final float pitchEnd = speedAndPitch[i][1][1]; |
| 2172 | final float speedInc = (speedEnd - speedStart) / testSteps; |
| 2173 | final float pitchInc = (pitchEnd - pitchStart) / testSteps; |
| 2174 | |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 2175 | PlaybackParams playbackParams = new PlaybackParams() |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2176 | .setPitch(pitchStart) |
| 2177 | .setSpeed(speedStart) |
| 2178 | .allowDefaults(); |
| 2179 | |
| 2180 | // set track in infinite loop to be a sine generator |
| 2181 | track.setLoopPoints(0, frameCount, -1 /* loopCount */); // cleared by stop() |
| 2182 | track.play(); |
| 2183 | |
| 2184 | Thread.sleep(300 /* millis */); // warm up track |
| 2185 | |
| 2186 | int anticipatedPosition = track.getPlaybackHeadPosition(); |
| 2187 | for (int j = 0; j < testSteps; ++j) { |
Andy Hung | adec961 | 2015-04-27 18:29:31 -0700 | [diff] [blame] | 2188 | // set playback settings |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 2189 | final float pitch = playbackParams.getPitch(); |
| 2190 | final float speed = playbackParams.getSpeed(); |
Andy Hung | adec961 | 2015-04-27 18:29:31 -0700 | [diff] [blame] | 2191 | |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 2192 | track.setPlaybackParams(playbackParams); |
Andy Hung | adec961 | 2015-04-27 18:29:31 -0700 | [diff] [blame] | 2193 | |
| 2194 | // verify that settings have changed |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 2195 | PlaybackParams checkParams = track.getPlaybackParams(); |
| 2196 | assertEquals(TAG, pitch, checkParams.getPitch()); |
| 2197 | assertEquals(TAG, speed, checkParams.getSpeed()); |
Andy Hung | adec961 | 2015-04-27 18:29:31 -0700 | [diff] [blame] | 2198 | |
| 2199 | // sleep for playback |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2200 | Thread.sleep(TEST_DELTA_MS); |
| 2201 | // Log.d(TAG, "position[" + j + "] " + track.getPlaybackHeadPosition()); |
| 2202 | anticipatedPosition += |
Wei Jia | 86ab058 | 2015-05-08 15:27:46 -0700 | [diff] [blame] | 2203 | playbackParams.getSpeed() * TEST_DELTA_MS * TEST_SR / 1000; |
| 2204 | playbackParams.setPitch(playbackParams.getPitch() + pitchInc); |
| 2205 | playbackParams.setSpeed(playbackParams.getSpeed() + speedInc); |
Andy Hung | 9a81d63 | 2015-04-17 18:13:10 -0700 | [diff] [blame] | 2206 | } |
| 2207 | final int endPosition = track.getPlaybackHeadPosition(); |
| 2208 | final int tolerance100MsInFrames = 100 * TEST_SR / 1000; |
| 2209 | assertEquals(TAG, anticipatedPosition, endPosition, tolerance100MsInFrames); |
| 2210 | track.stop(); |
| 2211 | |
| 2212 | Thread.sleep(100 /* millis */); // distinct pause between each test |
| 2213 | } |
| 2214 | track.release(); |
| 2215 | } |
| 2216 | |
Andy Hung | 4690584 | 2015-06-12 23:04:54 -0700 | [diff] [blame] | 2217 | // Test AudioTrack to ensure we can build after a failure. |
| 2218 | public void testAudioTrackBufferSize() throws Exception { |
| 2219 | // constants for test |
| 2220 | final String TEST_NAME = "testAudioTrackBufferSize"; |
| 2221 | |
| 2222 | // use builder with parameters that should fail |
| 2223 | final int superBigBufferSize = 1 << 28; |
| 2224 | try { |
| 2225 | final AudioTrack track = new AudioTrack.Builder() |
| 2226 | .setBufferSizeInBytes(superBigBufferSize) |
| 2227 | .build(); |
| 2228 | track.release(); |
| 2229 | fail(TEST_NAME + ": should throw exception on failure"); |
| 2230 | } catch (UnsupportedOperationException e) { |
| 2231 | ; |
| 2232 | } |
| 2233 | |
| 2234 | // we should be able to create again with minimum buffer size |
| 2235 | final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples |
| 2236 | final AudioTrack track2 = new AudioTrack.Builder() |
| 2237 | .setBufferSizeInBytes(verySmallBufferSize) |
| 2238 | .build(); |
| 2239 | |
| 2240 | final int observedState2 = track2.getState(); |
| 2241 | final int observedBufferSize2 = track2.getBufferSizeInFrames(); |
| 2242 | track2.release(); |
| 2243 | |
| 2244 | // succeeds for minimum buffer size |
| 2245 | assertEquals(TEST_NAME + ": state", AudioTrack.STATE_INITIALIZED, observedState2); |
| 2246 | // should force the minimum size buffer which is > 0 |
| 2247 | assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0); |
| 2248 | } |
| 2249 | |
Keun young Park | c0ef6f8 | 2012-10-03 11:39:54 -0700 | [diff] [blame] | 2250 | /* Do not run in JB-MR1. will be re-opened in the next platform release. |
Keun young Park | 88c3175 | 2012-06-26 11:46:02 -0700 | [diff] [blame] | 2251 | public void testResourceLeakage() throws Exception { |
| 2252 | final int BUFFER_SIZE = 600 * 1024; |
| 2253 | ByteBuffer data = ByteBuffer.allocate(BUFFER_SIZE); |
| 2254 | for (int i = 0; i < 10; i++) { |
| 2255 | Log.i(TAG, "testResourceLeakage round " + i); |
| 2256 | data.rewind(); |
| 2257 | AudioTrack track = new AudioTrack(AudioManager.STREAM_VOICE_CALL, |
| 2258 | 44100, |
| 2259 | AudioFormat.CHANNEL_OUT_STEREO, |
| 2260 | AudioFormat.ENCODING_PCM_16BIT, |
| 2261 | data.capacity(), |
| 2262 | AudioTrack.MODE_STREAM); |
| 2263 | assertTrue(track != null); |
| 2264 | track.write(data.array(), 0, data.capacity()); |
| 2265 | track.play(); |
| 2266 | Thread.sleep(100); |
| 2267 | track.stop(); |
| 2268 | track.release(); |
| 2269 | } |
| 2270 | } |
Keun young Park | c0ef6f8 | 2012-10-03 11:39:54 -0700 | [diff] [blame] | 2271 | */ |
Keun young Park | 88c3175 | 2012-06-26 11:46:02 -0700 | [diff] [blame] | 2272 | |
Andy Hung | 7405d52 | 2015-05-19 22:38:23 -0700 | [diff] [blame] | 2273 | /* MockAudioTrack allows testing of protected getNativeFrameCount() and setState(). */ |
Phil Dubach | adffe61 | 2009-07-24 09:43:01 -0700 | [diff] [blame] | 2274 | private class MockAudioTrack extends AudioTrack { |
| 2275 | |
| 2276 | public MockAudioTrack(int streamType, int sampleRateInHz, int channelConfig, |
| 2277 | int audioFormat, int bufferSizeInBytes, int mode) throws IllegalArgumentException { |
| 2278 | super(streamType, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mode); |
| 2279 | } |
| 2280 | |
| 2281 | public void setState(int state) { |
| 2282 | super.setState(state); |
| 2283 | } |
| 2284 | |
| 2285 | public int getNativeFrameCount() { |
| 2286 | return super.getNativeFrameCount(); |
| 2287 | } |
| 2288 | } |
| 2289 | |
Eunice | d26929a | 2011-01-05 23:35:19 -0800 | [diff] [blame] | 2290 | } |