Fix the threading behaviour of TTS audio requests.

All audio is played back on a separate thread.

Change-Id: I2bbb7b3140f6a04ef705cadb2bd1ae88951e3c48
diff --git a/core/java/android/speech/tts/AudioMessageParams.java b/core/java/android/speech/tts/AudioMessageParams.java
new file mode 100644
index 0000000..db4d622
--- /dev/null
+++ b/core/java/android/speech/tts/AudioMessageParams.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.speech.tts;
+
+import android.speech.tts.TextToSpeechService.UtteranceCompletedDispatcher;
+
+class AudioMessageParams extends MessageParams {
+    private final BlockingMediaPlayer mPlayer;
+
+    AudioMessageParams(UtteranceCompletedDispatcher dispatcher, BlockingMediaPlayer player) {
+        super(dispatcher);
+        mPlayer = player;
+    }
+
+    BlockingMediaPlayer getPlayer() {
+        return mPlayer;
+    }
+
+    @Override
+    int getType() {
+        return TYPE_AUDIO;
+    }
+
+}
diff --git a/core/java/android/speech/tts/AudioPlaybackHandler.java b/core/java/android/speech/tts/AudioPlaybackHandler.java
new file mode 100644
index 0000000..924bbbc
--- /dev/null
+++ b/core/java/android/speech/tts/AudioPlaybackHandler.java
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.speech.tts;
+
+import android.media.AudioFormat;
+import android.media.AudioTrack;
+import android.os.Handler;
+import android.os.Looper;
+import android.os.Message;
+import android.speech.tts.SynthesisMessageParams.ListEntry;
+import android.util.Log;
+
+class AudioPlaybackHandler extends Handler {
+    private static final String TAG = "TTS.AudioPlaybackHandler";
+    private static final boolean DBG = false;
+
+    private static final int MIN_AUDIO_BUFFER_SIZE = 8192;
+
+    private static final int SYNTHESIS_START = 1;
+    private static final int SYNTHESIS_DATA_AVAILABLE = 2;
+    private static final int SYNTHESIS_COMPLETE_DATA_AVAILABLE = 3;
+    private static final int SYNTHESIS_DONE = 4;
+
+    private static final int PLAY_AUDIO = 5;
+    private static final int PLAY_SILENCE = 6;
+
+    // Accessed by multiple threads, synchronized by "this".
+    private MessageParams mCurrentParams;
+    // Used only for bookkeeping and error detection.
+    private SynthesisMessageParams mLastSynthesisRequest;
+
+    AudioPlaybackHandler(Looper looper) {
+        super(looper);
+    }
+
+    @Override
+    public synchronized void handleMessage(Message msg) {
+        if (msg.what == SYNTHESIS_START) {
+            mCurrentParams = (SynthesisMessageParams) msg.obj;
+            handleSynthesisStart(msg);
+        } else if (msg.what == SYNTHESIS_DATA_AVAILABLE) {
+            handleSynthesisDataAvailable(msg);
+        } else if (msg.what == SYNTHESIS_DONE) {
+            handleSynthesisDone(msg);
+        } else if (msg.what == SYNTHESIS_COMPLETE_DATA_AVAILABLE) {
+            handleSynthesisCompleteDataAvailable(msg);
+        } else if (msg.what == PLAY_AUDIO) {
+            handleAudio(msg);
+        } else if (msg.what == PLAY_SILENCE) {
+            handleSilence(msg);
+        }
+
+        mCurrentParams = null;
+    }
+
+    /**
+     * Stops all synthesis for a given {@code token}. If {@code token}
+     * is currently being processed, an effort will be made to stop it
+     * but that is not guaranteed.
+     */
+    synchronized public void stop(MessageParams token) {
+        removeCallbacksAndMessages(token);
+
+        if (token.getType() == MessageParams.TYPE_SYNTHESIS) {
+            sendMessageAtFrontOfQueue(obtainMessage(SYNTHESIS_DONE, token));
+        } else if (token == mCurrentParams) {
+            if (token.getType() == MessageParams.TYPE_AUDIO) {
+                ((AudioMessageParams) mCurrentParams).getPlayer().stop();
+            } else if (token.getType() == MessageParams.TYPE_SILENCE) {
+                ((SilenceMessageParams) mCurrentParams).getConditionVariable().open();
+            }
+        }
+    }
+
+    /**
+     * Shut down the audio playback thread.
+     */
+    synchronized public void quit() {
+        if (mCurrentParams != null) {
+            stop(mCurrentParams);
+        }
+        getLooper().quit();
+    }
+
+    void enqueueSynthesisStart(SynthesisMessageParams token) {
+        sendMessage(obtainMessage(SYNTHESIS_START, token));
+    }
+
+    void enqueueSynthesisDataAvailable(SynthesisMessageParams token) {
+        sendMessage(obtainMessage(SYNTHESIS_DATA_AVAILABLE, token));
+    }
+
+    void enqueueSynthesisCompleteDataAvailable(SynthesisMessageParams token) {
+        sendMessage(obtainMessage(SYNTHESIS_COMPLETE_DATA_AVAILABLE, token));
+    }
+
+    void enqueueSynthesisDone(SynthesisMessageParams token) {
+        sendMessage(obtainMessage(SYNTHESIS_DONE, token));
+    }
+
+    void enqueueAudio(AudioMessageParams token) {
+        sendMessage(obtainMessage(PLAY_AUDIO, token));
+    }
+
+    void enqueueSilence(SilenceMessageParams token) {
+        sendMessage(obtainMessage(PLAY_SILENCE, token));
+    }
+
+    // -----------------------------------------
+    // End of public API methods.
+    // -----------------------------------------
+
+    // Currently implemented as blocking the audio playback thread for the
+    // specified duration. If a call to stop() is made, the thread
+    // unblocks.
+    private void handleSilence(Message msg) {
+        if (DBG) Log.d(TAG, "handleSilence()");
+        SilenceMessageParams params = (SilenceMessageParams) msg.obj;
+        if (params.getSilenceDurationMs() > 0) {
+            params.getConditionVariable().block(params.getSilenceDurationMs());
+        }
+        params.getDispatcher().dispatchUtteranceCompleted();
+        if (DBG) Log.d(TAG, "handleSilence() done.");
+    }
+
+    // Plays back audio from a given URI. No TTS engine involvement here.
+    private void handleAudio(Message msg) {
+        if (DBG) Log.d(TAG, "handleAudio()");
+        AudioMessageParams params = (AudioMessageParams) msg.obj;
+        // Note that the BlockingMediaPlayer spawns a separate thread.
+        //
+        // TODO: This can be avoided.
+        params.getPlayer().startAndWait();
+        params.getDispatcher().dispatchUtteranceCompleted();
+        if (DBG) Log.d(TAG, "handleAudio() done.");
+    }
+
+    // Denotes the start of a new synthesis request. We create a new
+    // audio track, and prepare it for incoming data.
+    //
+    // Note that since all TTS synthesis happens on a single thread, we
+    // should ALWAYS see the following order :
+    //
+    // handleSynthesisStart -> handleSynthesisDataAvailable(*) -> handleSynthesisDone
+    // OR
+    // handleSynthesisCompleteDataAvailable.
+    private void handleSynthesisStart(Message msg) {
+        if (DBG) Log.d(TAG, "handleSynthesisStart()");
+        final SynthesisMessageParams param = (SynthesisMessageParams) msg.obj;
+
+        // Oops, looks like the engine forgot to call done(). We go through
+        // extra trouble to clean the data to prevent the AudioTrack resources
+        // from being leaked.
+        if (mLastSynthesisRequest != null) {
+            Log.w(TAG, "Error : Missing call to done() for request : " +
+                    mLastSynthesisRequest);
+            handleSynthesisDone(mLastSynthesisRequest);
+        }
+
+        mLastSynthesisRequest = param;
+
+        // Create the audio track.
+        final AudioTrack audioTrack = createStreamingAudioTrack(
+                param.mStreamType, param.mSampleRateInHz, param.mAudioFormat,
+                param.mChannelCount, param.mVolume, param.mPan);
+
+        param.setAudioTrack(audioTrack);
+    }
+
+    // More data available to be flushed to the audio track.
+    private void handleSynthesisDataAvailable(Message msg) {
+        final SynthesisMessageParams param = (SynthesisMessageParams) msg.obj;
+        if (param.getAudioTrack() == null) {
+            Log.w(TAG, "Error : null audio track in handleDataAvailable.");
+            return;
+        }
+
+        if (param != mLastSynthesisRequest) {
+            Log.e(TAG, "Call to dataAvailable without done() / start()");
+            return;
+        }
+
+        final AudioTrack audioTrack = param.getAudioTrack();
+        final ListEntry bufferCopy = param.getNextBuffer();
+
+        if (bufferCopy == null) {
+            Log.e(TAG, "No buffers available to play.");
+            return;
+        }
+
+        int playState = audioTrack.getPlayState();
+        if (playState == AudioTrack.PLAYSTATE_STOPPED) {
+            if (DBG) Log.d(TAG, "AudioTrack stopped, restarting : " + audioTrack.hashCode());
+            audioTrack.play();
+        }
+        int count = 0;
+        while (count < bufferCopy.mLength) {
+            // Note that we don't take bufferCopy.mOffset into account because
+            // it is guaranteed to be 0.
+            int written = audioTrack.write(bufferCopy.mBytes, count, bufferCopy.mLength);
+            if (written <= 0) {
+                break;
+            }
+            count += written;
+        }
+    }
+
+    private void handleSynthesisDone(Message msg) {
+        final SynthesisMessageParams params = (SynthesisMessageParams) msg.obj;
+        handleSynthesisDone(params);
+    }
+
+    // Flush all remaining data to the audio track, stop it and release
+    // all its resources.
+    private void handleSynthesisDone(SynthesisMessageParams params) {
+        if (DBG) Log.d(TAG, "handleSynthesisDone()");
+        final AudioTrack audioTrack = params.getAudioTrack();
+
+        try {
+            if (audioTrack != null) {
+                audioTrack.flush();
+                audioTrack.stop();
+                audioTrack.release();
+            }
+        } finally {
+            params.setAudioTrack(null);
+            params.getDispatcher().dispatchUtteranceCompleted();
+            mLastSynthesisRequest = null;
+        }
+    }
+
+    private void handleSynthesisCompleteDataAvailable(Message msg) {
+        final SynthesisMessageParams params = (SynthesisMessageParams) msg.obj;
+        if (DBG) Log.d(TAG, "completeAudioAvailable(" + params + ")");
+
+        // Channel config and bytes per frame are checked before
+        // this message is sent.
+        int channelConfig = AudioPlaybackHandler.getChannelConfig(params.mChannelCount);
+        int bytesPerFrame = AudioPlaybackHandler.getBytesPerFrame(params.mAudioFormat);
+
+        ListEntry entry = params.getNextBuffer();
+
+        if (entry == null) {
+            Log.w(TAG, "completeDataAvailable : No buffers available to play.");
+            return;
+        }
+
+        final AudioTrack audioTrack = new AudioTrack(params.mStreamType, params.mSampleRateInHz,
+                channelConfig, params.mAudioFormat, entry.mLength, AudioTrack.MODE_STATIC);
+
+        // So that handleSynthesisDone can access this correctly.
+        params.mAudioTrack = audioTrack;
+
+        try {
+            audioTrack.write(entry.mBytes, entry.mOffset, entry.mLength);
+            setupVolume(audioTrack, params.mVolume, params.mPan);
+            audioTrack.play();
+            blockUntilDone(audioTrack, bytesPerFrame, entry.mLength);
+            if (DBG) Log.d(TAG, "Wrote data to audio track successfully : " + entry.mLength);
+        } catch (IllegalStateException ex) {
+            Log.e(TAG, "Playback error", ex);
+        } finally {
+            handleSynthesisDone(msg);
+        }
+    }
+
+
+    private static void blockUntilDone(AudioTrack audioTrack, int bytesPerFrame, int length) {
+        int lengthInFrames = length / bytesPerFrame;
+        int currentPosition = 0;
+        while ((currentPosition = audioTrack.getPlaybackHeadPosition()) < lengthInFrames) {
+            long estimatedTimeMs = ((lengthInFrames - currentPosition) * 1000) /
+                    audioTrack.getSampleRate();
+            audioTrack.getPlayState();
+            if (DBG) Log.d(TAG, "About to sleep for : " + estimatedTimeMs + " ms," +
+                    " Playback position : " + currentPosition);
+            try {
+                Thread.sleep(estimatedTimeMs);
+            } catch (InterruptedException ie) {
+                break;
+            }
+        }
+    }
+
+    private static AudioTrack createStreamingAudioTrack(int streamType, int sampleRateInHz,
+            int audioFormat, int channelCount, float volume, float pan) {
+        int channelConfig = getChannelConfig(channelCount);
+
+        int minBufferSizeInBytes
+                = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
+        int bufferSizeInBytes = Math.max(MIN_AUDIO_BUFFER_SIZE, minBufferSizeInBytes);
+
+        AudioTrack audioTrack = new AudioTrack(streamType, sampleRateInHz, channelConfig,
+                audioFormat, bufferSizeInBytes, AudioTrack.MODE_STREAM);
+        if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
+            Log.w(TAG, "Unable to create audio track.");
+            audioTrack.release();
+            return null;
+        }
+
+        setupVolume(audioTrack, volume, pan);
+        return audioTrack;
+    }
+
+    static int getChannelConfig(int channelCount) {
+        if (channelCount == 1) {
+            return AudioFormat.CHANNEL_OUT_MONO;
+        } else if (channelCount == 2){
+            return AudioFormat.CHANNEL_OUT_STEREO;
+        }
+
+        return 0;
+    }
+
+    static int getBytesPerFrame(int audioFormat) {
+        if (audioFormat == AudioFormat.ENCODING_PCM_8BIT) {
+            return 1;
+        } else if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
+            return 2;
+        }
+
+        return -1;
+    }
+
+    private static void setupVolume(AudioTrack audioTrack, float volume, float pan) {
+        float vol = clip(volume, 0.0f, 1.0f);
+        float panning = clip(pan, -1.0f, 1.0f);
+        float volLeft = vol;
+        float volRight = vol;
+        if (panning > 0.0f) {
+            volLeft *= (1.0f - panning);
+        } else if (panning < 0.0f) {
+            volRight *= (1.0f + panning);
+        }
+        if (DBG) Log.d(TAG, "volLeft=" + volLeft + ",volRight=" + volRight);
+        if (audioTrack.setStereoVolume(volLeft, volRight) != AudioTrack.SUCCESS) {
+            Log.e(TAG, "Failed to set volume");
+        }
+    }
+
+    private static float clip(float value, float min, float max) {
+        return value > max ? max : (value < min ? min : value);
+    }
+
+}
diff --git a/core/java/android/speech/tts/MessageParams.java b/core/java/android/speech/tts/MessageParams.java
new file mode 100644
index 0000000..2d96df4
--- /dev/null
+++ b/core/java/android/speech/tts/MessageParams.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.speech.tts;
+
+import android.speech.tts.TextToSpeechService.UtteranceCompletedDispatcher;
+
+abstract class MessageParams {
+    private final UtteranceCompletedDispatcher mDispatcher;
+
+    static final int TYPE_SYNTHESIS = 1;
+    static final int TYPE_AUDIO = 2;
+    static final int TYPE_SILENCE = 3;
+
+    MessageParams(UtteranceCompletedDispatcher dispatcher) {
+        mDispatcher = dispatcher;
+    }
+
+    UtteranceCompletedDispatcher getDispatcher() {
+        return mDispatcher;
+    }
+
+    abstract int getType();
+}
diff --git a/core/java/android/speech/tts/PlaybackSynthesisRequest.java b/core/java/android/speech/tts/PlaybackSynthesisRequest.java
index d698b54..34b263c 100644
--- a/core/java/android/speech/tts/PlaybackSynthesisRequest.java
+++ b/core/java/android/speech/tts/PlaybackSynthesisRequest.java
@@ -15,10 +15,8 @@
  */
 package android.speech.tts;
 
-import android.media.AudioFormat;
-import android.media.AudioTrack;
 import android.os.Bundle;
-import android.os.Handler;
+import android.speech.tts.TextToSpeechService.UtteranceCompletedDispatcher;
 import android.util.Log;
 
 /**
@@ -49,54 +47,48 @@
      */
     private final float mPan;
 
+    /**
+     * Guards {@link #mAudioTrackHandler}, {@link #mToken} and {@link #mStopped}.
+     */
     private final Object mStateLock = new Object();
-    private final Handler mAudioTrackHandler;
-    private volatile AudioTrack mAudioTrack = null;
-    private boolean mStopped = false;
-    private boolean mDone = false;
-    private volatile boolean mWriteErrorOccured;
 
-    PlaybackSynthesisRequest(String text, Bundle params,
-            int streamType, float volume, float pan, Handler audioTrackHandler) {
+    // Handler associated with a thread that plays back audio requests.
+    private final AudioPlaybackHandler mAudioTrackHandler;
+    // A request "token", which will be non null after start() or
+    // completeAudioAvailable() have been called.
+    private SynthesisMessageParams mToken = null;
+    // Whether this request has been stopped. This is useful for keeping
+    // track of whether stop() has been called before start(). In all other cases,
+    // a non-null value of mToken will provide the same information.
+    private boolean mStopped = false;
+
+    private volatile boolean mDone = false;
+
+    private final UtteranceCompletedDispatcher mDispatcher;
+
+    PlaybackSynthesisRequest(String text, Bundle params, int streamType, float volume, float pan,
+            AudioPlaybackHandler audioTrackHandler, UtteranceCompletedDispatcher dispatcher) {
         super(text, params);
         mStreamType = streamType;
         mVolume = volume;
         mPan = pan;
         mAudioTrackHandler = audioTrackHandler;
-        mWriteErrorOccured = false;
+        mDispatcher = dispatcher;
     }
 
     @Override
     void stop() {
         if (DBG) Log.d(TAG, "stop()");
+
         synchronized (mStateLock) {
-            mStopped = true;
-            cleanUp();
-        }
-    }
-
-    // Always guarded by mStateLock.
-    private void cleanUp() {
-        if (DBG) Log.d(TAG, "cleanUp()");
-        if (mAudioTrack == null) {
-            return;
-        }
-
-        final AudioTrack audioTrack = mAudioTrack;
-        mAudioTrack = null;
-
-        // Clean up on the audiotrack handler thread.
-        //
-        // NOTE: It isn't very clear whether AudioTrack is thread safe.
-        // If it is we can clean up on the current (synthesis) thread.
-        mAudioTrackHandler.post(new Runnable() {
-            @Override
-            public void run() {
-                audioTrack.flush();
-                audioTrack.stop();
-                audioTrack.release();
+            if (mToken == null || mStopped) {
+                Log.w(TAG, "stop() called twice, before start(), or after done()");
+                return;
             }
-        });
+            mAudioTrackHandler.stop(mToken);
+            mToken = null;
+            mStopped = true;
+        }
     }
 
     @Override
@@ -111,7 +103,6 @@
         return mDone;
     }
 
-    // TODO: add a thread that writes to the AudioTrack?
     @Override
     public int start(int sampleRateInHz, int audioFormat, int channelCount) {
         if (DBG) {
@@ -119,45 +110,28 @@
                     + "," + channelCount + ")");
         }
 
+        int channelConfig = AudioPlaybackHandler.getChannelConfig(channelCount);
+        if (channelConfig == 0) {
+            Log.e(TAG, "Unsupported number of channels :" + channelCount);
+            return TextToSpeech.ERROR;
+        }
+
         synchronized (mStateLock) {
             if (mStopped) {
-                if (DBG) Log.d(TAG, "Request has been aborted.");
+                if (DBG) Log.d(TAG, "stop() called before start(), returning.");
                 return TextToSpeech.ERROR;
             }
-            if (mAudioTrack != null) {
-                Log.e(TAG, "start() called twice");
-                cleanUp();
-                return TextToSpeech.ERROR;
-            }
+            SynthesisMessageParams params = new SynthesisMessageParams(
+                    mStreamType, sampleRateInHz, audioFormat, channelCount, mVolume, mPan,
+                    mDispatcher);
+            mAudioTrackHandler.enqueueSynthesisStart(params);
 
-            mAudioTrack = createStreamingAudioTrack(sampleRateInHz, audioFormat, channelCount);
-            if (mAudioTrack == null) {
-                return TextToSpeech.ERROR;
-            }
+            mToken = params;
         }
 
         return TextToSpeech.SUCCESS;
     }
 
-    private void setupVolume(AudioTrack audioTrack, float volume, float pan) {
-        float vol = clip(volume, 0.0f, 1.0f);
-        float panning = clip(pan, -1.0f, 1.0f);
-        float volLeft = vol;
-        float volRight = vol;
-        if (panning > 0.0f) {
-            volLeft *= (1.0f - panning);
-        } else if (panning < 0.0f) {
-            volRight *= (1.0f + panning);
-        }
-        if (DBG) Log.d(TAG, "volLeft=" + volLeft + ",volRight=" + volRight);
-        if (audioTrack.setStereoVolume(volLeft, volRight) != AudioTrack.SUCCESS) {
-            Log.e(TAG, "Failed to set volume");
-        }
-    }
-
-    private float clip(float value, float min, float max) {
-        return value > max ? max : (value < min ? min : value);
-    }
 
     @Override
     public int audioAvailable(byte[] buffer, int offset, int length) {
@@ -169,195 +143,78 @@
             throw new IllegalArgumentException("buffer is too large or of zero length (" +
                     + length + " bytes)");
         }
+
         synchronized (mStateLock) {
-            if (mWriteErrorOccured) {
-                if (DBG) Log.d(TAG, "Error writing to audio track, count < 0");
+            if (mToken == null) {
                 return TextToSpeech.ERROR;
             }
-            if (mStopped) {
-                if (DBG) Log.d(TAG, "Request has been aborted.");
-                return TextToSpeech.ERROR;
-            }
-            if (mAudioTrack == null) {
-                Log.e(TAG, "audioAvailable(): Not started");
-                return TextToSpeech.ERROR;
-            }
-            final AudioTrack audioTrack = mAudioTrack;
+
             // Sigh, another copy.
             final byte[] bufferCopy = new byte[length];
             System.arraycopy(buffer, offset, bufferCopy, 0, length);
-
-            mAudioTrackHandler.post(new Runnable() {
-                @Override
-                public void run() {
-                    int playState = audioTrack.getPlayState();
-                    if (playState == AudioTrack.PLAYSTATE_STOPPED) {
-                        if (DBG) Log.d(TAG, "AudioTrack stopped, restarting");
-                        audioTrack.play();
-                    }
-                    // TODO: loop until all data is written?
-                    if (DBG) Log.d(TAG, "AudioTrack.write()");
-                    int count = audioTrack.write(bufferCopy, 0, bufferCopy.length);
-                    // The semantics of this change very slightly. Earlier, we would
-                    // report an error immediately, Now we will return an error on
-                    // the next API call, usually done( ) or another audioAvailable( )
-                    // call.
-                    if (count < 0) {
-                        mWriteErrorOccured = true;
-                    }
-                }
-            });
-
-            return TextToSpeech.SUCCESS;
+            mToken.addBuffer(bufferCopy);
+            mAudioTrackHandler.enqueueSynthesisDataAvailable(mToken);
         }
+
+        return TextToSpeech.SUCCESS;
     }
 
     @Override
     public int done() {
         if (DBG) Log.d(TAG, "done()");
+
         synchronized (mStateLock) {
-            if (mWriteErrorOccured) {
-                if (DBG) Log.d(TAG, "Error writing to audio track, count < 0");
+            if (mDone) {
+                Log.w(TAG, "Duplicate call to done()");
                 return TextToSpeech.ERROR;
             }
-            if (mStopped) {
-                if (DBG) Log.d(TAG, "Request has been aborted.");
-                return TextToSpeech.ERROR;
-            }
-            if (mAudioTrack == null) {
-                Log.e(TAG, "done(): Not started");
-                return TextToSpeech.ERROR;
-            }
+
             mDone = true;
-            cleanUp();
+
+            if (mToken == null) {
+                return TextToSpeech.ERROR;
+            }
+
+            mAudioTrackHandler.enqueueSynthesisDone(mToken);
         }
         return TextToSpeech.SUCCESS;
     }
 
     @Override
     public void error() {
-        if (DBG) Log.d(TAG, "error()");
-        synchronized (mStateLock) {
-            cleanUp();
-        }
+        if (DBG) Log.d(TAG, "error() [will call stop]");
+        stop();
     }
 
     @Override
     public int completeAudioAvailable(int sampleRateInHz, int audioFormat, int channelCount,
             byte[] buffer, int offset, int length) {
-        if (DBG) {
-            Log.d(TAG, "completeAudioAvailable(" + sampleRateInHz + "," + audioFormat
-                    + "," + channelCount + "byte[" + buffer.length + "],"
-                    + offset + "," + length + ")");
+        int channelConfig = AudioPlaybackHandler.getChannelConfig(channelCount);
+        if (channelConfig == 0) {
+            Log.e(TAG, "Unsupported number of channels :" + channelCount);
+            return TextToSpeech.ERROR;
+        }
+
+        int bytesPerFrame = AudioPlaybackHandler.getBytesPerFrame(audioFormat);
+        if (bytesPerFrame < 0) {
+            Log.e(TAG, "Unsupported audio format :" + audioFormat);
+            return TextToSpeech.ERROR;
         }
 
         synchronized (mStateLock) {
             if (mStopped) {
-                if (DBG) Log.d(TAG, "Request has been aborted.");
                 return TextToSpeech.ERROR;
             }
-            if (mAudioTrack != null) {
-                Log.e(TAG, "start() called before completeAudioAvailable()");
-                cleanUp();
-                return TextToSpeech.ERROR;
-            }
+            SynthesisMessageParams params = new SynthesisMessageParams(
+                    mStreamType, sampleRateInHz, audioFormat, channelCount, mVolume, mPan,
+                    mDispatcher);
+            params.addBuffer(buffer, offset, length);
 
-            int channelConfig = getChannelConfig(channelCount);
-            if (channelConfig < 0) {
-                Log.e(TAG, "Unsupported number of channels :" + channelCount);
-                cleanUp();
-                return TextToSpeech.ERROR;
-            }
-            int bytesPerFrame = getBytesPerFrame(audioFormat);
-            if (bytesPerFrame < 0) {
-                Log.e(TAG, "Unsupported audio format :" + audioFormat);
-                cleanUp();
-                return TextToSpeech.ERROR;
-            }
-
-            mAudioTrack = new AudioTrack(mStreamType, sampleRateInHz, channelConfig,
-                    audioFormat, buffer.length, AudioTrack.MODE_STATIC);
-            if (mAudioTrack == null) {
-                return TextToSpeech.ERROR;
-            }
-
-            try {
-                mAudioTrack.write(buffer, offset, length);
-                setupVolume(mAudioTrack, mVolume, mPan);
-                mAudioTrack.play();
-                blockUntilDone(mAudioTrack, bytesPerFrame, length);
-                mDone = true;
-                if (DBG) Log.d(TAG, "Wrote data to audio track succesfully : " + length);
-            } catch (IllegalStateException ex) {
-                Log.e(TAG, "Playback error", ex);
-                return TextToSpeech.ERROR;
-            } finally {
-                cleanUp();
-            }
+            mAudioTrackHandler.enqueueSynthesisCompleteDataAvailable(params);
+            mToken = params;
         }
 
         return TextToSpeech.SUCCESS;
     }
 
-    private void blockUntilDone(AudioTrack audioTrack, int bytesPerFrame, int length) {
-        int lengthInFrames = length / bytesPerFrame;
-        int currentPosition = 0;
-        while ((currentPosition = audioTrack.getPlaybackHeadPosition()) < lengthInFrames) {
-            long estimatedTimeMs = ((lengthInFrames - currentPosition) * 1000) /
-                    audioTrack.getSampleRate();
-            if (DBG) Log.d(TAG, "About to sleep for : " + estimatedTimeMs + " ms," +
-                    " Playback position : " + currentPosition);
-            try {
-                Thread.sleep(estimatedTimeMs);
-            } catch (InterruptedException ie) {
-                break;
-            }
-        }
-    }
-
-    private int getBytesPerFrame(int audioFormat) {
-        if (audioFormat == AudioFormat.ENCODING_PCM_8BIT) {
-            return 1;
-        } else if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
-            return 2;
-        }
-
-        return -1;
-    }
-
-    private int getChannelConfig(int channelCount) {
-        if (channelCount == 1) {
-            return AudioFormat.CHANNEL_OUT_MONO;
-        } else if (channelCount == 2){
-            return AudioFormat.CHANNEL_OUT_STEREO;
-        }
-
-        return -1;
-    }
-
-    private AudioTrack createStreamingAudioTrack(int sampleRateInHz, int audioFormat,
-            int channelCount) {
-        int channelConfig = getChannelConfig(channelCount);
-
-        if (channelConfig < 0) {
-            Log.e(TAG, "Unsupported number of channels : " + channelCount);
-            return null;
-        }
-
-        int minBufferSizeInBytes
-                = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
-        int bufferSizeInBytes = Math.max(MIN_AUDIO_BUFFER_SIZE, minBufferSizeInBytes);
-        AudioTrack audioTrack = new AudioTrack(mStreamType, sampleRateInHz, channelConfig,
-                audioFormat, bufferSizeInBytes, AudioTrack.MODE_STREAM);
-        if (audioTrack == null) {
-            return null;
-        }
-
-        if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
-            audioTrack.release();
-            return null;
-        }
-        setupVolume(audioTrack, mVolume, mPan);
-        return audioTrack;
-    }
 }
diff --git a/core/java/android/speech/tts/SilenceMessageParams.java b/core/java/android/speech/tts/SilenceMessageParams.java
new file mode 100644
index 0000000..eee8b68
--- /dev/null
+++ b/core/java/android/speech/tts/SilenceMessageParams.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.speech.tts;
+
+import android.os.ConditionVariable;
+import android.speech.tts.TextToSpeechService.UtteranceCompletedDispatcher;
+
+class SilenceMessageParams extends MessageParams {
+    private final ConditionVariable mCondVar = new ConditionVariable();
+    private final long mSilenceDurationMs;
+
+    SilenceMessageParams(UtteranceCompletedDispatcher dispatcher, long silenceDurationMs) {
+        super(dispatcher);
+        mSilenceDurationMs = silenceDurationMs;
+    }
+
+    long getSilenceDurationMs() {
+        return mSilenceDurationMs;
+    }
+
+    @Override
+    int getType() {
+        return TYPE_SILENCE;
+    }
+
+    ConditionVariable getConditionVariable() {
+        return mCondVar;
+    }
+
+}
diff --git a/core/java/android/speech/tts/SynthesisMessageParams.java b/core/java/android/speech/tts/SynthesisMessageParams.java
new file mode 100644
index 0000000..aabaa5a
--- /dev/null
+++ b/core/java/android/speech/tts/SynthesisMessageParams.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.speech.tts;
+
+import android.media.AudioTrack;
+import android.speech.tts.TextToSpeechService.UtteranceCompletedDispatcher;
+
+import java.util.LinkedList;
+
+/**
+ * Params required to play back a synthesis request.
+ */
+final class SynthesisMessageParams extends MessageParams {
+    final int mStreamType;
+    final int mSampleRateInHz;
+    final int mAudioFormat;
+    final int mChannelCount;
+    final float mVolume;
+    final float mPan;
+
+    public volatile AudioTrack mAudioTrack;
+
+    private final LinkedList<ListEntry> mDataBufferList = new LinkedList<ListEntry>();
+
+    SynthesisMessageParams(int streamType, int sampleRate,
+            int audioFormat, int channelCount,
+            float volume, float pan, UtteranceCompletedDispatcher dispatcher) {
+        super(dispatcher);
+
+        mStreamType = streamType;
+        mSampleRateInHz = sampleRate;
+        mAudioFormat = audioFormat;
+        mChannelCount = channelCount;
+        mVolume = volume;
+        mPan = pan;
+
+        // initially null.
+        mAudioTrack = null;
+    }
+
+    @Override
+    int getType() {
+        return TYPE_SYNTHESIS;
+    }
+
+    synchronized void addBuffer(byte[] buffer, int offset, int length) {
+        mDataBufferList.add(new ListEntry(buffer, offset, length));
+    }
+
+    synchronized void addBuffer(byte[] buffer) {
+        mDataBufferList.add(new ListEntry(buffer, 0, buffer.length));
+    }
+
+    synchronized ListEntry getNextBuffer() {
+        return mDataBufferList.poll();
+    }
+
+
+    void setAudioTrack(AudioTrack audioTrack) {
+        mAudioTrack = audioTrack;
+    }
+
+    AudioTrack getAudioTrack() {
+        return mAudioTrack;
+    }
+
+    static final class ListEntry {
+        final byte[] mBytes;
+        final int mOffset;
+        final int mLength;
+
+        ListEntry(byte[] bytes, int offset, int length) {
+            mBytes = bytes;
+            mOffset = offset;
+            mLength = length;
+        }
+    }
+}
+
diff --git a/core/java/android/speech/tts/TextToSpeechService.java b/core/java/android/speech/tts/TextToSpeechService.java
index ddd3252..e553f77 100644
--- a/core/java/android/speech/tts/TextToSpeechService.java
+++ b/core/java/android/speech/tts/TextToSpeechService.java
@@ -51,7 +51,10 @@
     private static final String SYNTH_THREAD_NAME = "SynthThread";
 
     private SynthHandler mSynthHandler;
-    private Handler mAudioTrackHandler;
+    // A thread and its associated handler for playing back any audio
+    // associated with this TTS engine. Handles all playback requests except
+    // synthesis-to-file requests, which occur on the synthesis thread.
+    private AudioPlaybackHandler mAudioPlaybackHandler;
 
     private CallbackMap mCallbacks;
 
@@ -68,7 +71,7 @@
 
         HandlerThread audioTrackThread = new HandlerThread("TTS.audioTrackThread");
         audioTrackThread.start();
-        mAudioTrackHandler = new Handler(audioTrackThread.getLooper());
+        mAudioPlaybackHandler = new AudioPlaybackHandler(audioTrackThread.getLooper());
 
         mCallbacks = new CallbackMap();
 
@@ -83,8 +86,8 @@
 
         // Tell the synthesizer to stop
         mSynthHandler.quit();
-        mAudioTrackHandler.getLooper().quit();
-
+        // Tell the audio playback thread to stop.
+        mAudioPlaybackHandler.quit();
         // Unregister all callbacks.
         mCallbacks.kill();
 
@@ -236,13 +239,6 @@
             super(looper);
         }
 
-        private void dispatchUtteranceCompleted(SpeechItem item) {
-            String utteranceId = item.getUtteranceId();
-            if (!TextUtils.isEmpty(utteranceId)) {
-                mCallbacks.dispatchUtteranceCompleted(item.getCallingApp(), utteranceId);
-            }
-        }
-
         private synchronized SpeechItem getCurrentSpeechItem() {
             return mCurrentSpeechItem;
         }
@@ -286,9 +282,7 @@
                 @Override
                 public void run() {
                     setCurrentSpeechItem(speechItem);
-                    if (speechItem.play() == TextToSpeech.SUCCESS) {
-                        dispatchUtteranceCompleted(speechItem);
-                    }
+                    speechItem.play();
                     setCurrentSpeechItem(null);
                 }
             };
@@ -318,14 +312,19 @@
             if (current != null && TextUtils.equals(callingApp, current.getCallingApp())) {
                 current.stop();
             }
+
             return TextToSpeech.SUCCESS;
         }
     }
 
+    interface UtteranceCompletedDispatcher {
+        public void dispatchUtteranceCompleted();
+    }
+
     /**
      * An item in the synth thread queue.
      */
-    private static abstract class SpeechItem {
+    private abstract class SpeechItem implements UtteranceCompletedDispatcher {
         private final String mCallingApp;
         protected final Bundle mParams;
         private boolean mStarted = false;
@@ -380,6 +379,13 @@
             stopImpl();
         }
 
+        public void dispatchUtteranceCompleted() {
+            final String utteranceId = getUtteranceId();
+            if (!TextUtils.isEmpty(utteranceId)) {
+                mCallbacks.dispatchUtteranceCompleted(getCallingApp(), utteranceId);
+            }
+        }
+
         protected abstract int playImpl();
 
         protected abstract void stopImpl();
@@ -413,7 +419,7 @@
         }
     }
 
-    private class SynthesisSpeechItem extends SpeechItem {
+    class SynthesisSpeechItem extends SpeechItem {
         private final String mText;
         private SynthesisRequest mSynthesisRequest;
 
@@ -453,7 +459,8 @@
 
         protected SynthesisRequest createSynthesisRequest() {
             return new PlaybackSynthesisRequest(mText, mParams,
-                    getStreamType(), getVolume(), getPan(), mAudioTrackHandler);
+                    getStreamType(), getVolume(), getPan(), mAudioPlaybackHandler,
+                    this);
         }
 
         private void setRequestParams(SynthesisRequest request) {
@@ -526,6 +533,15 @@
             return new FileSynthesisRequest(getText(), mParams, mFile);
         }
 
+        @Override
+        protected int playImpl() {
+            int status = super.playImpl();
+            if (status == TextToSpeech.SUCCESS) {
+                dispatchUtteranceCompleted();
+            }
+            return status;
+        }
+
         /**
          * Checks that the given file can be used for synthesis output.
          */
@@ -557,6 +573,7 @@
     private class AudioSpeechItem extends SpeechItem {
 
         private final BlockingMediaPlayer mPlayer;
+        private AudioMessageParams mToken;
 
         public AudioSpeechItem(String callingApp, Bundle params, Uri uri) {
             super(callingApp, params);
@@ -570,23 +587,26 @@
 
         @Override
         protected int playImpl() {
-            return mPlayer.startAndWait() ? TextToSpeech.SUCCESS : TextToSpeech.ERROR;
+            mToken = new AudioMessageParams(this, mPlayer);
+            mAudioPlaybackHandler.enqueueAudio(mToken);
+            return TextToSpeech.SUCCESS;
         }
 
         @Override
         protected void stopImpl() {
-            mPlayer.stop();
+            if (mToken != null) {
+                mAudioPlaybackHandler.stop(mToken);
+            }
         }
     }
 
     private class SilenceSpeechItem extends SpeechItem {
         private final long mDuration;
-        private final ConditionVariable mDone;
+        private SilenceMessageParams mToken;
 
         public SilenceSpeechItem(String callingApp, Bundle params, long duration) {
             super(callingApp, params);
             mDuration = duration;
-            mDone = new ConditionVariable();
         }
 
         @Override
@@ -596,13 +616,16 @@
 
         @Override
         protected int playImpl() {
-            boolean aborted = mDone.block(mDuration);
-            return aborted ? TextToSpeech.ERROR : TextToSpeech.SUCCESS;
+            mToken = new SilenceMessageParams(this, mDuration);
+            mAudioPlaybackHandler.enqueueSilence(mToken);
+            return TextToSpeech.SUCCESS;
         }
 
         @Override
         protected void stopImpl() {
-            mDone.open();
+            if (mToken != null) {
+                mAudioPlaybackHandler.stop(mToken);
+            }
         }
     }