Merge "Fix theoretical race using TrackBase::sampleRate()"
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index d10f2e5..c51f265 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -242,7 +242,7 @@
         return;
     }
 
-    if (strchr(value, '=') || strchr(key, ';')) {
+    if (strchr(value, '=') || strchr(value, ';')) {
         //XXX ALOGE("Value \"%s\"contains invalid character (= or ;)", value);
         return;
     }
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
index fec5461..1040415 100644
--- a/camera/ProCamera.cpp
+++ b/camera/ProCamera.cpp
@@ -247,7 +247,8 @@
     sp <IProCameraUser> c = mCamera;
     if (c == 0) return NO_INIT;
 
-    sp<CpuConsumer> cc = new CpuConsumer(heapCount, synchronousMode);
+    sp<BufferQueue> bq = new BufferQueue();
+    sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount/*, synchronousMode*/);
     cc->setName(String8("ProCamera::mCpuConsumer"));
 
     sp<Surface> stc = new Surface(
diff --git a/camera/photography/ICameraDeviceUser.cpp b/camera/photography/ICameraDeviceUser.cpp
index 0515bd7..95609da 100644
--- a/camera/photography/ICameraDeviceUser.cpp
+++ b/camera/photography/ICameraDeviceUser.cpp
@@ -40,6 +40,7 @@
     CREATE_STREAM,
     CREATE_DEFAULT_REQUEST,
     GET_CAMERA_INFO,
+    WAIT_UNTIL_IDLE,
 };
 
 class BpCameraDeviceUser : public BpInterface<ICameraDeviceUser>
@@ -151,26 +152,36 @@
     }
 
 
-    virtual status_t getCameraInfo(int cameraId, camera_metadata** info)
+    virtual status_t getCameraInfo(CameraMetadata* info)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
         remote()->transact(GET_CAMERA_INFO, data, &reply);
 
-
         reply.readExceptionCode();
         status_t result = reply.readInt32();
 
+        CameraMetadata out;
         if (reply.readInt32() != 0) {
-            CameraMetadata::readFromParcel(reply, /*out*/info);
-        } else if (info) {
-            *info = NULL;
+            out.readFromParcel(&reply);
+        }
+
+        if (info != NULL) {
+            info->swap(out);
         }
 
         return result;
     }
 
+    virtual status_t waitUntilIdle()
+    {
+        ALOGV("waitUntilIdle");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        remote()->transact(WAIT_UNTIL_IDLE, data, &reply);
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
 
 private:
 
@@ -273,6 +284,7 @@
             reply->writeNoException();
             reply->writeInt32(ret);
 
+            // out-variables are after exception and return value
             reply->writeInt32(1); // to mark presence of metadata object
             request.writeToParcel(const_cast<Parcel*>(reply));
 
@@ -281,22 +293,25 @@
         case GET_CAMERA_INFO: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
 
-            int cameraId = data.readInt32();
-
-            camera_metadata_t* info = NULL;
+            CameraMetadata info;
             status_t ret;
-            ret = getCameraInfo(cameraId, &info);
-
-            reply->writeInt32(1); // to mark presence of metadata object
-            CameraMetadata::writeToParcel(*reply, info);
+            ret = getCameraInfo(&info);
 
             reply->writeNoException();
             reply->writeInt32(ret);
 
-            free_camera_metadata(info);
+            // out-variables are after exception and return value
+            reply->writeInt32(1); // to mark presence of metadata object
+            info.writeToParcel(reply);
 
             return NO_ERROR;
         } break;
+        case WAIT_UNTIL_IDLE: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            reply->writeNoException();
+            reply->writeInt32(waitUntilIdle());
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
new file mode 100644
index 0000000..b4a5947
--- /dev/null
+++ b/cmds/screenrecord/Android.mk
@@ -0,0 +1,38 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+	screenrecord.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright libmedia libutils libbinder libstagefright_foundation \
+	libjpeg libgui libcutils liblog
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright \
+	frameworks/av/media/libstagefright/include \
+	$(TOP)/frameworks/native/include/media/openmax \
+	external/jpeg
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_MODULE:= screenrecord
+
+include $(BUILD_EXECUTABLE)
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
new file mode 100644
index 0000000..3e79ee0
--- /dev/null
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ScreenRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <utils/Errors.h>
+#include <utils/Thread.h>
+
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <gui/ISurfaceComposer.h>
+#include <ui/DisplayInfo.h>
+#include <media/openmax/OMX_IVCommon.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaMuxer.h>
+#include <media/ICrypto.h>
+
+#include <stdio.h>
+#include <signal.h>
+#include <getopt.h>
+
+using namespace android;
+
+// Command-line parameters.
+static bool gVerbose = false;               // chatty on stdout
+static bool gRotate = false;                // rotate 90 degrees
+static uint32_t gVideoWidth = 1280;         // 720p
+static uint32_t gVideoHeight = 720;
+static uint32_t gBitRate = 4000000;         // 4Mbps
+
+// Set by signal handler to stop recording.
+static bool gStopRequested;
+
+// Previous signal handler state, restored after first hit.
+static struct sigaction gOrigSigactionINT;
+static struct sigaction gOrigSigactionHUP;
+
+static const uint32_t kMinBitRate = 100000;         // 0.1Mbps
+static const uint32_t kMaxBitRate = 100 * 1000000;  // 100Mbps
+
+/*
+ * Catch keyboard interrupt signals.  On receipt, the "stop requested"
+ * flag is raised, and the original handler is restored (so that, if
+ * we get stuck finishing, a second Ctrl-C will kill the process).
+ */
+static void signalCatcher(int signum)
+{
+    gStopRequested = true;
+    switch (signum) {
+    case SIGINT:
+        sigaction(SIGINT, &gOrigSigactionINT, NULL);
+        break;
+    case SIGHUP:
+        sigaction(SIGHUP, &gOrigSigactionHUP, NULL);
+        break;
+    default:
+        abort();
+        break;
+    }
+}
+
+/*
+ * Configures signal handlers.  The previous handlers are saved.
+ *
+ * If the command is run from an interactive adb shell, we get SIGINT
+ * when Ctrl-C is hit.  If we're run from the host, the local adb process
+ * gets the signal, and we get a SIGHUP when the terminal disconnects.
+ */
+static status_t configureSignals()
+{
+    struct sigaction act;
+    memset(&act, 0, sizeof(act));
+    act.sa_handler = signalCatcher;
+    if (sigaction(SIGINT, &act, &gOrigSigactionINT) != 0) {
+        status_t err = -errno;
+        fprintf(stderr, "Unable to configure SIGINT handler: %s\n",
+                strerror(errno));
+        return err;
+    }
+    if (sigaction(SIGHUP, &act, &gOrigSigactionHUP) != 0) {
+        status_t err = -errno;
+        fprintf(stderr, "Unable to configure SIGHUP handler: %s\n",
+                strerror(errno));
+        return err;
+    }
+    return NO_ERROR;
+}
+
+/*
+ * Configures and starts the MediaCodec encoder.  Obtains an input surface
+ * from the codec.
+ */
+static status_t prepareEncoder(float displayFps, sp<MediaCodec>* pCodec,
+        sp<IGraphicBufferProducer>* pBufferProducer) {
+    status_t err;
+
+    sp<AMessage> format = new AMessage;
+    format->setInt32("width", gVideoWidth);
+    format->setInt32("height", gVideoHeight);
+    format->setString("mime", "video/avc");
+    format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+    format->setInt32("bitrate", gBitRate);
+    format->setFloat("frame-rate", displayFps);
+    format->setInt32("i-frame-interval", 10);
+
+    /// MediaCodec
+    sp<ALooper> looper = new ALooper;
+    looper->setName("screenrecord_looper");
+    looper->start();
+    ALOGV("Creating codec");
+    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true);
+    err = codec->configure(format, NULL, NULL,
+            MediaCodec::CONFIGURE_FLAG_ENCODE);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to configure codec (err=%d)\n", err);
+        return err;
+    }
+
+    ALOGV("Creating buffer producer");
+    sp<IGraphicBufferProducer> bufferProducer;
+    err = codec->createInputSurface(&bufferProducer);
+    if (err != NO_ERROR) {
+        fprintf(stderr,
+            "ERROR: unable to create encoder input surface (err=%d)\n", err);
+        return err;
+    }
+
+    ALOGV("Starting codec");
+    err = codec->start();
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to start codec (err=%d)\n", err);
+        return err;
+    }
+
+    *pCodec = codec;
+    *pBufferProducer = bufferProducer;
+    return 0;
+}
+
+/*
+ * Configures the virtual display.  When this completes, virtual display
+ * frames will start being sent to the encoder's surface.
+ */
+static status_t prepareVirtualDisplay(const DisplayInfo& mainDpyInfo,
+        const sp<IGraphicBufferProducer>& bufferProducer,
+        sp<IBinder>* pDisplayHandle) {
+    status_t err;
+
+    // Set the region of the layer stack we're interested in, which in our
+    // case is "all of it".  If the app is rotated (so that the width of the
+    // app is based on the height of the display), reverse width/height.
+    bool deviceRotated = mainDpyInfo.orientation != DISPLAY_ORIENTATION_0 &&
+            mainDpyInfo.orientation != DISPLAY_ORIENTATION_180;
+    uint32_t sourceWidth, sourceHeight;
+    if (!deviceRotated) {
+        sourceWidth = mainDpyInfo.w;
+        sourceHeight = mainDpyInfo.h;
+    } else {
+        ALOGV("using rotated width/height");
+        sourceHeight = mainDpyInfo.w;
+        sourceWidth = mainDpyInfo.h;
+    }
+    Rect layerStackRect(sourceWidth, sourceHeight);
+
+    // We need to preserve the aspect ratio of the display.
+    float displayAspect = (float) sourceHeight / (float) sourceWidth;
+
+
+    // Set the way we map the output onto the display surface (which will
+    // be e.g. 1280x720 for a 720p video).  The rect is interpreted
+    // post-rotation, so if the display is rotated 90 degrees we need to
+    // "pre-rotate" it by flipping width/height, so that the orientation
+    // adjustment changes it back.
+    //
+    // We might want to encode a portrait display as landscape to use more
+    // of the screen real estate.  (If players respect a 90-degree rotation
+    // hint, we can essentially get a 720x1280 video instead of 1280x720.)
+    // In that case, we swap the configured video width/height and then
+    // supply a rotation value to the display projection.
+    uint32_t videoWidth, videoHeight;
+    uint32_t outWidth, outHeight;
+    if (!gRotate) {
+        videoWidth = gVideoWidth;
+        videoHeight = gVideoHeight;
+    } else {
+        videoWidth = gVideoHeight;
+        videoHeight = gVideoWidth;
+    }
+    if (videoHeight > (uint32_t)(videoWidth * displayAspect)) {
+        // limited by narrow width; reduce height
+        outWidth = videoWidth;
+        outHeight = (uint32_t)(videoWidth * displayAspect);
+    } else {
+        // limited by short height; restrict width
+        outHeight = videoHeight;
+        outWidth = (uint32_t)(videoHeight / displayAspect);
+    }
+    uint32_t offX, offY;
+    offX = (videoWidth - outWidth) / 2;
+    offY = (videoHeight - outHeight) / 2;
+    Rect displayRect(offX, offY, offX + outWidth, offY + outHeight);
+
+    if (gVerbose) {
+        if (gRotate) {
+            printf("Rotated content area is %ux%u at offset x=%d y=%d\n",
+                    outHeight, outWidth, offY, offX);
+        } else {
+            printf("Content area is %ux%u at offset x=%d y=%d\n",
+                    outWidth, outHeight, offX, offY);
+        }
+    }
+
+
+    sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
+            String8("ScreenRecorder"), false /* secure */);
+
+    SurfaceComposerClient::openGlobalTransaction();
+    SurfaceComposerClient::setDisplaySurface(dpy, bufferProducer);
+    SurfaceComposerClient::setDisplayProjection(dpy,
+            gRotate ? DISPLAY_ORIENTATION_90 : DISPLAY_ORIENTATION_0,
+            layerStackRect, displayRect);
+    SurfaceComposerClient::setDisplayLayerStack(dpy, 0);    // default stack
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    *pDisplayHandle = dpy;
+
+    return NO_ERROR;
+}
+
+/*
+ * Runs the MediaCodec encoder, sending the output to the MediaMuxer.  The
+ * input frames are coming from the virtual display as fast as SurfaceFlinger
+ * wants to send them.
+ *
+ * The muxer must *not* have been started before calling.
+ */
+static status_t runEncoder(const sp<MediaCodec>& encoder,
+        const sp<MediaMuxer>& muxer) {
+    static int kTimeout = 250000;   // be responsive on signal
+    status_t err;
+    ssize_t trackIdx = -1;
+    uint32_t debugNumFrames = 0;
+    time_t debugStartWhen = time(NULL);
+
+    Vector<sp<ABuffer> > buffers;
+    err = encoder->getOutputBuffers(&buffers);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "Unable to get output buffers (err=%d)\n", err);
+        return err;
+    }
+
+    // This is set by the signal handler.
+    gStopRequested = false;
+
+    // Run until we're signaled.
+    while (!gStopRequested) {
+        size_t bufIndex, offset, size;
+        int64_t ptsUsec;
+        uint32_t flags;
+        ALOGV("Calling dequeueOutputBuffer");
+        err = encoder->dequeueOutputBuffer(&bufIndex, &offset, &size, &ptsUsec,
+                &flags, kTimeout);
+        ALOGV("dequeueOutputBuffer returned %d", err);
+        switch (err) {
+        case NO_ERROR:
+            // got a buffer
+            if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) {
+                // ignore this -- we passed the CSD into MediaMuxer when
+                // we got the format change notification
+                ALOGV("Got codec config buffer (%u bytes); ignoring", size);
+                size = 0;
+            }
+            if (size != 0) {
+                ALOGV("Got data in buffer %d, size=%d, pts=%lld",
+                        bufIndex, size, ptsUsec);
+                CHECK(trackIdx != -1);
+
+                // The MediaMuxer docs are unclear, but it appears that we
+                // need to pass either the full set of BufferInfo flags, or
+                // (flags & BUFFER_FLAG_SYNCFRAME).
+                err = muxer->writeSampleData(buffers[bufIndex], trackIdx,
+                        ptsUsec, flags);
+                if (err != NO_ERROR) {
+                    fprintf(stderr, "Failed writing data to muxer (err=%d)\n",
+                            err);
+                    return err;
+                }
+                debugNumFrames++;
+            }
+            err = encoder->releaseOutputBuffer(bufIndex);
+            if (err != NO_ERROR) {
+                fprintf(stderr, "Unable to release output buffer (err=%d)\n",
+                        err);
+                return err;
+            }
+            if ((flags & MediaCodec::BUFFER_FLAG_EOS) != 0) {
+                // Not expecting EOS from SurfaceFlinger.  Go with it.
+                ALOGD("Received end-of-stream");
+                gStopRequested = false;
+            }
+            break;
+        case -EAGAIN:                       // INFO_TRY_AGAIN_LATER
+            // not expected with infinite timeout
+            ALOGV("Got -EAGAIN, looping");
+            break;
+        case INFO_FORMAT_CHANGED:           // INFO_OUTPUT_FORMAT_CHANGED
+            {
+                // format includes CSD, which we must provide to muxer
+                ALOGV("Encoder format changed");
+                sp<AMessage> newFormat;
+                encoder->getOutputFormat(&newFormat);
+                trackIdx = muxer->addTrack(newFormat);
+                ALOGV("Starting muxer");
+                err = muxer->start();
+                if (err != NO_ERROR) {
+                    fprintf(stderr, "Unable to start muxer (err=%d)\n", err);
+                    return err;
+                }
+            }
+            break;
+        case INFO_OUTPUT_BUFFERS_CHANGED:   // INFO_OUTPUT_BUFFERS_CHANGED
+            // not expected for an encoder; handle it anyway
+            ALOGV("Encoder buffers changed");
+            err = encoder->getOutputBuffers(&buffers);
+            if (err != NO_ERROR) {
+                fprintf(stderr,
+                        "Unable to get new output buffers (err=%d)\n", err);
+            }
+            break;
+        default:
+            ALOGW("Got weird result %d from dequeueOutputBuffer", err);
+            return err;
+        }
+    }
+
+    ALOGV("Encoder stopping (req=%d)", gStopRequested);
+    if (gVerbose) {
+        printf("Encoder stopping; recorded %u frames in %ld seconds\n",
+                debugNumFrames, time(NULL) - debugStartWhen);
+    }
+    return NO_ERROR;
+}
+
+/*
+ * Main "do work" method.
+ *
+ * Configures codec, muxer, and virtual display, then starts moving bits
+ * around.
+ */
+static status_t recordScreen(const char* fileName) {
+    status_t err;
+
+    if (gVerbose) {
+        printf("Recording %dx%d video at %.2fMbps\n",
+                gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
+    }
+
+    // Configure signal handler.
+    err = configureSignals();
+    if (err != NO_ERROR) return err;
+
+    // Start Binder thread pool.  MediaCodec needs to be able to receive
+    // messages from mediaserver.
+    sp<ProcessState> self = ProcessState::self();
+    self->startThreadPool();
+
+    // Get main display parameters.
+    sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
+            ISurfaceComposer::eDisplayIdMain);
+    DisplayInfo mainDpyInfo;
+    err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to get display characteristics\n");
+        return err;
+    }
+    if (gVerbose) {
+        printf("Main display is %dx%d @%.2ffps (orientation=%u)\n",
+                mainDpyInfo.w, mainDpyInfo.h, mainDpyInfo.fps,
+                mainDpyInfo.orientation);
+    }
+
+    // Configure and start the encoder.
+    sp<MediaCodec> encoder;
+    sp<IGraphicBufferProducer> bufferProducer;
+    err = prepareEncoder(mainDpyInfo.fps, &encoder, &bufferProducer);
+    if (err != NO_ERROR) return err;
+
+    // Configure virtual display.
+    sp<IBinder> dpy;
+    err = prepareVirtualDisplay(mainDpyInfo, bufferProducer, &dpy);
+    if (err != NO_ERROR) return err;
+
+    // Configure, but do not start, muxer.
+    sp<MediaMuxer> muxer = new MediaMuxer(fileName,
+            MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+    if (gRotate) {
+        muxer->setOrientationHint(90);
+    }
+
+    // Main encoder loop.
+    err = runEncoder(encoder, muxer);
+    if (err != NO_ERROR) return err;
+
+    if (gVerbose) {
+        printf("Stopping encoder and muxer\n");
+    }
+
+    // Shut everything down.
+    //
+    // The virtual display will continue to produce frames until "dpy"
+    // goes out of scope (and something causes the Binder traffic to transmit;
+    // can be forced with IPCThreadState::self()->flushCommands()).  This
+    // could cause SurfaceFlinger to get stuck trying to feed us, so we want
+    // to set a NULL Surface to make the virtual display "dormant".
+    bufferProducer = NULL;
+    SurfaceComposerClient::openGlobalTransaction();
+    SurfaceComposerClient::setDisplaySurface(dpy, bufferProducer);
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    encoder->stop();
+    muxer->stop();
+    encoder->release();
+
+    return 0;
+}
+
+/*
+ * Parses a string of the form "1280x720".
+ *
+ * Returns true on success.
+ */
+static bool parseWidthHeight(const char* widthHeight, uint32_t* pWidth,
+        uint32_t* pHeight) {
+    long width, height;
+    char* end;
+
+    // Must specify base 10, or "0x0" gets parsed differently.
+    width = strtol(widthHeight, &end, 10);
+    if (end == widthHeight || *end != 'x' || *(end+1) == '\0') {
+        // invalid chars in width, or missing 'x', or missing height
+        return false;
+    }
+    height = strtol(end + 1, &end, 10);
+    if (*end != '\0') {
+        // invalid chars in height
+        return false;
+    }
+
+    *pWidth = width;
+    *pHeight = height;
+    return true;
+}
+
+/*
+ * Dumps usage on stderr.
+ */
+static void usage() {
+    fprintf(stderr,
+        "Usage: screenrecord [options] <filename>\n"
+        "\n"
+        "Options:\n"
+        "--size WIDTHxHEIGHT\n"
+        "    Set the video size, e.g. \"1280x720\".  For best results, use\n"
+        "    a size supported by the AVC encoder.\n"
+        "--bit-rate RATE\n"
+        "    Set the video bit rate, in megabits per second.  Default 4Mbps.\n"
+        "--rotate\n"
+        "    Rotate the output 90 degrees.  Useful for filling the frame\n"
+        "    when in portrait mode.\n"
+        "--verbose\n"
+        "    Display interesting information on stdout.\n"
+        "--help\n"
+        "    Show this message.\n"
+        "\n"
+        "Recording continues until Ctrl-C is hit.\n"
+        "\n"
+        );
+}
+
+/*
+ * Parses args and kicks things off.
+ */
+int main(int argc, char* const argv[]) {
+    static const struct option longOptions[] = {
+        { "help",       no_argument,        NULL, 'h' },
+        { "verbose",    no_argument,        NULL, 'v' },
+        { "size",       required_argument,  NULL, 's' },
+        { "bit-rate",   required_argument,  NULL, 'b' },
+        { "rotate",     no_argument,        NULL, 'r' },
+        { NULL,         0,                  NULL, 0 }
+    };
+
+    while (true) {
+        int optionIndex = 0;
+        int ic = getopt_long(argc, argv, "", longOptions, &optionIndex);
+        if (ic == -1) {
+            break;
+        }
+
+        switch (ic) {
+        case 'h':
+            usage();
+            return 0;
+        case 'v':
+            gVerbose = true;
+            break;
+        case 's':
+            if (!parseWidthHeight(optarg, &gVideoWidth, &gVideoHeight)) {
+                fprintf(stderr, "Invalid size '%s', must be width x height\n",
+                        optarg);
+                return 2;
+            }
+            if (gVideoWidth == 0 || gVideoHeight == 0) {
+                fprintf(stderr,
+                    "Invalid size %ux%u, width and height may not be zero\n",
+                    gVideoWidth, gVideoHeight);
+                return 2;
+            }
+            break;
+        case 'b':
+            gBitRate = atoi(optarg);
+            if (gBitRate < kMinBitRate || gBitRate > kMaxBitRate) {
+                fprintf(stderr,
+                        "Bit rate %dbps outside acceptable range [%d,%d]\n",
+                        gBitRate, kMinBitRate, kMaxBitRate);
+                return 2;
+            }
+            break;
+        case 'r':
+            gRotate = true;
+            break;
+        default:
+            if (ic != '?') {
+                fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
+            }
+            return 2;
+        }
+    }
+
+    if (optind != argc - 1) {
+        fprintf(stderr, "Must specify output file (see --help).\n");
+        return 2;
+    }
+
+    status_t err = recordScreen(argv[optind]);
+    ALOGD(err == NO_ERROR ? "success" : "failed");
+    return (int) err;
+}
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index f8fc8ed..529b96c 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -937,7 +937,8 @@
         } else {
             CHECK(useSurfaceTexAlloc);
 
-            sp<GLConsumer> texture = new GLConsumer(0 /* tex */);
+            sp<BufferQueue> bq = new BufferQueue();
+            sp<GLConsumer> texture = new GLConsumer(bq, 0 /* tex */);
             gSurface = new Surface(texture->getBufferQueue());
         }
 
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index 91fd91e..db41e0b 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -153,18 +153,6 @@
     return reply.readInt32();
 }
 
-status_t BpDrmManagerService::installDrmEngine(int uniqueId, const String8& drmEngineFile) {
-    ALOGV("Install DRM Engine");
-    Parcel data, reply;
-
-    data.writeInterfaceToken(IDrmManagerService::getInterfaceDescriptor());
-    data.writeInt32(uniqueId);
-    data.writeString8(drmEngineFile);
-
-    remote()->transact(INSTALL_DRM_ENGINE, data, &reply);
-    return reply.readInt32();
-}
-
 DrmConstraints* BpDrmManagerService::getConstraints(
             int uniqueId, const String8* path, const int action) {
     ALOGV("Get Constraints");
@@ -855,19 +843,6 @@
         return DRM_NO_ERROR;
     }
 
-    case INSTALL_DRM_ENGINE:
-    {
-        ALOGV("BnDrmManagerService::onTransact :INSTALL_DRM_ENGINE");
-        CHECK_INTERFACE(IDrmManagerService, data, reply);
-
-        const int uniqueId = data.readInt32();
-        const String8 engineFile = data.readString8();
-        status_t status = installDrmEngine(uniqueId, engineFile);
-
-        reply->writeInt32(status);
-        return DRM_NO_ERROR;
-    }
-
     case GET_CONSTRAINTS_FROM_CONTENT:
     {
         ALOGV("BnDrmManagerService::onTransact :GET_CONSTRAINTS_FROM_CONTENT");
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index bfaf4bc..dccd23d 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -175,21 +175,6 @@
     return NULL;
 }
 
-status_t DrmManager::installDrmEngine(int uniqueId, const String8& absolutePath) {
-    Mutex::Autolock _l(mLock);
-    mPlugInManager.loadPlugIn(absolutePath);
-
-    IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(absolutePath);
-    rDrmEngine.initialize(uniqueId);
-    rDrmEngine.setOnInfoListener(uniqueId, this);
-
-    DrmSupportInfo* info = rDrmEngine.getSupportInfo(0);
-    mSupportInfoToPlugInIdMap.add(*info, absolutePath);
-    delete info;
-
-    return DRM_NO_ERROR;
-}
-
 bool DrmManager::canHandle(int uniqueId, const String8& path, const String8& mimeType) {
     Mutex::Autolock _l(mLock);
     const String8 plugInId = getSupportedPlugInId(mimeType);
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index bbd3b7f..2b71904 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -87,11 +87,6 @@
     return DRM_NO_ERROR;
 }
 
-status_t DrmManagerService::installDrmEngine(int uniqueId, const String8& drmEngineFile) {
-    ALOGV("Entering installDrmEngine");
-    return mDrmManager->installDrmEngine(uniqueId, drmEngineFile);
-}
-
 DrmConstraints* DrmManagerService::getConstraints(
             int uniqueId, const String8* path, const int action) {
     ALOGV("Entering getConstraints from content");
diff --git a/drm/libdrmframework/DrmManagerClientImpl.cpp b/drm/libdrmframework/DrmManagerClientImpl.cpp
index a970035..ffefd74 100644
--- a/drm/libdrmframework/DrmManagerClientImpl.cpp
+++ b/drm/libdrmframework/DrmManagerClientImpl.cpp
@@ -86,15 +86,6 @@
             (NULL != infoListener.get()) ? this : NULL);
 }
 
-status_t DrmManagerClientImpl::installDrmEngine(
-        int uniqueId, const String8& drmEngineFile) {
-    status_t status = DRM_ERROR_UNKNOWN;
-    if (EMPTY_STRING != drmEngineFile) {
-        status = getDrmManagerService()->installDrmEngine(uniqueId, drmEngineFile);
-    }
-    return status;
-}
-
 DrmConstraints* DrmManagerClientImpl::getConstraints(
         int uniqueId, const String8* path, const int action) {
     DrmConstraints *drmConstraints = NULL;
diff --git a/drm/libdrmframework/include/DrmManager.h b/drm/libdrmframework/include/DrmManager.h
index 8ab693f..e7cdd36 100644
--- a/drm/libdrmframework/include/DrmManager.h
+++ b/drm/libdrmframework/include/DrmManager.h
@@ -70,8 +70,6 @@
     status_t setDrmServiceListener(
             int uniqueId, const sp<IDrmServiceListener>& drmServiceListener);
 
-    status_t installDrmEngine(int uniqueId, const String8& drmEngineFile);
-
     DrmConstraints* getConstraints(int uniqueId, const String8* path, const int action);
 
     DrmMetadata* getMetadata(int uniqueId, const String8* path);
diff --git a/drm/libdrmframework/include/DrmManagerClientImpl.h b/drm/libdrmframework/include/DrmManagerClientImpl.h
index 9b4c9ae..3400cb1 100644
--- a/drm/libdrmframework/include/DrmManagerClientImpl.h
+++ b/drm/libdrmframework/include/DrmManagerClientImpl.h
@@ -410,17 +410,6 @@
     status_t notify(const DrmInfoEvent& event);
 
 private:
-    /**
-     * Install new DRM Engine Plug-in at the runtime
-     *
-     * @param[in] uniqueId Unique identifier for a session
-     * @param[in] drmEngine Shared Object(so) File in which DRM Engine defined
-     * @return status_t
-     *     Returns DRM_NO_ERROR for success, DRM_ERROR_UNKNOWN for failure
-     */
-    status_t installDrmEngine(int uniqueId, const String8& drmEngineFile);
-
-private:
     Mutex mLock;
     sp<DrmManagerClient::OnInfoListener> mOnInfoListener;
 
diff --git a/drm/libdrmframework/include/DrmManagerService.h b/drm/libdrmframework/include/DrmManagerService.h
index 0dfdca6..8bc59b4 100644
--- a/drm/libdrmframework/include/DrmManagerService.h
+++ b/drm/libdrmframework/include/DrmManagerService.h
@@ -57,8 +57,6 @@
     status_t setDrmServiceListener(
             int uniqueId, const sp<IDrmServiceListener>& drmServiceListener);
 
-    status_t installDrmEngine(int uniqueId, const String8& drmEngineFile);
-
     DrmConstraints* getConstraints(int uniqueId, const String8* path, const int action);
 
     DrmMetadata* getMetadata(int uniqueId, const String8* path);
diff --git a/drm/libdrmframework/include/IDrmManagerService.h b/drm/libdrmframework/include/IDrmManagerService.h
index 5a4d70a..fe55650 100644
--- a/drm/libdrmframework/include/IDrmManagerService.h
+++ b/drm/libdrmframework/include/IDrmManagerService.h
@@ -93,8 +93,6 @@
     virtual status_t setDrmServiceListener(
             int uniqueId, const sp<IDrmServiceListener>& infoListener) = 0;
 
-    virtual status_t installDrmEngine(int uniqueId, const String8& drmEngineFile) = 0;
-
     virtual DrmConstraints* getConstraints(
             int uniqueId, const String8* path, const int action) = 0;
 
@@ -185,8 +183,6 @@
     virtual status_t setDrmServiceListener(
             int uniqueId, const sp<IDrmServiceListener>& infoListener);
 
-    virtual status_t installDrmEngine(int uniqueId, const String8& drmEngineFile);
-
     virtual DrmConstraints* getConstraints(int uniqueId, const String8* path, const int action);
 
     virtual DrmMetadata* getMetadata(int uniqueId, const String8* path);
diff --git a/include/camera/photography/ICameraDeviceUser.h b/include/camera/photography/ICameraDeviceUser.h
index 1b8d666..45988d0 100644
--- a/include/camera/photography/ICameraDeviceUser.h
+++ b/include/camera/photography/ICameraDeviceUser.h
@@ -58,10 +58,11 @@
                                                  /*out*/
                                                  CameraMetadata* request) = 0;
     // Get static camera metadata
-    virtual status_t        getCameraInfo(int cameraId,
-                                          /*out*/
-                                          camera_metadata** info) = 0;
+    virtual status_t        getCameraInfo(/*out*/
+                                          CameraMetadata* info) = 0;
 
+    // Wait until all the submitted requests have finished processing
+    virtual status_t        waitUntilIdle() = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/cpustats/CentralTendencyStatistics.h b/include/cpustats/CentralTendencyStatistics.h
new file mode 100644
index 0000000..21b6981
--- /dev/null
+++ b/include/cpustats/CentralTendencyStatistics.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _CENTRAL_TENDENCY_STATISTICS_H
+#define _CENTRAL_TENDENCY_STATISTICS_H
+
+#include <math.h>
+
+// Not multithread safe
+class CentralTendencyStatistics {
+
+public:
+
+    CentralTendencyStatistics() :
+            mMean(NAN), mMedian(NAN), mMinimum(INFINITY), mMaximum(-INFINITY), mN(0), mM2(0),
+            mVariance(NAN), mVarianceKnownForN(0), mStddev(NAN), mStddevKnownForN(0) { }
+
+    ~CentralTendencyStatistics() { }
+
+    // add x to the set of samples
+    void sample(double x);
+
+    // return the arithmetic mean of all samples so far
+    double mean() const { return mMean; }
+
+    // return the minimum of all samples so far
+    double minimum() const { return mMinimum; }
+
+    // return the maximum of all samples so far
+    double maximum() const { return mMaximum; }
+
+    // return the variance of all samples so far
+    double variance() const;
+
+    // return the standard deviation of all samples so far
+    double stddev() const;
+
+    // return the number of samples added so far
+    unsigned n() const { return mN; }
+
+    // reset the set of samples to be empty
+    void reset();
+
+private:
+    double mMean;
+    double mMedian;
+    double mMinimum;
+    double mMaximum;
+    unsigned mN;    // number of samples so far
+    double mM2;
+
+    // cached variance, and n at time of caching
+    mutable double mVariance;
+    mutable unsigned mVarianceKnownForN;
+
+    // cached standard deviation, and n at time of caching
+    mutable double mStddev;
+    mutable unsigned mStddevKnownForN;
+
+};
+
+#endif // _CENTRAL_TENDENCY_STATISTICS_H
diff --git a/include/cpustats/README.txt b/include/cpustats/README.txt
new file mode 100644
index 0000000..14439f0
--- /dev/null
+++ b/include/cpustats/README.txt
@@ -0,0 +1,6 @@
+This is a static library of CPU usage statistics, originally written
+for audio, but most of the statistics are not actually specific to audio.
+
+Requirements to be here:
+ * should be related to CPU usage statistics
+ * should be portable to host; avoid Android OS dependencies without a conditional
diff --git a/include/cpustats/ThreadCpuUsage.h b/include/cpustats/ThreadCpuUsage.h
new file mode 100644
index 0000000..9756844
--- /dev/null
+++ b/include/cpustats/ThreadCpuUsage.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _THREAD_CPU_USAGE_H
+#define _THREAD_CPU_USAGE_H
+
+#include <fcntl.h>
+#include <pthread.h>
+
+namespace android {
+
+// Track CPU usage for the current thread.
+// Units are in per-thread CPU ns, as reported by
+// clock_gettime(CLOCK_THREAD_CPUTIME_ID).  Simple usage: for cyclic
+// threads where you want to measure the execution time of the whole
+// cycle, just call sampleAndEnable() at the start of each cycle.
+// For acyclic threads, or for cyclic threads where you want to measure/track
+// only part of each cycle, call enable(), disable(), and/or setEnabled()
+// to demarcate the region(s) of interest, and then call sample() periodically.
+// This class is not thread-safe for concurrent calls from multiple threads;
+// the methods of this class may only be called by the current thread
+// which constructed the object.
+
+class ThreadCpuUsage
+{
+
+public:
+    ThreadCpuUsage() :
+        mIsEnabled(false),
+        mWasEverEnabled(false),
+        mAccumulator(0),
+        // mPreviousTs
+        // mMonotonicTs
+        mMonotonicKnown(false)
+        {
+            (void) pthread_once(&sOnceControl, &init);
+            for (int i = 0; i < sKernelMax; ++i) {
+                mCurrentkHz[i] = (uint32_t) ~0;   // unknown
+            }
+        }
+
+    ~ThreadCpuUsage() { }
+
+    // Return whether currently tracking CPU usage by current thread
+    bool isEnabled() const  { return mIsEnabled; }
+
+    // Enable tracking of CPU usage by current thread;
+    // any CPU used from this point forward will be tracked.
+    // Returns the previous enabled status.
+    bool enable()       { return setEnabled(true); }
+
+    // Disable tracking of CPU usage by current thread;
+    // any CPU used from this point forward will be ignored.
+    // Returns the previous enabled status.
+    bool disable()      { return setEnabled(false); }
+
+    // Set the enabled status and return the previous enabled status.
+    // This method is intended to be used for safe nested enable/disabling.
+    bool setEnabled(bool isEnabled);
+
+    // Add a sample point, and also enable tracking if needed.
+    // If tracking has never been enabled, then this call enables tracking but
+    // does _not_ add a sample -- it is not possible to add a sample the
+    // first time because there is no previous point to subtract from.
+    // Otherwise, if tracking is enabled,
+    // then adds a sample for tracked CPU ns since the previous
+    // sample, or since the first call to sampleAndEnable(), enable(), or
+    // setEnabled(true).  If there was a previous sample but tracking is
+    // now disabled, then adds a sample for the tracked CPU ns accumulated
+    // up until the most recent disable(), resets this accumulator, and then
+    // enables tracking.  Calling this method rather than enable() followed
+    // by sample() avoids a race condition for the first sample.
+    // Returns true if the sample 'ns' is valid, or false if invalid.
+    // Note that 'ns' is an output parameter passed by reference.
+    // The caller does not need to initialize this variable.
+    // The units are CPU nanoseconds consumed by current thread.
+    bool sampleAndEnable(double& ns);
+
+    // Add a sample point, but do not
+    // change the tracking enabled status.  If tracking has either never been
+    // enabled, or has never been enabled since the last sample, then log a warning
+    // and don't add sample.  Otherwise, adds a sample for tracked CPU ns since
+    // the previous sample or since the first call to sampleAndEnable(),
+    // enable(), or setEnabled(true) if no previous sample.
+    // Returns true if the sample is valid, or false if invalid.
+    // Note that 'ns' is an output parameter passed by reference.
+    // The caller does not need to initialize this variable.
+    // The units are CPU nanoseconds consumed by current thread.
+    bool sample(double& ns);
+
+    // Return the elapsed delta wall clock ns since initial enable or reset,
+    // as reported by clock_gettime(CLOCK_MONOTONIC).
+    long long elapsed() const;
+
+    // Reset elapsed wall clock.  Has no effect on tracking or accumulator.
+    void resetElapsed();
+
+    // Return current clock frequency for specified CPU, in kHz.
+    // You can get your CPU number using sched_getcpu(2).  Note that, unless CPU affinity
+    // has been configured appropriately, the CPU number can change.
+    // Also note that, unless the CPU governor has been configured appropriately,
+    // the CPU frequency can change.  And even if the CPU frequency is locked down
+    // to a particular value, the frequency might still be adjusted
+    // to prevent thermal overload.  Therefore you should poll for your thread's
+    // current CPU number and clock frequency periodically.
+    uint32_t getCpukHz(int cpuNum);
+
+private:
+    bool mIsEnabled;                // whether tracking is currently enabled
+    bool mWasEverEnabled;           // whether tracking was ever enabled
+    long long mAccumulator;         // accumulated thread CPU time since last sample, in ns
+    struct timespec mPreviousTs;    // most recent thread CPU time, valid only if mIsEnabled is true
+    struct timespec mMonotonicTs;   // most recent monotonic time
+    bool mMonotonicKnown;           // whether mMonotonicTs has been set
+
+    static const int MAX_CPU = 8;
+    static int sScalingFds[MAX_CPU];// file descriptor per CPU for reading scaling_cur_freq
+    uint32_t mCurrentkHz[MAX_CPU];  // current CPU frequency in kHz, not static to avoid a race
+    static pthread_once_t sOnceControl;
+    static int sKernelMax;          // like MAX_CPU, but determined at runtime == cpu/kernel_max + 1
+    static void init();             // called once at first ThreadCpuUsage construction
+    static pthread_mutex_t sMutex;  // protects sScalingFds[] after initialization
+};
+
+}   // namespace android
+
+#endif //  _THREAD_CPU_USAGE_H
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index b11c812..f9e625e 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -17,20 +17,18 @@
 #ifndef ANDROID_AUDIOSYSTEM_H_
 #define ANDROID_AUDIOSYSTEM_H_
 
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <media/IAudioFlinger.h>
-
+#include <hardware/audio_effect.h>
+#include <media/IAudioFlingerClient.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-
-/* XXX: Should be include by all the users instead */
-#include <media/AudioParameter.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
 
 namespace android {
 
 typedef void (*audio_error_callback)(status_t err);
 
+class IAudioFlinger;
 class IAudioPolicyService;
 class String8;
 
@@ -128,8 +126,10 @@
     // - BAD_VALUE: invalid parameter
     // NOTE: this feature is not supported on all hardware platforms and it is
     // necessary to check returned status before using the returned values.
-    static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
-            audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+    static status_t getRenderPosition(audio_io_handle_t output,
+                                      size_t *halFrames,
+                                      size_t *dspFrames,
+                                      audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
 
     // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
     static size_t getInputFramesLost(audio_io_handle_t ioHandle);
@@ -155,11 +155,11 @@
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)  {}
 
         uint32_t samplingRate;
         int32_t format;
-        int32_t channels;
+        audio_channel_mask_t channelMask;
         size_t frameCount;
         uint32_t latency;
     };
@@ -197,7 +197,8 @@
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 int session = 0);
@@ -245,6 +246,12 @@
     static uint32_t getPrimaryOutputSamplingRate();
     static size_t getPrimaryOutputFrameCount();
 
+    static status_t setLowRamDevice(bool isLowRamDevice);
+
+    // Check if hw offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming or offload property is enabled
+    static bool isOffloadSupported(const audio_offload_info_t& info);
+
     // ----------------------------------------------------------------------------
 
 private:
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e9bb76a..da13a7f 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -60,6 +60,8 @@
                                     // Not currently used by android.media.AudioTrack.
         EVENT_NEW_IAUDIOTRACK = 6,  // IAudioTrack was re-created, either due to re-routing and
                                     // voluntary invalidation by mediaserver, or mediaserver crash.
+        EVENT_STREAM_END = 7,       // Sent after all the buffers queued in AF and HW are played
+                                    // back (after stop is called)
     };
 
     /* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -73,8 +75,10 @@
         size_t      frameCount;   // number of sample frames corresponding to size;
                                   // on input it is the number of frames desired,
                                   // on output is the number of frames actually filled
+                                  // (currently ignored, but will be made the primary field in the future)
 
         size_t      size;         // input/output in bytes == frameCount * frameSize
+                                  // on output is the number of bytes actually filled
                                   // FIXME this is redundant with respect to frameCount,
                                   // and TRANSFER_OBTAIN mode is broken for 8-bit data
                                   // since we don't define the frame format
@@ -175,7 +179,8 @@
                                     void* user           = NULL,
                                     int notificationFrames = 0,
                                     int sessionId        = 0,
-                                    transfer_type transferType = TRANSFER_DEFAULT);
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = NULL);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -198,7 +203,8 @@
                                     void* user          = NULL,
                                     int notificationFrames = 0,
                                     int sessionId       = 0,
-                                    transfer_type transferType = TRANSFER_DEFAULT);
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = NULL);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
@@ -233,7 +239,8 @@
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
                             int sessionId       = 0,
-                            transfer_type transferType = TRANSFER_DEFAULT);
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            const audio_offload_info_t *offloadInfo = NULL);
 
     /* Result of constructing the AudioTrack. This must be checked
      * before using any AudioTrack API (except for set()), because using
@@ -270,7 +277,7 @@
      * make it active. If set, the callback will start being called.
      * If the track was previously paused, volume is ramped up over the first mix buffer.
      */
-            void        start();
+            status_t        start();
 
     /* Stop a track.
      * In static buffer mode, the track is stopped immediately.
@@ -521,6 +528,15 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
+//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy
+//            enum {
+//            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
+//            TEAR_DOWN       = 0x80000002,
+//            STOPPED = 1,
+//            STREAM_END_WAIT,
+//            STREAM_END
+//        };
+
     /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
     // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
@@ -550,6 +566,15 @@
      */
             uint32_t    getUnderrunFrames() const;
 
+    /* Get the flags */
+            audio_output_flags_t getFlags() const { return mFlags; }
+
+    /* Set parameters - only possible when using direct output */
+            status_t    setParameters(const String8& keyValuePairs);
+
+    /* Get parameters */
+            String8     getParameters(const String8& keys);
+
 protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
@@ -590,8 +615,11 @@
             //      NS_NEVER    never again
             static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
             nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread);
+            status_t processStreamEnd(int32_t waitCount);
+
 
             // caller must hold lock on mLock for all _l methods
+
             status_t createTrack_l(audio_stream_type_t streamType,
                                  uint32_t sampleRate,
                                  audio_format_t format,
@@ -610,6 +638,9 @@
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
+            bool     isOffloaded() const
+                { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+
     // may be changed if IAudioTrack is re-created
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -646,7 +677,9 @@
         STATE_ACTIVE,
         STATE_STOPPED,
         STATE_PAUSED,
+        STATE_PAUSED_STOPPING,
         STATE_FLUSHED,
+        STATE_STOPPING,
     }                       mState;
 
     callback_t              mCbf;                   // callback handler for events, or NULL
@@ -664,7 +697,7 @@
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
-    int                     mObservedSequence;      // last observed value of mSequence
+    uint32_t                mObservedSequence;      // last observed value of mSequence
 
     sp<IMemory>             mSharedBuffer;
     uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
@@ -706,6 +739,7 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
+    audio_io_handle_t       mOutput;                // cached output io handle
 };
 
 class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 9c3067e..de45aa8 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -49,6 +49,7 @@
         TRACK_DEFAULT = 0,  // client requests a default AudioTrack
         TRACK_TIMED   = 1,  // client requests a TimedAudioTrack
         TRACK_FAST    = 2,  // client requests a fast AudioTrack or AudioRecord
+        TRACK_OFFLOAD = 4,  // client requests offload to hw codec
     };
     typedef uint32_t track_flags_t;
 
@@ -124,7 +125,9 @@
     virtual     String8     getParameters(audio_io_handle_t ioHandle, const String8& keys)
                                     const = 0;
 
-    // register a current process for audio output change notifications
+    // Register an object to receive audio input/output change and track notifications.
+    // For a given calling pid, AudioFlinger disregards any registrations after the first.
+    // Thus the IAudioFlingerClient must be a singleton per process.
     virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
 
     // retrieve the audio recording buffer size
@@ -137,7 +140,8 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags) = 0;
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                     audio_io_handle_t output2) = 0;
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
@@ -193,6 +197,10 @@
     virtual uint32_t getPrimaryOutputSamplingRate() = 0;
     virtual size_t getPrimaryOutputFrameCount() = 0;
 
+    // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+    // and should be called at most once.  For a definition of what "low RAM" means, see
+    // android.app.ActivityManager.isLowRamDevice().
+    virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index b5ad4ef..09b9ea6 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -53,7 +53,8 @@
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0) = 0;
@@ -95,6 +96,9 @@
     virtual status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
+    // Check if offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming or offload property is enabled
+    virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
 };
 
 
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index d6e3141..eccc2ca 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -34,6 +34,9 @@
 public:
     DECLARE_META_INTERFACE(AudioRecord);
 
+    /* get this track's control block */
+    virtual sp<IMemory> getCblk() const = 0;
+
     /* After it's created the track is not active. Call start() to
      * make it active.
      */
@@ -44,9 +47,6 @@
      * will be processed, unless flush() is called.
      */
     virtual void        stop() = 0;
-
-    /* get this tracks control block */
-    virtual sp<IMemory> getCblk() const = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 144be0e..1014403 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -25,6 +25,7 @@
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
 #include <utils/LinearTransform.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -82,6 +83,9 @@
        or Tungsten time. The values for target are defined in AudioTrack.h */
     virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
                                               int target) = 0;
+
+    /* Send parameters to the audio hardware */
+    virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 0b1d1e4..38f9d11 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -130,6 +130,16 @@
             node_id node,
             const char *parameter_name,
             OMX_INDEXTYPE *index) = 0;
+
+    enum InternalOptionType {
+        INTERNAL_OPTION_SUSPEND,  // data is a bool
+    };
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size) = 0;
 };
 
 struct omx_message {
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 9a75f81..3b151ef 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -74,9 +74,18 @@
     // AudioSink: abstraction layer for audio output
     class AudioSink : public RefBase {
     public:
+        enum cb_event_t {
+            CB_EVENT_FILL_BUFFER,   // Request to write more data to buffer.
+            CB_EVENT_STREAM_END,    // Sent after all the buffers queued in AF and HW are played
+                                    // back (after stop is called)
+            CB_EVENT_TEAR_DOWN      // The AudioTrack was invalidated due to use case change:
+                                    // Need to re-evaluate offloading options
+        };
+
         // Callback returns the number of bytes actually written to the buffer.
         typedef size_t (*AudioCallback)(
-                AudioSink *audioSink, void *buffer, size_t size, void *cookie);
+                AudioSink *audioSink, void *buffer, size_t size, void *cookie,
+                        cb_event_t event);
 
         virtual             ~AudioSink() {}
         virtual bool        ready() const = 0; // audio output is open and ready
@@ -99,9 +108,10 @@
                 int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 AudioCallback cb = NULL,
                 void *cookie = NULL,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL) = 0;
 
-        virtual void        start() = 0;
+        virtual status_t    start() = 0;
         virtual ssize_t     write(const void* buffer, size_t size) = 0;
         virtual void        stop() = 0;
         virtual void        flush() = 0;
@@ -110,6 +120,9 @@
 
         virtual status_t    setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
         virtual bool        needsTrailingPadding() { return true; }
+
+        virtual status_t    setParameters(const String8& keyValuePairs) { return NO_ERROR; };
+        virtual String8     getParameters(const String8& keys) { return String8::empty(); };
     };
 
                         MediaPlayerBase() : mCookie(0), mNotify(0) {}
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index aa58905..e429263 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -19,7 +19,7 @@
 
 #include <media/AudioEffect.h>
 #include <audio_effects/effect_visualizer.h>
-#include <string.h>
+#include <utils/Thread.h>
 
 /**
  * The Visualizer class enables application to retrieve part of the currently playing audio for
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 107ba66..6d59ea7 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -90,6 +90,8 @@
     virtual ~Timeline();
 #endif
 
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
     static size_t sharedSize(size_t size);
 
 #if 0
@@ -110,8 +112,12 @@
 class Writer : public RefBase {
 public:
     Writer();                   // dummy nop implementation without shared memory
+
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // The size of the shared memory must be at least Timeline::sharedSize(size).
     Writer(size_t size, void *shared);
     Writer(size_t size, const sp<IMemory>& iMemory);
+
     virtual ~Writer() { }
 
     virtual void    log(const char *string);
@@ -165,8 +171,12 @@
 
 class Reader : public RefBase {
 public:
+
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // The size of the shared memory must be at least Timeline::sharedSize(size).
     Reader(size_t size, const void *shared);
     Reader(size_t size, const sp<IMemory>& iMemory);
+
     virtual ~Reader() { }
 
     void    dump(int fd, size_t indent = 0);
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 3bf046d..912a43c 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -36,8 +36,16 @@
         SEEK_COMPLETE
     };
 
+    enum {
+        ALLOW_DEEP_BUFFERING = 0x01,
+        USE_OFFLOAD = 0x02,
+        HAS_VIDEO   = 0x1000,
+        IS_STREAMING = 0x2000
+
+    };
+
     AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
-                bool allowDeepBuffering = false,
+                uint32_t flags = 0,
                 AwesomePlayer *audioObserver = NULL);
 
     virtual ~AudioPlayer();
@@ -51,7 +59,7 @@
     status_t start(bool sourceAlreadyStarted = false);
 
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
 
     // Returns the timestamp of the last buffer played (in us).
     int64_t getMediaTimeUs();
@@ -67,6 +75,8 @@
 
     status_t setPlaybackRatePermille(int32_t ratePermille);
 
+    void notifyAudioEOS();
+
 private:
     friend class VideoEditorAudioPlayer;
     sp<MediaSource> mSource;
@@ -97,17 +107,20 @@
     MediaBuffer *mFirstBuffer;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
-    bool mAllowDeepBuffering;       // allow audio deep audio buffers. Helps with low power audio
-                                    // playback but implies high latency
     AwesomePlayer *mObserver;
     int64_t mPinnedTimeUs;
 
+    bool mPlaying;
+    int64_t mStartPosUs;
+    const uint32_t mCreateFlags;
+
     static void AudioCallback(int event, void *user, void *info);
     void AudioCallback(int event, void *info);
 
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
-            void *data, size_t size, void *me);
+            void *data, size_t size, void *me,
+            MediaPlayerBase::AudioSink::cb_event_t event);
 
     size_t fillBuffer(void *data, size_t size);
 
@@ -116,6 +129,10 @@
     void reset();
 
     uint32_t getNumFramesPendingPlayout() const;
+    int64_t getOutputPlayPositionUs_l() const;
+
+    bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
+    bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }
 
     AudioPlayer(const AudioPlayer &);
     AudioPlayer &operator=(const AudioPlayer &);
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 73940d3..c24f612 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -22,6 +22,8 @@
 #include <stdint.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
+#include <system/audio.h>
+#include <media/MediaPlayerInterface.h>
 
 namespace android {
 
@@ -48,6 +50,15 @@
 
 AString MakeUserAgent();
 
+// Convert a MIME type to a AudioSystem::audio_format
+status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime);
+
+// Send information from MetaData to the HAL via AudioSink
+status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta);
+
+// Check whether the stream defined by meta can be offloaded to hardware
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming);
+
 }  // namespace android
 
 #endif  // UTILS_H_
diff --git a/media/libstagefright/wifi-display/ANetworkSession.h b/include/media/stagefright/foundation/ANetworkSession.h
similarity index 97%
rename from media/libstagefright/wifi-display/ANetworkSession.h
rename to include/media/stagefright/foundation/ANetworkSession.h
index 7c62b29..fd3ebaa 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.h
+++ b/include/media/stagefright/foundation/ANetworkSession.h
@@ -77,6 +77,8 @@
             int32_t sessionID, const void *data, ssize_t size = -1,
             bool timeValid = false, int64_t timeUs = -1ll);
 
+    status_t switchToWebSocketMode(int32_t sessionID);
+
     enum NotificationReason {
         kWhatError,
         kWhatConnected,
@@ -84,6 +86,7 @@
         kWhatData,
         kWhatDatagram,
         kWhatBinaryData,
+        kWhatWebSocketMessage,
         kWhatNetworkStall,
     };
 
diff --git a/media/libstagefright/wifi-display/ParsedMessage.h b/include/media/stagefright/foundation/ParsedMessage.h
similarity index 96%
rename from media/libstagefright/wifi-display/ParsedMessage.h
rename to include/media/stagefright/foundation/ParsedMessage.h
index e9a1859..9d43a93 100644
--- a/media/libstagefright/wifi-display/ParsedMessage.h
+++ b/include/media/stagefright/foundation/ParsedMessage.h
@@ -32,7 +32,7 @@
 
     const char *getContent() const;
 
-    void getRequestField(size_t index, AString *field) const;
+    bool getRequestField(size_t index, AString *field) const;
     bool getStatusCode(int32_t *statusCode) const;
 
     AString debugString() const;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ef5bb8d..b890180 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -44,6 +44,10 @@
 #define CBLK_BUFFER_END 0x80 // set by server when the position reaches end of buffer if not looping
 #define CBLK_OVERRUN   0x100 // set by server immediately on input overrun, cleared by client
 #define CBLK_INTERRUPT 0x200 // set by client on interrupt(), cleared by client in obtainBuffer()
+#define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
+
+//EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
+#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming up to a maximum of 20 seconds of offloaded
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
@@ -164,6 +168,7 @@
     const bool      mIsOut;             // true for AudioTrack, false for AudioRecord
     const bool      mClientInServer;    // true for OutputTrack, false for AudioTrack & AudioRecord
     bool            mIsShutdown;        // latch set to true when shared memory corruption detected
+    size_t          mUnreleased;        // unreleased frames remaining from most recent obtainBuffer
 };
 
 // ----------------------------------------------------------------------------
@@ -209,7 +214,7 @@
     //  DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
     //  -EINTR      Call has been interrupted.  Look around to see why, and then perhaps try again.
     //  NO_INIT     Shared memory is corrupt.
-    //  BAD_VALUE   On entry buffer == NULL or buffer->mFrameCount == 0.
+    // Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
     status_t    obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
             struct timespec *elapsed = NULL);
 
@@ -286,6 +291,12 @@
     virtual uint32_t    getUnderrunFrames() const {
         return mCblk->u.mStreaming.mUnderrunFrames;
     }
+
+    bool        clearStreamEndDone();   // and return previous value
+
+    bool        getStreamEndDone() const;
+
+    status_t    waitStreamEndDone(const struct timespec *requested);
 };
 
 class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -368,10 +379,9 @@
     virtual void        releaseBuffer(Buffer* buffer);
 
 protected:
-    size_t      mUnreleased;    // unreleased frames remaining from most recent obtainBuffer()
     size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
-private:
     int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+private:
     bool        mDeferWake;     // whether another releaseBuffer() is expected soon
 };
 
@@ -401,6 +411,8 @@
     // should avoid doing a state queue poll from within framesReady().
     // FIXME Change AudioFlinger to not call framesReady() from normal mixer thread.
     virtual void        framesReadyIsCalledByMultipleThreads() { }
+
+    bool     setStreamEndDone();    // and return previous value
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
index 702900b..84a8e15 100755
--- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
@@ -568,7 +568,8 @@
 RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId)
     : mRenderer(renderer)
     , mTextureId(textureId) {
-    mST = new GLConsumer(mTextureId);
+    sp<BufferQueue> bq = new BufferQueue();
+    mST = new GLConsumer(bq, mTextureId);
     mSTC = new Surface(mST->getBufferQueue());
     native_window_connect(mSTC.get(), NATIVE_WINDOW_API_MEDIA);
 }
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
index 3fa8b87..176f8e9 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -149,7 +149,7 @@
     mStarted = false;
 }
 
-void VideoEditorAudioPlayer::resume() {
+status_t VideoEditorAudioPlayer::resume() {
     ALOGV("resume");
 
     AudioMixSettings audioMixSettings;
@@ -180,6 +180,7 @@
     } else {
         mAudioTrack->start();
     }
+    return OK;
 }
 
 status_t VideoEditorAudioPlayer::seekTo(int64_t time_us) {
@@ -575,10 +576,15 @@
 
 size_t VideoEditorAudioPlayer::AudioSinkCallback(
         MediaPlayerBase::AudioSink *audioSink,
-        void *buffer, size_t size, void *cookie) {
+        void *buffer, size_t size, void *cookie,
+        MediaPlayerBase::AudioSink::cb_event_t event) {
     VideoEditorAudioPlayer *me = (VideoEditorAudioPlayer *)cookie;
 
-    return me->fillBuffer(buffer, size);
+    if (event == MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER ) {
+        return me->fillBuffer(buffer, size);
+    } else {
+        return 0;
+    }
 }
 
 
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
index a5616c1..2caf5e8 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
@@ -58,7 +58,7 @@
 
     status_t start(bool sourceAlreadyStarted = false);
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
     status_t seekTo(int64_t time_us);
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
@@ -124,7 +124,8 @@
     size_t fillBuffer(void *data, size_t size);
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
-            void *data, size_t size, void *me);
+            void *data, size_t size, void *me,
+            MediaPlayerBase::AudioSink::cb_event_t event);
 
     void reset();
     void clear();
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index 4a14b40..5aeba4f 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -391,7 +391,8 @@
 status_t VideoEditorPlayer::VeAudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags) {
+        AudioCallback cb, void *cookie, audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo) {
 
     mCallback = cb;
     mCallbackCookie = cookie;
@@ -467,14 +468,18 @@
     return NO_ERROR;
 }
 
-void VideoEditorPlayer::VeAudioOutput::start() {
+status_t VideoEditorPlayer::VeAudioOutput::start() {
 
     ALOGV("start");
     if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
-        mTrack->start();
-        mTrack->getPosition(&mNumFramesWritten);
+        status_t status = mTrack->start();
+        if (status == NO_ERROR) {
+            mTrack->getPosition(&mNumFramesWritten);
+        }
+        return status;
     }
+    return NO_INIT;
 }
 
 void VideoEditorPlayer::VeAudioOutput::snoopWrite(
@@ -545,7 +550,8 @@
     AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
 
     size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie);
+            me, buffer->raw, buffer->size, me->mCallbackCookie,
+            MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER);
 
     buffer->size = actualSize;
 
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index defc90d..ab6d731 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -52,9 +52,10 @@
         virtual status_t        open(
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount,
-                AudioCallback cb, void *cookie, audio_output_flags_t flags);
+                AudioCallback cb, void *cookie, audio_output_flags_t flags,
+                const audio_offload_info_t *offloadInfo);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
diff --git a/media/libcpustats/Android.mk b/media/libcpustats/Android.mk
new file mode 100644
index 0000000..b506353
--- /dev/null
+++ b/media/libcpustats/Android.mk
@@ -0,0 +1,11 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES :=     \
+        CentralTendencyStatistics.cpp \
+        ThreadCpuUsage.cpp
+
+LOCAL_MODULE := libcpustats
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libcpustats/CentralTendencyStatistics.cpp b/media/libcpustats/CentralTendencyStatistics.cpp
new file mode 100644
index 0000000..42ab62b
--- /dev/null
+++ b/media/libcpustats/CentralTendencyStatistics.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <cpustats/CentralTendencyStatistics.h>
+
+void CentralTendencyStatistics::sample(double x)
+{
+    // update min and max
+    if (x < mMinimum)
+        mMinimum = x;
+    if (x > mMaximum)
+        mMaximum = x;
+    // Knuth
+    if (mN == 0) {
+        mMean = 0;
+    }
+    ++mN;
+    double delta = x - mMean;
+    mMean += delta / mN;
+    mM2 += delta * (x - mMean);
+}
+
+void CentralTendencyStatistics::reset()
+{
+    mMean = NAN;
+    mMedian = NAN;
+    mMinimum = INFINITY;
+    mMaximum = -INFINITY;
+    mN = 0;
+    mM2 = 0;
+    mVariance = NAN;
+    mVarianceKnownForN = 0;
+    mStddev = NAN;
+    mStddevKnownForN = 0;
+}
+
+double CentralTendencyStatistics::variance() const
+{
+    double variance;
+    if (mVarianceKnownForN != mN) {
+        if (mN > 1) {
+            // double variance_n = M2/n;
+            variance = mM2 / (mN - 1);
+        } else {
+            variance = NAN;
+        }
+        mVariance = variance;
+        mVarianceKnownForN = mN;
+    } else {
+        variance = mVariance;
+    }
+    return variance;
+}
+
+double CentralTendencyStatistics::stddev() const
+{
+    double stddev;
+    if (mStddevKnownForN != mN) {
+        stddev = sqrt(variance());
+        mStddev = stddev;
+        mStddevKnownForN = mN;
+    } else {
+        stddev = mStddev;
+    }
+    return stddev;
+}
diff --git a/media/libcpustats/ThreadCpuUsage.cpp b/media/libcpustats/ThreadCpuUsage.cpp
new file mode 100644
index 0000000..637402a
--- /dev/null
+++ b/media/libcpustats/ThreadCpuUsage.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ThreadCpuUsage"
+//#define LOG_NDEBUG 0
+
+#include <errno.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <utils/Debug.h>
+#include <utils/Log.h>
+
+#include <cpustats/ThreadCpuUsage.h>
+
+namespace android {
+
+bool ThreadCpuUsage::setEnabled(bool isEnabled)
+{
+    bool wasEnabled = mIsEnabled;
+    // only do something if there is a change
+    if (isEnabled != wasEnabled) {
+        ALOGV("setEnabled(%d)", isEnabled);
+        int rc;
+        // enabling
+        if (isEnabled) {
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &mPreviousTs);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+                isEnabled = false;
+            } else {
+                mWasEverEnabled = true;
+                // record wall clock time at first enable
+                if (!mMonotonicKnown) {
+                    rc = clock_gettime(CLOCK_MONOTONIC, &mMonotonicTs);
+                    if (rc) {
+                        ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+                    } else {
+                        mMonotonicKnown = true;
+                    }
+                }
+            }
+        // disabling
+        } else {
+            struct timespec ts;
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+            } else {
+                long long delta = (ts.tv_sec - mPreviousTs.tv_sec) * 1000000000LL +
+                        (ts.tv_nsec - mPreviousTs.tv_nsec);
+                mAccumulator += delta;
+#if 0
+                mPreviousTs = ts;
+#endif
+            }
+        }
+        mIsEnabled = isEnabled;
+    }
+    return wasEnabled;
+}
+
+bool ThreadCpuUsage::sampleAndEnable(double& ns)
+{
+    bool ret;
+    bool wasEverEnabled = mWasEverEnabled;
+    if (enable()) {
+        // already enabled, so add a new sample relative to previous
+        return sample(ns);
+    } else if (wasEverEnabled) {
+        // was disabled, but add sample for accumulated time while enabled
+        ns = (double) mAccumulator;
+        mAccumulator = 0;
+        ALOGV("sampleAndEnable %.0f", ns);
+        return true;
+    } else {
+        // first time called
+        ns = 0.0;
+        ALOGV("sampleAndEnable false");
+        return false;
+    }
+}
+
+bool ThreadCpuUsage::sample(double &ns)
+{
+    if (mWasEverEnabled) {
+        if (mIsEnabled) {
+            struct timespec ts;
+            int rc;
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+                ns = 0.0;
+                return false;
+            } else {
+                long long delta = (ts.tv_sec - mPreviousTs.tv_sec) * 1000000000LL +
+                        (ts.tv_nsec - mPreviousTs.tv_nsec);
+                mAccumulator += delta;
+                mPreviousTs = ts;
+            }
+        } else {
+            mWasEverEnabled = false;
+        }
+        ns = (double) mAccumulator;
+        ALOGV("sample %.0f", ns);
+        mAccumulator = 0;
+        return true;
+    } else {
+        ALOGW("Can't add sample because measurements have never been enabled");
+        ns = 0.0;
+        return false;
+    }
+}
+
+long long ThreadCpuUsage::elapsed() const
+{
+    long long elapsed;
+    if (mMonotonicKnown) {
+        struct timespec ts;
+        int rc;
+        rc = clock_gettime(CLOCK_MONOTONIC, &ts);
+        if (rc) {
+            ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+            elapsed = 0;
+        } else {
+            // mMonotonicTs is updated only at first enable and resetStatistics
+            elapsed = (ts.tv_sec - mMonotonicTs.tv_sec) * 1000000000LL +
+                    (ts.tv_nsec - mMonotonicTs.tv_nsec);
+        }
+    } else {
+        ALOGW("Can't compute elapsed time because measurements have never been enabled");
+        elapsed = 0;
+    }
+    ALOGV("elapsed %lld", elapsed);
+    return elapsed;
+}
+
+void ThreadCpuUsage::resetElapsed()
+{
+    ALOGV("resetElapsed");
+    if (mMonotonicKnown) {
+        int rc;
+        rc = clock_gettime(CLOCK_MONOTONIC, &mMonotonicTs);
+        if (rc) {
+            ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+            mMonotonicKnown = false;
+        }
+    }
+}
+
+/*static*/
+int ThreadCpuUsage::sScalingFds[ThreadCpuUsage::MAX_CPU];
+pthread_once_t ThreadCpuUsage::sOnceControl = PTHREAD_ONCE_INIT;
+int ThreadCpuUsage::sKernelMax;
+pthread_mutex_t ThreadCpuUsage::sMutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*static*/
+void ThreadCpuUsage::init()
+{
+    // read the number of CPUs
+    sKernelMax = 1;
+    int fd = open("/sys/devices/system/cpu/kernel_max", O_RDONLY);
+    if (fd >= 0) {
+#define KERNEL_MAX_SIZE 12
+        char kernelMax[KERNEL_MAX_SIZE];
+        ssize_t actual = read(fd, kernelMax, sizeof(kernelMax));
+        if (actual >= 2 && kernelMax[actual-1] == '\n') {
+            sKernelMax = atoi(kernelMax);
+            if (sKernelMax >= MAX_CPU - 1) {
+                ALOGW("kernel_max %d but MAX_CPU %d", sKernelMax, MAX_CPU);
+                sKernelMax = MAX_CPU;
+            } else if (sKernelMax < 0) {
+                ALOGW("kernel_max invalid %d", sKernelMax);
+                sKernelMax = 1;
+            } else {
+                ++sKernelMax;
+                ALOGV("number of CPUs %d", sKernelMax);
+            }
+        } else {
+            ALOGW("Can't read number of CPUs");
+        }
+        (void) close(fd);
+    } else {
+        ALOGW("Can't open number of CPUs");
+    }
+    int i;
+    for (i = 0; i < MAX_CPU; ++i) {
+        sScalingFds[i] = -1;
+    }
+}
+
+uint32_t ThreadCpuUsage::getCpukHz(int cpuNum)
+{
+    if (cpuNum < 0 || cpuNum >= MAX_CPU) {
+        ALOGW("getCpukHz called with invalid CPU %d", cpuNum);
+        return 0;
+    }
+    // double-checked locking idiom is not broken for atomic values such as fd
+    int fd = sScalingFds[cpuNum];
+    if (fd < 0) {
+        // some kernels can't open a scaling file until hot plug complete
+        pthread_mutex_lock(&sMutex);
+        fd = sScalingFds[cpuNum];
+        if (fd < 0) {
+#define FREQ_SIZE 64
+            char freq_path[FREQ_SIZE];
+#define FREQ_DIGIT 27
+            COMPILE_TIME_ASSERT_FUNCTION_SCOPE(MAX_CPU <= 10);
+#define FREQ_PATH "/sys/devices/system/cpu/cpu?/cpufreq/scaling_cur_freq"
+            strlcpy(freq_path, FREQ_PATH, sizeof(freq_path));
+            freq_path[FREQ_DIGIT] = cpuNum + '0';
+            fd = open(freq_path, O_RDONLY | O_CLOEXEC);
+            // keep this fd until process exit or exec
+            sScalingFds[cpuNum] = fd;
+        }
+        pthread_mutex_unlock(&sMutex);
+        if (fd < 0) {
+            ALOGW("getCpukHz can't open CPU %d", cpuNum);
+            return 0;
+        }
+    }
+#define KHZ_SIZE 12
+    char kHz[KHZ_SIZE];   // kHz base 10
+    ssize_t actual = pread(fd, kHz, sizeof(kHz), (off_t) 0);
+    uint32_t ret;
+    if (actual >= 2 && kHz[actual-1] == '\n') {
+        ret = atoi(kHz);
+    } else {
+        ret = 0;
+    }
+    if (ret != mCurrentkHz[cpuNum]) {
+        if (ret > 0) {
+            ALOGV("CPU %d frequency %u kHz", cpuNum, ret);
+        } else {
+            ALOGW("Can't read CPU %d frequency", cpuNum);
+        }
+        mCurrentkHz[cpuNum] = ret;
+    }
+    return ret;
+}
+
+}   // namespace android
diff --git a/media/libeffects/testlibs/AudioFormatAdapter.h b/media/libeffects/testlibs/AudioFormatAdapter.h
index 41f1810..dea2734 100644
--- a/media/libeffects/testlibs/AudioFormatAdapter.h
+++ b/media/libeffects/testlibs/AudioFormatAdapter.h
@@ -75,6 +75,7 @@
         while (numSamples > 0) {
             uint32_t numSamplesIter = min(numSamples, mMaxSamplesPerCall);
             uint32_t nSamplesChannels = numSamplesIter * mNumChannels;
+            // This branch of "if" is untested
             if (mPcmFormat == AUDIO_FORMAT_PCM_8_24_BIT) {
                 if (mBehavior == EFFECT_BUFFER_ACCESS_WRITE) {
                     mpProcessor->process(
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index c35453b..8d00206 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -234,8 +234,7 @@
               (pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO));
     CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
               || pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
-    CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_8_24_BIT
-              || pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
+    CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
 
     int channelCount;
     if (pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_MONO) {
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 9faa497..8ae0908 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -23,6 +23,7 @@
 #include <media/AudioRecord.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <media/IAudioFlinger.h>
 
 #define WAIT_PERIOD_MS          10
 
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 693df60..a571fe4 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <media/AudioSystem.h>
+#include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
 #include <math.h>
 
@@ -361,8 +362,8 @@
     return af->setVoiceVolume(value);
 }
 
-status_t AudioSystem::getRenderPosition(size_t *halFrames, size_t *dspFrames,
-        audio_stream_type_t stream)
+status_t AudioSystem::getRenderPosition(audio_io_handle_t output, size_t *halFrames,
+                                        size_t *dspFrames, audio_stream_type_t stream)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
@@ -371,7 +372,11 @@
         stream = AUDIO_STREAM_MUSIC;
     }
 
-    return af->getRenderPosition(halFrames, dspFrames, getOutput(stream));
+    if (output == 0) {
+        output = getOutput(stream);
+    }
+
+    return af->getRenderPosition(halFrames, dspFrames, output);
 }
 
 size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
@@ -442,14 +447,14 @@
 
         OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
         gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %d channels %#x frameCount %u "
+        ALOGV("ioConfigChanged() new output samplingRate %u, format %d channel mask %#x frameCount %u "
                 "latency %d",
-                outputDesc->samplingRate, outputDesc->format, outputDesc->channels,
+                outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
                 outputDesc->frameCount, outputDesc->latency);
         } break;
     case OUTPUT_CLOSED: {
         if (gOutputs.indexOfKey(ioHandle) < 0) {
-            ALOGW("ioConfigChanged() closing unknow output! %d", ioHandle);
+            ALOGW("ioConfigChanged() closing unknown output! %d", ioHandle);
             break;
         }
         ALOGV("ioConfigChanged() output %d closed", ioHandle);
@@ -460,16 +465,16 @@
     case OUTPUT_CONFIG_CHANGED: {
         int index = gOutputs.indexOfKey(ioHandle);
         if (index < 0) {
-            ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
+            ALOGW("ioConfigChanged() modifying unknown output! %d", ioHandle);
             break;
         }
         if (param2 == NULL) break;
         desc = (const OutputDescriptor *)param2;
 
-        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channels %#x "
+        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channel mask %#x "
                 "frameCount %d latency %d",
                 ioHandle, desc->samplingRate, desc->format,
-                desc->channels, desc->frameCount, desc->latency);
+                desc->channelMask, desc->frameCount, desc->latency);
         OutputDescriptor *outputDesc = gOutputs.valueAt(index);
         delete outputDesc;
         outputDesc =  new OutputDescriptor(*desc);
@@ -532,6 +537,8 @@
     return gAudioPolicyService;
 }
 
+// ---------------------------------------------------------------------------
+
 status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                                audio_policy_dev_state_t state,
                                                const char *device_address)
@@ -585,11 +592,12 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags)
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return 0;
-    return aps->getOutput(stream, samplingRate, format, channelMask, flags);
+    return aps->getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
 }
 
 status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -764,6 +772,13 @@
     return af->getPrimaryOutputFrameCount();
 }
 
+status_t AudioSystem::setLowRamDevice(bool isLowRamDevice)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    return af->setLowRamDevice(isLowRamDevice);
+}
+
 void AudioSystem::clearAudioConfigCache()
 {
     Mutex::Autolock _l(gLock);
@@ -771,6 +786,14 @@
     gOutputs.clear();
 }
 
+bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
+{
+    ALOGV("isOffloadSupported()");
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isOffloadSupported(info);
+}
+
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index faca054..3653b7f 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -25,8 +25,11 @@
 #include <media/AudioTrack.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <media/IAudioFlinger.h>
 
-#define WAIT_PERIOD_MS          10
+#define WAIT_PERIOD_MS                  10
+#define WAIT_STREAM_END_TIMEOUT_SEC     120
+
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -97,7 +100,8 @@
         void* user,
         int notificationFrames,
         int sessionId,
-        transfer_type transferType)
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -105,7 +109,7 @@
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
-            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType);
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
 }
 
 AudioTrack::AudioTrack(
@@ -119,7 +123,8 @@
         void* user,
         int notificationFrames,
         int sessionId,
-        transfer_type transferType)
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -127,7 +132,7 @@
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
-            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType);
+            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
 }
 
 AudioTrack::~AudioTrack()
@@ -138,6 +143,7 @@
         // Otherwise the callback thread will never exit.
         stop();
         if (mAudioTrackThread != 0) {
+            mProxy->interrupt();
             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
@@ -164,7 +170,8 @@
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
         int sessionId,
-        transfer_type transferType)
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
 {
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -220,6 +227,8 @@
         return INVALID_OPERATION;
     }
 
+    mOutput = 0;
+
     // handle default values first.
     if (streamType == AUDIO_STREAM_DEFAULT) {
         streamType = AUDIO_STREAM_MUSIC;
@@ -255,7 +264,12 @@
     }
 
     // force direct flag if format is not linear PCM
-    if (!audio_is_linear_pcm(format)) {
+    // or offload was requested
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+            || !audio_is_linear_pcm(format)) {
+        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+                    ? "Offload request, forcing to Direct Output"
+                    : "Not linear PCM, forcing to Direct Output");
         flags = (audio_output_flags_t)
                 // FIXME why can't we allow direct AND fast?
                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
@@ -284,7 +298,8 @@
     audio_io_handle_t output = AudioSystem::getOutput(
                                     streamType,
                                     sampleRate, format, channelMask,
-                                    flags);
+                                    flags,
+                                    offloadInfo);
 
     if (output == 0) {
         ALOGE("Could not get audio output for stream type %d", streamType);
@@ -320,9 +335,14 @@
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
-            mAudioTrackThread->requestExit();
+            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
+            mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+    // Use of direct and offloaded output streams is ref counted by audio policy manager.
+        // As getOutput was called above and resulted in an output stream to be opened,
+        // we need to release it.
+        AudioSystem::releaseOutput(output);
         return status;
     }
 
@@ -341,23 +361,29 @@
     mSequence = 1;
     mObservedSequence = mSequence;
     mInUnderrun = false;
+    mOutput = output;
 
     return NO_ERROR;
 }
 
 // -------------------------------------------------------------------------
 
-void AudioTrack::start()
+status_t AudioTrack::start()
 {
     AutoMutex lock(mLock);
+
     if (mState == STATE_ACTIVE) {
-        return;
+        return INVALID_OPERATION;
     }
 
     mInUnderrun = true;
 
     State previousState = mState;
-    mState = STATE_ACTIVE;
+    if (previousState == STATE_PAUSED_STOPPING) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_ACTIVE;
+    }
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
         mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
@@ -367,7 +393,11 @@
 
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
-        t->resume();
+        if (previousState == STATE_STOPPING) {
+            mProxy->interrupt();
+        } else {
+            t->resume();
+        }
     } else {
         mPreviousPriority = getpriority(PRIO_PROCESS, 0);
         get_sched_policy(0, &mPreviousSchedulingGroup);
@@ -389,14 +419,16 @@
         ALOGE("start() status %d", status);
         mState = previousState;
         if (t != 0) {
-            t->pause();
+            if (previousState != STATE_STOPPING) {
+                t->pause();
+            }
         } else {
             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
             set_sched_policy(0, mPreviousSchedulingGroup);
         }
     }
 
-    // FIXME discarding status
+    return status;
 }
 
 void AudioTrack::stop()
@@ -407,7 +439,12 @@
         return;
     }
 
-    mState = STATE_STOPPED;
+    if (isOffloaded()) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_STOPPED;
+    }
+
     mProxy->interrupt();
     mAudioTrack->stop();
     // the playback head position will reset to 0, so if a marker is set, we need
@@ -421,9 +458,12 @@
         flush_l();
     }
 #endif
+
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
-        t->pause();
+        if (!isOffloaded()) {
+            t->pause();
+        }
     } else {
         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
         set_sched_policy(0, mPreviousSchedulingGroup);
@@ -456,8 +496,12 @@
     mMarkerPosition = 0;
     mMarkerReached = false;
     mUpdatePeriod = 0;
+    mRefreshRemaining = true;
 
     mState = STATE_FLUSHED;
+    if (isOffloaded()) {
+        mProxy->interrupt();
+    }
     mProxy->flush();
     mAudioTrack->flush();
 }
@@ -465,10 +509,13 @@
 void AudioTrack::pause()
 {
     AutoMutex lock(mLock);
-    if (mState != STATE_ACTIVE) {
+    if (mState == STATE_ACTIVE) {
+        mState = STATE_PAUSED;
+    } else if (mState == STATE_STOPPING) {
+        mState = STATE_PAUSED_STOPPING;
+    } else {
         return;
     }
-    mState = STATE_PAUSED;
     mProxy->interrupt();
     mAudioTrack->pause();
 }
@@ -515,7 +562,7 @@
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    if (mIsTimed) {
+    if (mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -547,7 +594,7 @@
 
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -580,7 +627,8 @@
 
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == NULL) {
+    // The only purpose of setting marker position is to get a callback
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -593,6 +641,9 @@
 
 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (marker == NULL) {
         return BAD_VALUE;
     }
@@ -605,19 +656,22 @@
 
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == NULL) {
+    // The only purpose of setting position update period is to get a callback
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
     AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
-
     return NO_ERROR;
 }
 
 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (updatePeriod == NULL) {
         return BAD_VALUE;
     }
@@ -630,7 +684,7 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
     if (position > mFrameCount) {
@@ -663,10 +717,19 @@
     }
 
     AutoMutex lock(mLock);
-    // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
-    *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
-            mProxy->getPosition();
+    if (isOffloaded()) {
+        uint32_t dspFrames = 0;
 
+        if (mOutput != 0) {
+            uint32_t halFrames;
+            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+        }
+        *position = dspFrames;
+    } else {
+        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
+        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
+                mProxy->getPosition();
+    }
     return NO_ERROR;
 }
 
@@ -686,7 +749,7 @@
 
 status_t AudioTrack::reload()
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -706,14 +769,18 @@
 audio_io_handle_t AudioTrack::getOutput()
 {
     AutoMutex lock(mLock);
-    return getOutput_l();
+    return mOutput;
 }
 
 // must be called with mLock held
 audio_io_handle_t AudioTrack::getOutput_l()
 {
-    return AudioSystem::getOutput(mStreamType,
-            mSampleRate, mFormat, mChannelMask, mFlags);
+    if (mOutput) {
+        return mOutput;
+    } else {
+        return AudioSystem::getOutput(mStreamType,
+                                      mSampleRate, mFormat, mChannelMask, mFlags);
+    }
 }
 
 status_t AudioTrack::attachAuxEffect(int effectId)
@@ -784,7 +851,9 @@
             }
             frameCount = afFrameCount;
         }
-
+        if (mNotificationFramesAct != frameCount) {
+            mNotificationFramesAct = frameCount;
+        }
     } else if (sharedBuffer != 0) {
 
         // Ensure that buffer alignment matches channel count
@@ -868,6 +937,10 @@
         }
     }
 
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
+    }
+
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       sampleRate,
                                                       // AudioFlinger only sees 16-bit PCM
@@ -930,6 +1003,17 @@
             }
         }
     }
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
+            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
+        } else {
+            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
+            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+            mFlags = flags;
+            return NO_INIT;
+        }
+    }
+
     mRefreshRemaining = true;
 
     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
@@ -1033,6 +1117,9 @@
                 if (newSequence == oldSequence) {
                     status = restoreTrack_l("obtainBuffer");
                     if (status != NO_ERROR) {
+                        buffer.mFrameCount = 0;
+                        buffer.mRaw = NULL;
+                        buffer.mNonContig = 0;
                         break;
                     }
                 }
@@ -1043,6 +1130,14 @@
             proxy = mProxy;
             iMem = mCblkMemory;
 
+            if (mState == STATE_STOPPING) {
+                status = -EINTR;
+                buffer.mFrameCount = 0;
+                buffer.mRaw = NULL;
+                buffer.mNonContig = 0;
+                break;
+            }
+
             // Non-blocking if track is stopped or paused
             if (mState != STATE_ACTIVE) {
                 requested = &ClientProxy::kNonBlocking;
@@ -1215,6 +1310,11 @@
 
 nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
+    // Currently the AudioTrack thread is not created if there are no callbacks.
+    // Would it ever make sense to run the thread, even without callbacks?
+    // If so, then replace this by checks at each use for mCbf != NULL.
+    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
+
     mLock.lock();
     if (mAwaitBoost) {
         mAwaitBoost = false;
@@ -1233,7 +1333,8 @@
         if (tryCounter < 0) {
             ALOGE("did not receive expected priority boost on time");
         }
-        return true;
+        // Run again immediately
+        return 0;
     }
 
     // Can only reference mCblk while locked
@@ -1242,12 +1343,18 @@
 
     // Check for track invalidation
     if (flags & CBLK_INVALID) {
-        (void) restoreTrack_l("processAudioBuffer");
-        mLock.unlock();
-        // Run again immediately, but with a new IAudioTrack
-        return 0;
+        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
+        // AudioSystem cache. We should not exit here but after calling the callback so
+        // that the upper layers can recreate the track
+        if (!isOffloaded() || (mSequence == mObservedSequence)) {
+            status_t status = restoreTrack_l("processAudioBuffer");
+            mLock.unlock();
+            // Run again immediately, but with a new IAudioTrack
+            return 0;
+        }
     }
 
+    bool waitStreamEnd = mState == STATE_STOPPING;
     bool active = mState == STATE_ACTIVE;
 
     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
@@ -1301,7 +1408,7 @@
         mRetryOnPartialBuffer = false;
     }
     size_t misalignment = mProxy->getMisalignment();
-    int32_t sequence = mSequence;
+    uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
@@ -1309,6 +1416,38 @@
 
     mLock.unlock();
 
+    if (waitStreamEnd) {
+        AutoMutex lock(mLock);
+
+        sp<AudioTrackClientProxy> proxy = mProxy;
+        sp<IMemory> iMem = mCblkMemory;
+
+        struct timespec timeout;
+        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
+        timeout.tv_nsec = 0;
+
+        mLock.unlock();
+        status_t status = mProxy->waitStreamEndDone(&timeout);
+        mLock.lock();
+        switch (status) {
+        case NO_ERROR:
+        case DEAD_OBJECT:
+        case TIMED_OUT:
+            mLock.unlock();
+            mCbf(EVENT_STREAM_END, mUserData, NULL);
+            mLock.lock();
+            if (mState == STATE_STOPPING) {
+                mState = STATE_STOPPED;
+                if (status != DEAD_OBJECT) {
+                    return NS_INACTIVE;
+                }
+            }
+            return 0;
+        default:
+            return 0;
+        }
+    }
+
     // perform callbacks while unlocked
     if (newUnderrun) {
         mCbf(EVENT_UNDERRUN, mUserData, NULL);
@@ -1330,9 +1469,14 @@
         newPosition += updatePeriod;
         newPosCount--;
     }
+
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        // for offloaded tracks, just wait for the upper layers to recreate the track
+        if (isOffloaded()) {
+            return NS_INACTIVE;
+        }
     }
 
     // if inactive, then don't run me again until re-started
@@ -1391,10 +1535,11 @@
                 "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
-        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
-                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
-            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
+                    (isOffloaded() && (err == DEAD_OBJECT))) {
                 return 0;
             }
             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
@@ -1487,7 +1632,8 @@
 
 status_t AudioTrack::restoreTrack_l(const char *from)
 {
-    ALOGW("dead IAudioTrack, creating a new one from %s()", from);
+    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
+          isOffloaded() ? "Offloaded" : "PCM", from);
     ++mSequence;
     status_t result;
 
@@ -1495,6 +1641,14 @@
     // output parameters in getOutput_l() and createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
+    if (isOffloaded()) {
+        return DEAD_OBJECT;
+    }
+
+    // force new output query from audio policy manager;
+    mOutput = 0;
+    audio_io_handle_t output = getOutput_l();
+
     // if the new IAudioTrack is created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory
@@ -1507,7 +1661,7 @@
                            mReqFrameCount,  // so that frame count never goes down
                            mFlags,
                            mSharedBuffer,
-                           getOutput_l(),
+                           output,
                            position /*epoch*/);
 
     if (result == NO_ERROR) {
@@ -1536,6 +1690,10 @@
         }
     }
     if (result != NO_ERROR) {
+        // Use of direct and offloaded output streams is ref counted by audio policy manager.
+        // As getOutput was called above and resulted in an output stream to be opened,
+        // we need to release it.
+        AudioSystem::releaseOutput(output);
         ALOGW("restoreTrack_l() failed status %d", result);
         mState = STATE_STOPPED;
     }
@@ -1543,6 +1701,25 @@
     return result;
 }
 
+status_t AudioTrack::setParameters(const String8& keyValuePairs)
+{
+    AutoMutex lock(mLock);
+    if (mAudioTrack != 0) {
+        return mAudioTrack->setParameters(keyValuePairs);
+    } else {
+        return NO_INIT;
+    }
+}
+
+String8 AudioTrack::getParameters(const String8& keys)
+{
+    if (mOutput) {
+        return AudioSystem::getParameters(mOutput, keys);
+    } else {
+        return String8::empty();
+    }
+}
+
 status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
 {
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 5f8f292..aa45a2f 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -38,7 +38,7 @@
         bool isOut, bool clientInServer)
     : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize),
       mFrameCountP2(roundup(frameCount)), mIsOut(isOut), mClientInServer(clientInServer),
-      mIsShutdown(false)
+      mIsShutdown(false), mUnreleased(0)
 {
 }
 
@@ -64,10 +64,7 @@
 status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
         struct timespec *elapsed)
 {
-    if (buffer == NULL || buffer->mFrameCount == 0) {
-        ALOGE("%s BAD_VALUE", __func__);
-        return BAD_VALUE;
-    }
+    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
     struct timespec total;          // total elapsed time spent waiting
     total.tv_sec = 0;
     total.tv_nsec = 0;
@@ -164,7 +161,7 @@
             buffer->mRaw = part1 > 0 ?
                     &((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL;
             buffer->mNonContig = avail - part1;
-            // mUnreleased = part1;
+            mUnreleased = part1;
             status = NO_ERROR;
             break;
         }
@@ -203,7 +200,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("%s timeout=%d", timeout);
+            LOG_FATAL("obtainBuffer() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -238,6 +235,7 @@
             case -EWOULDBLOCK:  // benign race condition with server
             case -EINTR:        // wait was interrupted by signal or other spurious wakeup
             case -ETIMEDOUT:    // time-out expired
+                // FIXME these error/non-0 status are being dropped
                 break;
             default:
                 ALOGE("%s unexpected error %d", __func__, ret);
@@ -252,6 +250,7 @@
         buffer->mFrameCount = 0;
         buffer->mRaw = NULL;
         buffer->mNonContig = 0;
+        mUnreleased = 0;
     }
     if (elapsed != NULL) {
         *elapsed = total;
@@ -260,22 +259,26 @@
         requested = &kNonBlocking;
     }
     if (measure) {
-        ALOGV("requested %d.%03d elapsed %d.%03d", requested->tv_sec, requested->tv_nsec / 1000000,
-                total.tv_sec, total.tv_nsec / 1000000);
+        ALOGV("requested %ld.%03ld elapsed %ld.%03ld",
+              requested->tv_sec, requested->tv_nsec / 1000000,
+              total.tv_sec, total.tv_nsec / 1000000);
     }
     return status;
 }
 
 void ClientProxy::releaseBuffer(Buffer* buffer)
 {
+    LOG_ALWAYS_FATAL_IF(buffer == NULL);
     size_t stepCount = buffer->mFrameCount;
-    // FIXME
-    //  check mUnreleased
-    //  verify that stepCount <= frameCount returned by the last obtainBuffer()
-    //  verify stepCount not > total frame count of pipe
-    if (stepCount == 0) {
+    if (stepCount == 0 || mIsShutdown) {
+        // prevent accidental re-use of buffer
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
         return;
     }
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
+    mUnreleased -= stepCount;
     audio_track_cblk_t* cblk = mCblk;
     // Both of these barriers are required
     if (mIsOut) {
@@ -320,6 +323,121 @@
     mCblk->u.mStreaming.mFlush++;
 }
 
+bool AudioTrackClientProxy::clearStreamEndDone() {
+    return (android_atomic_and(~CBLK_STREAM_END_DONE, &mCblk->flags) & CBLK_STREAM_END_DONE) != 0;
+}
+
+bool AudioTrackClientProxy::getStreamEndDone() const {
+    return (mCblk->flags & CBLK_STREAM_END_DONE) != 0;
+}
+
+status_t AudioTrackClientProxy::waitStreamEndDone(const struct timespec *requested)
+{
+    struct timespec total;          // total elapsed time spent waiting
+    total.tv_sec = 0;
+    total.tv_nsec = 0;
+    audio_track_cblk_t* cblk = mCblk;
+    status_t status;
+    enum {
+        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
+        TIMEOUT_INFINITE,   // *requested == infinity
+        TIMEOUT_FINITE,     // 0 < *requested < infinity
+        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
+    } timeout;
+    if (requested == NULL) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == INT_MAX) {
+        timeout = TIMEOUT_INFINITE;
+    } else {
+        timeout = TIMEOUT_FINITE;
+    }
+    for (;;) {
+        int32_t flags = android_atomic_and(~(CBLK_INTERRUPT|CBLK_STREAM_END_DONE), &cblk->flags);
+        // check for track invalidation by server, or server death detection
+        if (flags & CBLK_INVALID) {
+            ALOGV("Track invalidated");
+            status = DEAD_OBJECT;
+            goto end;
+        }
+        if (flags & CBLK_STREAM_END_DONE) {
+            ALOGV("stream end received");
+            status = NO_ERROR;
+            goto end;
+        }
+        // check for waitStreamEndDone() interrupted by the client
+        // (flag is set when the client calls interrupt(), e.g. from stop() or pause())
+        if (flags & CBLK_INTERRUPT) {
+            ALOGV("waitStreamEndDone() interrupted by client");
+            status = -EINTR;
+            goto end;
+        }
+        struct timespec remaining;
+        const struct timespec *ts;
+        switch (timeout) {
+        case TIMEOUT_ZERO:
+            status = WOULD_BLOCK;
+            goto end;
+        case TIMEOUT_INFINITE:
+            ts = NULL;
+            break;
+        case TIMEOUT_FINITE:
+            timeout = TIMEOUT_CONTINUE;
+            if (MAX_SEC == 0) {
+                ts = requested;
+                break;
+            }
+            // fall through
+        case TIMEOUT_CONTINUE:
+            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
+            if (requested->tv_sec < total.tv_sec ||
+                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
+                status = TIMED_OUT;
+                goto end;
+            }
+            remaining.tv_sec = requested->tv_sec - total.tv_sec;
+            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
+                remaining.tv_nsec += 1000000000;
+                remaining.tv_sec++;
+            }
+            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
+                remaining.tv_sec = MAX_SEC;
+                remaining.tv_nsec = 0;
+            }
+            ts = &remaining;
+            break;
+        default:
+            LOG_FATAL("waitStreamEndDone() timeout=%d", timeout);
+            ts = NULL;
+            break;
+        }
+        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
+        if (!(old & CBLK_FUTEX_WAKE)) {
+            int rc;
+            int ret = __futex_syscall4(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+            switch (ret) {
+            case 0:             // normal wakeup by server, or by binderDied()
+            case -EWOULDBLOCK:  // benign race condition with server
+            case -EINTR:        // wait was interrupted by signal or other spurious wakeup
+            case -ETIMEDOUT:    // time-out expired
+                break;
+            default:
+                ALOGE("%s unexpected error %d", __func__, ret);
+                status = -ret;
+                goto end;
+            }
+        }
+    }
+
+end:
+    if (requested == NULL) {
+        requested = &kNonBlocking;
+    }
+    return status;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
@@ -362,20 +480,18 @@
 
 ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
         size_t frameSize, bool isOut, bool clientInServer)
-    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mUnreleased(0),
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
       mAvailToClient(0), mFlush(0), mDeferWake(false)
 {
 }
 
 status_t ServerProxy::obtainBuffer(Buffer* buffer)
 {
+    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
     if (mIsShutdown) {
-        buffer->mFrameCount = 0;
-        buffer->mRaw = NULL;
-        buffer->mNonContig = 0;
-        mUnreleased = 0;
-        return NO_INIT;
+        goto no_init;
     }
+    {
     audio_track_cblk_t* cblk = mCblk;
     // compute number of frames available to write (AudioTrack) or read (AudioRecord),
     // or use previous cached value from framesReady(), with added barrier if it omits.
@@ -385,11 +501,19 @@
     if (mIsOut) {
         int32_t flush = cblk->u.mStreaming.mFlush;
         rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+        front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
-            front = rear;
             mFlush = flush;
-        } else {
-            front = cblk->u.mStreaming.mFront;
+            // effectively obtain then release whatever is in the buffer
+            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
+            if (front != rear) {
+                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
+                if (!(old & CBLK_FUTEX_WAKE)) {
+                    (void) __futex_syscall3(&cblk->mFutex,
+                            mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                }
+            }
+            front = rear;
         }
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -402,11 +526,7 @@
         mIsShutdown = true;
     }
     if (mIsShutdown) {
-        buffer->mFrameCount = 0;
-        buffer->mRaw = NULL;
-        buffer->mNonContig = 0;
-        mUnreleased = 0;
-        return NO_INIT;
+        goto no_init;
     }
     // don't allow filling pipe beyond the nominal size
     size_t availToServer;
@@ -443,23 +563,27 @@
     // FIXME need to test for recording
     mDeferWake = part1 < ask && availToServer >= ask;
     return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
+    }
+no_init:
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+    mUnreleased = 0;
+    return NO_INIT;
 }
 
 void ServerProxy::releaseBuffer(Buffer* buffer)
 {
-    if (mIsShutdown) {
+    LOG_ALWAYS_FATAL_IF(buffer == NULL);
+    size_t stepCount = buffer->mFrameCount;
+    if (stepCount == 0 || mIsShutdown) {
+        // prevent accidental re-use of buffer
         buffer->mFrameCount = 0;
         buffer->mRaw = NULL;
         buffer->mNonContig = 0;
         return;
     }
-    size_t stepCount = buffer->mFrameCount;
-    LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
-    if (stepCount == 0) {
-        buffer->mRaw = NULL;
-        buffer->mNonContig = 0;
-        return;
-    }
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
     mUnreleased -= stepCount;
     audio_track_cblk_t* cblk = mCblk;
     if (mIsOut) {
@@ -507,6 +631,11 @@
         return 0;
     }
     audio_track_cblk_t* cblk = mCblk;
+
+    int32_t flush = cblk->u.mStreaming.mFlush;
+    if (flush != mFlush) {
+        return mFrameCount;
+    }
     // the acquire might not be necessary since not doing a subsequent read
     int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
     ssize_t filled = rear - cblk->u.mStreaming.mFront;
@@ -522,6 +651,16 @@
     return filled;
 }
 
+bool  AudioTrackServerProxy::setStreamEndDone() {
+    bool old =
+            (android_atomic_or(CBLK_STREAM_END_DONE, &mCblk->flags) & CBLK_STREAM_END_DONE) != 0;
+    if (!old) {
+        (void) __futex_syscall3(&mCblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+    return old;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
@@ -637,8 +776,9 @@
 void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
 {
     size_t stepCount = buffer->mFrameCount;
-    LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
     if (stepCount == 0) {
+        // prevent accidental re-use of buffer
         buffer->mRaw = NULL;
         buffer->mNonContig = 0;
         return;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 2f18680..c670936 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -73,6 +73,7 @@
     LOAD_HW_MODULE,
     GET_PRIMARY_OUTPUT_SAMPLING_RATE,
     GET_PRIMARY_OUTPUT_FRAME_COUNT,
+    SET_LOW_RAM_DEVICE,
 };
 
 class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -361,15 +362,16 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags)
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
-        uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
-        uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
-
+        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
+        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
+        audio_channel_mask_t channelMask = pChannelMask != NULL ?
+                *pChannelMask : (audio_channel_mask_t)0;
+        uint32_t latency = pLatencyMs != NULL ? *pLatencyMs : 0;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
         data.writeInt32(devices);
@@ -378,19 +380,25 @@
         data.writeInt32(channelMask);
         data.writeInt32(latency);
         data.writeInt32((int32_t) flags);
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(OPEN_OUTPUT, data, &reply);
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices) *pDevices = devices;
+        if (pDevices != NULL) *pDevices = devices;
         samplingRate = reply.readInt32();
-        if (pSamplingRate) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
         format = (audio_format_t) reply.readInt32();
-        if (pFormat) *pFormat = format;
+        if (pFormat != NULL) *pFormat = format;
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) *pChannelMask = channelMask;
         latency = reply.readInt32();
-        if (pLatencyMs) *pLatencyMs = latency;
+        if (pLatencyMs != NULL) *pLatencyMs = latency;
         return output;
     }
 
@@ -439,10 +447,11 @@
                                         audio_channel_mask_t *pChannelMask)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
-        uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
+        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
+        audio_channel_mask_t channelMask = pChannelMask != NULL ?
+                *pChannelMask : (audio_channel_mask_t)0;
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
@@ -453,13 +462,13 @@
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices) *pDevices = devices;
+        if (pDevices != NULL) *pDevices = devices;
         samplingRate = reply.readInt32();
-        if (pSamplingRate) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
         format = (audio_format_t) reply.readInt32();
-        if (pFormat) *pFormat = format;
+        if (pFormat != NULL) *pFormat = format;
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) *pChannelMask = channelMask;
         return input;
     }
 
@@ -695,6 +704,15 @@
         return reply.readInt32();
     }
 
+    virtual status_t setLowRamDevice(bool isLowRamDevice)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32((int) isLowRamDevice);
+        remote()->transact(SET_LOW_RAM_DEVICE, data, &reply);
+        return reply.readInt32();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -868,13 +886,19 @@
             audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
             uint32_t latency = data.readInt32();
             audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = openOutput(module,
                                                  &devices,
                                                  &samplingRate,
                                                  &format,
                                                  &channelMask,
                                                  &latency,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             ALOGV("OPEN_OUTPUT output, %p", output);
             reply->writeInt32((int32_t) output);
             reply->writeInt32(devices);
@@ -1056,6 +1080,12 @@
             reply->writeInt32(getPrimaryOutputFrameCount());
             return NO_ERROR;
         } break;
+        case SET_LOW_RAM_DEVICE: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            bool isLowRamDevice = data.readInt32() != 0;
+            reply->writeInt32(setLowRamDevice(isLowRamDevice));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 2d1e0f8..84a589a 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -54,7 +54,7 @@
                     (const AudioSystem::OutputDescriptor *)param2;
             data.writeInt32(desc->samplingRate);
             data.writeInt32(desc->format);
-            data.writeInt32(desc->channels);
+            data.writeInt32(desc->channelMask);
             data.writeInt32(desc->frameCount);
             data.writeInt32(desc->latency);
         }
@@ -84,7 +84,7 @@
             } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
                 desc.samplingRate = data.readInt32();
                 desc.format = data.readInt32();
-                desc.channels = data.readInt32();
+                desc.channelMask = (audio_channel_mask_t) data.readInt32();
                 desc.frameCount = data.readInt32();
                 desc.latency = data.readInt32();
                 param2 = &desc;
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 386c351..4be3c09 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -56,7 +56,8 @@
     GET_DEVICES_FOR_STREAM,
     QUERY_DEFAULT_PRE_PROCESSING,
     SET_EFFECT_ENABLED,
-    IS_STREAM_ACTIVE_REMOTELY
+    IS_STREAM_ACTIVE_REMOTELY,
+    IS_OFFLOAD_SUPPORTED
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -126,7 +127,8 @@
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
-                                        audio_output_flags_t flags)
+                                        audio_output_flags_t flags,
+                                        const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -135,6 +137,12 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(static_cast <uint32_t>(flags));
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(GET_OUTPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
@@ -374,6 +382,14 @@
         *count = retCount;
         return status;
     }
+
+    virtual bool isOffloadSupported(const audio_offload_info_t& info)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(&info, sizeof(audio_offload_info_t));
+        remote()->transact(IS_OFFLOAD_SUPPORTED, data, &reply);
+        return reply.readInt32();    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -442,12 +458,17 @@
             audio_channel_mask_t channelMask = data.readInt32();
             audio_output_flags_t flags =
                     static_cast <audio_output_flags_t>(data.readInt32());
-
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = getOutput(stream,
                                                  samplingRate,
                                                  format,
                                                  channelMask,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             reply->writeInt32(static_cast <int>(output));
             return NO_ERROR;
         } break;
@@ -654,6 +675,15 @@
             return status;
         }
 
+        case IS_OFFLOAD_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_offload_info_t info;
+            data.read(&info, sizeof(audio_offload_info_t));
+            bool isSupported = isOffloadSupported(info);
+            reply->writeInt32(isSupported);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 0d06e98..4a7de65 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,6 +42,18 @@
     {
     }
 
+    virtual sp<IMemory> getCblk() const
+    {
+        Parcel data, reply;
+        sp<IMemory> cblk;
+        data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CBLK, data, &reply);
+        if (status == NO_ERROR) {
+            cblk = interface_cast<IMemory>(reply.readStrongBinder());
+        }
+        return cblk;
+    }
+
     virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession)
     {
         Parcel data, reply;
@@ -64,17 +76,6 @@
         remote()->transact(STOP, data, &reply);
     }
 
-    virtual sp<IMemory> getCblk() const
-    {
-        Parcel data, reply;
-        sp<IMemory> cblk;
-        data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_CBLK, data, &reply);
-        if (status == NO_ERROR) {
-            cblk = interface_cast<IMemory>(reply.readStrongBinder());
-        }
-        return cblk;
-    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index e92f8aa..a2b49a3 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -39,6 +39,7 @@
     ALLOCATE_TIMED_BUFFER,
     QUEUE_TIMED_BUFFER,
     SET_MEDIA_TIME_TRANSFORM,
+    SET_PARAMETERS
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -154,6 +155,17 @@
         }
         return status;
     }
+
+    virtual status_t setParameters(const String8& keyValuePairs) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeString8(keyValuePairs);
+        status_t status = remote()->transact(SET_PARAMETERS, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -223,6 +235,12 @@
             reply->writeInt32(setMediaTimeTransform(xform, target));
             return NO_ERROR;
         } break;
+        case SET_PARAMETERS: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            String8 keyValuePairs(data.readString8());
+            reply->writeInt32(setParameters(keyValuePairs));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index d6cd43a..5bbb2f0 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -51,6 +51,7 @@
     GET_EXTENSION_INDEX,
     OBSERVER_ON_MSG,
     GET_GRAPHIC_BUFFER_USAGE,
+    SET_INTERNAL_OPTION,
 };
 
 class BpOMX : public BpInterface<IOMX> {
@@ -439,6 +440,24 @@
 
         return err;
     }
+
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *optionData,
+            size_t size) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeIntPtr((intptr_t)node);
+        data.writeInt32(port_index);
+        data.writeInt32(size);
+        data.write(optionData, size);
+        data.writeInt32(type);
+        remote()->transact(SET_INTERNAL_OPTION, data, &reply);
+
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
@@ -537,6 +556,7 @@
         case SET_PARAMETER:
         case GET_CONFIG:
         case SET_CONFIG:
+        case SET_INTERNAL_OPTION:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
@@ -562,6 +582,15 @@
                 case SET_CONFIG:
                     err = setConfig(node, index, params, size);
                     break;
+                case SET_INTERNAL_OPTION:
+                {
+                    InternalOptionType type =
+                        (InternalOptionType)data.readInt32();
+
+                    err = setInternalOption(node, index, type, params, size);
+                    break;
+                }
+
                 default:
                     TRESPASS();
             }
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 8fe5bb3..e914b34 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "JetPlayer-C"
 
 #include <utils/Log.h>
-#include <utils/threads.h>
-
 #include <media/JetPlayer.h>
 
 
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index e1e88ec..7f10e05 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -20,14 +20,8 @@
 
 //#define USE_SHARED_MEM_BUFFER
 
-// XXX needed for timing latency
-#include <utils/Timers.h>
-
 #include <media/AudioTrack.h>
 #include <media/mediaplayer.h>
-
-#include <system/audio.h>
-
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
 
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index f9ad31d..adef3be 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -16,13 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ToneGenerator"
-#include <utils/threads.h>
 
-#include <stdio.h>
 #include <math.h>
 #include <utils/Log.h>
-#include <utils/RefBase.h>
-#include <utils/Timers.h>
 #include <cutils/properties.h>
 #include "media/ToneGenerator.h"
 
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 5b4071b..e519f13 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -28,6 +28,7 @@
 
 #include <media/Visualizer.h>
 #include <audio_utils/fixedfft.h>
+#include <utils/Thread.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index ae4d845..62593b2 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -134,7 +134,6 @@
         return;
     }
 
-    ALOGE("Failed to find crypto plugin");
     mInitCheck = ERROR_UNSUPPORTED;
 }
 
@@ -151,6 +150,7 @@
     if (!mLibrary.get()) {
         mLibrary = new SharedLibrary(path);
         if (!*mLibrary) {
+            ALOGE("loadLibraryForScheme failed:%s", mLibrary->lastError());
             return false;
         }
 
@@ -165,6 +165,7 @@
     if (createCryptoFactory == NULL ||
         (mFactory = createCryptoFactory()) == NULL ||
         !mFactory->isCryptoSchemeSupported(uuid)) {
+        ALOGE("createCryptoFactory failed:%s", mLibrary->lastError());
         closeFactory();
         return false;
     }
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index 1e6cd94..f00f488 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -71,6 +71,12 @@
 status_t Drm::setListener(const sp<IDrmClient>& listener)
 {
     Mutex::Autolock lock(mEventLock);
+    if (mListener != NULL){
+        mListener->asBinder()->unlinkToDeath(this);
+    }
+    if (listener != NULL) {
+        listener->asBinder()->linkToDeath(this);
+    }
     mListener = listener;
     return NO_ERROR;
 }
@@ -576,4 +582,12 @@
     return mPlugin->verify(sessionId, keyId, message, signature, match);
 }
 
+void Drm::binderDied(const wp<IBinder> &the_late_who)
+{
+    delete mPlugin;
+    mPlugin = NULL;
+    closeFactory();
+    mListener.clear();
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 3da8ad4..3f460f1 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -29,7 +29,9 @@
 struct DrmFactory;
 struct DrmPlugin;
 
-struct Drm : public BnDrm, public DrmPluginListener {
+struct Drm : public BnDrm,
+             public IBinder::DeathRecipient,
+             public DrmPluginListener {
     Drm();
     virtual ~Drm();
 
@@ -115,6 +117,8 @@
                            Vector<uint8_t> const *sessionId,
                            Vector<uint8_t> const *data);
 
+    virtual void binderDied(const wp<IBinder> &the_late_who);
+
 private:
     mutable Mutex mLock;
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index fa1ff36..8833bd7 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -53,6 +53,8 @@
 #include <media/AudioTrack.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/AudioPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 #include <system/audio.h>
 
@@ -321,7 +323,7 @@
                 mHeap->getBase(), mHeap->getSize(), mHeap->getFlags(), mHeap->getDevice());
         result.append(buffer);
     }
-    snprintf(buffer, 255, "  msec per frame(%f), channel count(%d), format(%d), frame count(%ld)\n",
+    snprintf(buffer, 255, "  msec per frame(%f), channel count(%d), format(%d), frame count(%zd)\n",
             mMsecsPerFrame, mChannelCount, mFormat, mFrameCount);
     result.append(buffer);
     snprintf(buffer, 255, "  sample rate(%d), size(%d), error(%d), command complete(%s)\n",
@@ -1381,11 +1383,51 @@
     return OK;
 }
 
+status_t MediaPlayerService::AudioOutput::setParameters(const String8& keyValuePairs)
+{
+    if (mTrack == 0) return NO_INIT;
+    return mTrack->setParameters(keyValuePairs);
+}
+
+String8  MediaPlayerService::AudioOutput::getParameters(const String8& keys)
+{
+    if (mTrack == 0) return String8::empty();
+    return mTrack->getParameters(keys);
+}
+
+void MediaPlayerService::AudioOutput::deleteRecycledTrack()
+{
+    ALOGV("deleteRecycledTrack");
+
+    if (mRecycledTrack != 0) {
+
+        if (mCallbackData != NULL) {
+            mCallbackData->setOutput(NULL);
+            mCallbackData->endTrackSwitch();
+        }
+
+        if ((mRecycledTrack->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+            mRecycledTrack->flush();
+        }
+        // An offloaded track isn't flushed because the STREAM_END is reported
+        // slightly prematurely to allow time for the gapless track switch
+        // but this means that if we decide not to recycle the track there
+        // could be a small amount of residual data still playing. We leave
+        // AudioFlinger to drain the track.
+
+        mRecycledTrack.clear();
+        delete mCallbackData;
+        mCallbackData = NULL;
+        close();
+    }
+}
+
 status_t MediaPlayerService::AudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
         AudioCallback cb, void *cookie,
-        audio_output_flags_t flags)
+        audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
 {
     mCallback = cb;
     mCallbackCookie = cookie;
@@ -1396,20 +1438,34 @@
         bufferCount = mMinBufferCount;
 
     }
-    ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask,
-            format, bufferCount, mSessionId);
+    ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d 0x%x)", sampleRate, channelCount, channelMask,
+                format, bufferCount, mSessionId, flags);
     uint32_t afSampleRate;
     size_t afFrameCount;
     uint32_t frameCount;
 
-    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
-        return NO_INIT;
-    }
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
-        return NO_INIT;
+    // offloading is only supported in callback mode for now.
+    // offloadInfo must be present if offload flag is set
+    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
+            ((cb == NULL) || (offloadInfo == NULL))) {
+        return BAD_VALUE;
     }
 
-    frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        frameCount = 0; // AudioTrack will get frame count from AudioFlinger
+    } else {
+        uint32_t afSampleRate;
+        size_t afFrameCount;
+
+        if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+        if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+
+        frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    }
 
     if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
         channelMask = audio_channel_out_mask_from_count(channelCount);
@@ -1419,65 +1475,108 @@
         }
     }
 
-    sp<AudioTrack> t;
-    CallbackData *newcbd = NULL;
-    if (mCallback != NULL) {
-        newcbd = new CallbackData(this);
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                CallbackWrapper,
-                newcbd,
-                0,  // notification frames
-                mSessionId);
-    } else {
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                NULL,
-                NULL,
-                0,
-                mSessionId);
-    }
-
-    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
-        ALOGE("Unable to create audio track");
-        delete newcbd;
-        return NO_INIT;
-    }
-
+    // Check whether we can recycle the track
+    bool reuse = false;
+    bool bothOffloaded = false;
 
     if (mRecycledTrack != 0) {
-        // check if the existing track can be reused as-is, or if a new track needs to be created.
+        // check whether we are switching between two offloaded tracks
+        bothOffloaded = (flags & mRecycledTrack->getFlags()
+                                & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0;
 
-        bool reuse = true;
+        // check if the existing track can be reused as-is, or if a new track needs to be created.
+        reuse = true;
+
         if ((mCallbackData == NULL && mCallback != NULL) ||
                 (mCallbackData != NULL && mCallback == NULL)) {
             // recycled track uses callbacks but the caller wants to use writes, or vice versa
             ALOGV("can't chain callback and write");
             reuse = false;
         } else if ((mRecycledTrack->getSampleRate() != sampleRate) ||
-                (mRecycledTrack->channelCount() != channelCount) ||
-                (mRecycledTrack->frameCount() != t->frameCount())) {
-            ALOGV("samplerate, channelcount or framecount differ: %d/%d Hz, %d/%d ch, %d/%d frames",
+                (mRecycledTrack->channelCount() != (uint32_t)channelCount) ) {
+            ALOGV("samplerate, channelcount differ: %u/%u Hz, %u/%d ch",
                   mRecycledTrack->getSampleRate(), sampleRate,
-                  mRecycledTrack->channelCount(), channelCount,
-                  mRecycledTrack->frameCount(), t->frameCount());
+                  mRecycledTrack->channelCount(), channelCount);
             reuse = false;
         } else if (flags != mFlags) {
             ALOGV("output flags differ %08x/%08x", flags, mFlags);
             reuse = false;
+        } else if (mRecycledTrack->format() != format) {
+            reuse = false;
         }
+    } else {
+        ALOGV("no track available to recycle");
+    }
+
+    ALOGV_IF(bothOffloaded, "both tracks offloaded");
+
+    // If we can't recycle and both tracks are offloaded
+    // we must close the previous output before opening a new one
+    if (bothOffloaded && !reuse) {
+        ALOGV("both offloaded and not recycling");
+        deleteRecycledTrack();
+    }
+
+    sp<AudioTrack> t;
+    CallbackData *newcbd = NULL;
+
+    // We don't attempt to create a new track if we are recycling an
+    // offloaded track. But, if we are recycling a non-offloaded or we
+    // are switching where one is offloaded and one isn't then we create
+    // the new track in advance so that we can read additional stream info
+
+    if (!(reuse && bothOffloaded)) {
+        ALOGV("creating new AudioTrack");
+
+        if (mCallback != NULL) {
+            newcbd = new CallbackData(this);
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    CallbackWrapper,
+                    newcbd,
+                    0,  // notification frames
+                    mSessionId,
+                    AudioTrack::TRANSFER_CALLBACK,
+                    offloadInfo);
+        } else {
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    NULL,
+                    NULL,
+                    0,
+                    mSessionId);
+        }
+
+        if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+            ALOGE("Unable to create audio track");
+            delete newcbd;
+            return NO_INIT;
+        }
+    }
+
+    if (reuse) {
+        CHECK(mRecycledTrack != NULL);
+
+        if (!bothOffloaded) {
+            if (mRecycledTrack->frameCount() != t->frameCount()) {
+                ALOGV("framecount differs: %u/%u frames",
+                      mRecycledTrack->frameCount(), t->frameCount());
+                reuse = false;
+            }
+        }
+
         if (reuse) {
-            ALOGV("chaining to next output");
+            ALOGV("chaining to next output and recycling track");
             close();
             mTrack = mRecycledTrack;
             mRecycledTrack.clear();
@@ -1487,19 +1586,16 @@
             delete newcbd;
             return OK;
         }
-
-        // if we're not going to reuse the track, unblock and flush it
-        if (mCallbackData != NULL) {
-            mCallbackData->setOutput(NULL);
-            mCallbackData->endTrackSwitch();
-        }
-        mRecycledTrack->flush();
-        mRecycledTrack.clear();
-        delete mCallbackData;
-        mCallbackData = NULL;
-        close();
     }
 
+    // we're not going to reuse the track, unblock and flush it
+    // this was done earlier if both tracks are offloaded
+    if (!bothOffloaded) {
+        deleteRecycledTrack();
+    }
+
+    CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
+
     mCallbackData = newcbd;
     ALOGV("setVolume");
     t->setVolume(mLeftVolume, mRightVolume);
@@ -1513,15 +1609,19 @@
     }
     mTrack = t;
 
-    status_t res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
-    if (res != NO_ERROR) {
-        return res;
+    status_t res = NO_ERROR;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+        res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+        if (res == NO_ERROR) {
+            t->setAuxEffectSendLevel(mSendLevel);
+            res = t->attachAuxEffect(mAuxEffectId);
+        }
     }
-    t->setAuxEffectSendLevel(mSendLevel);
-    return t->attachAuxEffect(mAuxEffectId);;
+    ALOGV("open() DONE status %d", res);
+    return res;
 }
 
-void MediaPlayerService::AudioOutput::start()
+status_t MediaPlayerService::AudioOutput::start()
 {
     ALOGV("start");
     if (mCallbackData != NULL) {
@@ -1530,8 +1630,9 @@
     if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->setAuxEffectSendLevel(mSendLevel);
-        mTrack->start();
+        return mTrack->start();
     }
+    return NO_INIT;
 }
 
 void MediaPlayerService::AudioOutput::setNextOutput(const sp<AudioOutput>& nextOutput) {
@@ -1644,10 +1745,6 @@
 void MediaPlayerService::AudioOutput::CallbackWrapper(
         int event, void *cookie, void *info) {
     //ALOGV("callbackwrapper");
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
-    }
-
     CallbackData *data = (CallbackData*)cookie;
     data->lock();
     AudioOutput *me = data->getOutput();
@@ -1656,22 +1753,46 @@
         // no output set, likely because the track was scheduled to be reused
         // by another player, but the format turned out to be incompatible.
         data->unlock();
-        buffer->size = 0;
+        if (buffer != NULL) {
+            buffer->size = 0;
+        }
         return;
     }
 
-    size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie);
+    switch(event) {
+    case AudioTrack::EVENT_MORE_DATA: {
+        size_t actualSize = (*me->mCallback)(
+                me, buffer->raw, buffer->size, me->mCallbackCookie,
+                CB_EVENT_FILL_BUFFER);
 
-    if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
-        // We've reached EOS but the audio track is not stopped yet,
-        // keep playing silence.
+        if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
+            // We've reached EOS but the audio track is not stopped yet,
+            // keep playing silence.
 
-        memset(buffer->raw, 0, buffer->size);
-        actualSize = buffer->size;
+            memset(buffer->raw, 0, buffer->size);
+            actualSize = buffer->size;
+        }
+
+        buffer->size = actualSize;
+        } break;
+
+
+    case AudioTrack::EVENT_STREAM_END:
+        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_STREAM_END);
+        break;
+
+    case AudioTrack::EVENT_NEW_IAUDIOTRACK :
+        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+        (*me->mCallback)(me,  NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+        break;
+
+    default:
+        ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
     }
 
-    buffer->size = actualSize;
     data->unlock();
 }
 
@@ -1767,7 +1888,8 @@
     }
 
     size_t actualSize =
-        (*mCallback)(sink.get(), mBuffer, mBufferSize, mCookie);
+        (*mCallback)(sink.get(), mBuffer, mBufferSize, mCookie,
+                MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER);
 
     if (actualSize > 0) {
         sink->write(mBuffer, actualSize);
@@ -1781,7 +1903,8 @@
 status_t MediaPlayerService::AudioCache::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags)
+        AudioCallback cb, void *cookie, audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
 {
     ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount);
     if (mHeap->getHeapID() < 0) {
@@ -1799,10 +1922,11 @@
     return NO_ERROR;
 }
 
-void MediaPlayerService::AudioCache::start() {
+status_t MediaPlayerService::AudioCache::start() {
     if (mCallbackThread != NULL) {
         mCallbackThread->run("AudioCache callback");
     }
+    return NO_ERROR;
 }
 
 void MediaPlayerService::AudioCache::stop() {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index e586156..7d27944 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -20,15 +20,12 @@
 
 #include <arpa/inet.h>
 
-#include <utils/Log.h>
 #include <utils/threads.h>
-#include <utils/List.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
 
-#include <media/IMediaPlayerService.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
@@ -94,9 +91,10 @@
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount,
                 AudioCallback cb, void *cookie,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
@@ -114,11 +112,14 @@
                 void            setNextOutput(const sp<AudioOutput>& nextOutput);
                 void            switchToNextOutput();
         virtual bool            needsTrailingPadding() { return mNextOutput == NULL; }
+        virtual status_t        setParameters(const String8& keyValuePairs);
+        virtual String8         getParameters(const String8& keys);
 
     private:
         static void             setMinBufferCount();
         static void             CallbackWrapper(
                 int event, void *me, void *info);
+               void             deleteRecycledTrack();
 
         sp<AudioTrack>          mTrack;
         sp<AudioTrack>          mRecycledTrack;
@@ -195,9 +196,10 @@
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount = 1,
                 AudioCallback cb = NULL, void *cookie = NULL,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush() {}
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index 8db5b9b..270b872 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -422,7 +422,7 @@
 
 status_t MidiFile::createOutputTrack() {
     if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels,
-            CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
+            CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2 /*bufferCount*/) != NO_ERROR) {
         ALOGE("mAudioSink open failed");
         return ERROR_OPEN_FAILED;
     }
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index 20e6513..eb959b4 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -16,19 +16,23 @@
 
 #include "RemoteDisplay.h"
 
-#include "ANetworkSession.h"
 #include "source/WifiDisplaySource.h"
 
 #include <media/IRemoteDisplayClient.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 namespace android {
 
 RemoteDisplay::RemoteDisplay(
-        const sp<IRemoteDisplayClient> &client, const char *iface)
+        const sp<IRemoteDisplayClient> &client,
+        const char *iface)
     : mLooper(new ALooper),
-      mNetSession(new ANetworkSession),
-      mSource(new WifiDisplaySource(mNetSession, client)) {
+      mNetSession(new ANetworkSession) {
     mLooper->setName("wfd_looper");
+
+    mSource = new WifiDisplaySource(mNetSession, client);
     mLooper->registerHandler(mSource);
 
     mNetSession->start();
@@ -50,6 +54,7 @@
 
 status_t RemoteDisplay::dispose() {
     mSource->stop();
+    mSource.clear();
 
     mLooper->stop();
     mNetSession->stop();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index bd8b684..82a0116 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -18,6 +18,7 @@
 
 #define REMOTE_DISPLAY_H_
 
+#include <media/IMediaPlayerService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <utils/Errors.h>
@@ -31,7 +32,9 @@
 struct WifiDisplaySource;
 
 struct RemoteDisplay : public BnRemoteDisplay {
-    RemoteDisplay(const sp<IRemoteDisplayClient> &client, const char *iface);
+    RemoteDisplay(
+            const sp<IRemoteDisplayClient> &client,
+            const char *iface);
 
     virtual status_t pause();
     virtual status_t resume();
diff --git a/media/libmediaplayerservice/SharedLibrary.cpp b/media/libmediaplayerservice/SharedLibrary.cpp
index 178e15d..34db761 100644
--- a/media/libmediaplayerservice/SharedLibrary.cpp
+++ b/media/libmediaplayerservice/SharedLibrary.cpp
@@ -46,4 +46,10 @@
         }
         return dlsym(mLibHandle, symbol);
     }
+
+    const char *SharedLibrary::lastError() const {
+        const char *error = dlerror();
+        return error ? error : "No errors or unknown error";
+    }
+
 };
diff --git a/media/libmediaplayerservice/SharedLibrary.h b/media/libmediaplayerservice/SharedLibrary.h
index 5353642..88451a0 100644
--- a/media/libmediaplayerservice/SharedLibrary.h
+++ b/media/libmediaplayerservice/SharedLibrary.h
@@ -29,6 +29,7 @@
 
         bool operator!() const;
         void *lookup(const char *symbol) const;
+        const char *lastError() const;
 
     private:
         void *mLibHandle;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 50ebf9c..3385a19 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -634,6 +634,10 @@
 }
 
 void NuPlayer::RTSPSource::onDisconnected(const sp<AMessage> &msg) {
+    if (mState == DISCONNECTED) {
+        return;
+    }
+
     status_t err;
     CHECK(msg->findInt32("result", &err));
     CHECK_NE(err, (status_t)OK);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6bc7718..00804c5 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2630,6 +2630,14 @@
         goto error;
     }
 
+    err = native_window_set_scaling_mode(mNativeWindow.get(),
+                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank_frames: set_scaling_mode failed: %s (%d)",
+              strerror(-err), -err);
+        goto error;
+    }
+
     err = native_window_set_usage(mNativeWindow.get(),
             GRALLOC_USAGE_SW_WRITE_OFTEN);
     if (err != NO_ERROR) {
@@ -4106,6 +4114,19 @@
         }
     }
 
+    int32_t dropInputFrames;
+    if (params->findInt32("drop-input-frames", &dropInputFrames)) {
+        bool suspend = dropInputFrames != 0;
+
+        CHECK_EQ((status_t)OK,
+                 mOMX->setInternalOption(
+                     mNode,
+                     kPortIndexInput,
+                     IOMX::INTERNAL_OPTION_SUSPEND,
+                     &suspend,
+                     sizeof(suspend)));
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 9544dbc..1f68b51 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -62,6 +62,7 @@
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
         $(TOP)/frameworks/native/include/media/hardware \
         $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/services/connectivitymanager \
         $(TOP)/external/flac/include \
         $(TOP)/external/tremolo \
         $(TOP)/external/openssl/include \
@@ -69,6 +70,7 @@
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
         libcamera_client \
+        libconnectivitymanager \
         libcutils \
         libdl \
         libdrmframework \
@@ -98,6 +100,7 @@
         libstagefright_mpeg2ts \
         libstagefright_id3 \
         libFLAC \
+        libmedia_helper
 
 LOCAL_SRC_FILES += \
         chromium_http_stub.cpp
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 92efae8..2418aab 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioPlayer"
 #include <utils/Log.h>
+#include <cutils/compiler.h>
 
 #include <binder/IPCThreadState.h>
 #include <media/AudioTrack.h>
@@ -27,6 +28,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 #include "include/AwesomePlayer.h"
 
@@ -34,7 +36,7 @@
 
 AudioPlayer::AudioPlayer(
         const sp<MediaPlayerBase::AudioSink> &audioSink,
-        bool allowDeepBuffering,
+        uint32_t flags,
         AwesomePlayer *observer)
     : mInputBuffer(NULL),
       mSampleRate(0),
@@ -47,14 +49,17 @@
       mSeeking(false),
       mReachedEOS(false),
       mFinalStatus(OK),
+      mSeekTimeUs(0),
       mStarted(false),
       mIsFirstBuffer(false),
       mFirstBufferResult(OK),
       mFirstBuffer(NULL),
       mAudioSink(audioSink),
-      mAllowDeepBuffering(allowDeepBuffering),
       mObserver(observer),
-      mPinnedTimeUs(-1ll) {
+      mPinnedTimeUs(-1ll),
+      mPlaying(false),
+      mStartPosUs(0),
+      mCreateFlags(flags) {
 }
 
 AudioPlayer::~AudioPlayer() {
@@ -109,7 +114,7 @@
     const char *mime;
     bool success = format->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
 
     success = format->findInt32(kKeySampleRate, &mSampleRate);
     CHECK(success);
@@ -125,16 +130,74 @@
         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
     }
 
+    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+    if (useOffload()) {
+        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
+            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
+            audioFormat = AUDIO_FORMAT_INVALID;
+        } else {
+            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
+        }
+    }
+
+    int avgBitRate = -1;
+    format->findInt32(kKeyBitRate, &avgBitRate);
+
     if (mAudioSink.get() != NULL) {
 
+        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
+        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+
+        if (allowDeepBuffering()) {
+            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        }
+        if (useOffload()) {
+            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+
+            int64_t durationUs;
+            if (format->findInt64(kKeyDuration, &durationUs)) {
+                offloadInfo.duration_us = durationUs;
+            } else {
+                offloadInfo.duration_us = -1;
+            }
+
+            offloadInfo.sample_rate = mSampleRate;
+            offloadInfo.channel_mask = channelMask;
+            offloadInfo.format = audioFormat;
+            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
+            offloadInfo.bit_rate = avgBitRate;
+            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
+            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
+        }
+
         status_t err = mAudioSink->open(
-                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
+                mSampleRate, numChannels, channelMask, audioFormat,
                 DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 &AudioPlayer::AudioSinkCallback,
                 this,
-                (mAllowDeepBuffering ?
-                            AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
-                            AUDIO_OUTPUT_FLAG_NONE));
+                (audio_output_flags_t)flags,
+                useOffload() ? &offloadInfo : NULL);
+
+        if (err == OK) {
+            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
+            mFrameSize = mAudioSink->frameSize();
+
+            if (useOffload()) {
+                // If the playback is offloaded to h/w we pass the
+                // HAL some metadata information
+                // We don't want to do this for PCM because it will be going
+                // through the AudioFlinger mixer before reaching the hardware
+                sendMetaDataToHal(mAudioSink, format);
+            }
+
+            err = mAudioSink->start();
+            // do not alter behavior for non offloaded tracks: ignore start status.
+            if (!useOffload()) {
+                err = OK;
+            }
+        }
+
         if (err != OK) {
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -148,10 +211,6 @@
             return err;
         }
 
-        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
-        mFrameSize = mAudioSink->frameSize();
-
-        mAudioSink->start();
     } else {
         // playing to an AudioTrack, set up mask if necessary
         audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
@@ -186,6 +245,7 @@
     }
 
     mStarted = true;
+    mPlaying = true;
     mPinnedTimeUs = -1ll;
 
     return OK;
@@ -212,27 +272,56 @@
 
         mPinnedTimeUs = ALooper::GetNowUs();
     }
+
+    mPlaying = false;
 }
 
-void AudioPlayer::resume() {
+status_t AudioPlayer::resume() {
     CHECK(mStarted);
+    status_t err;
 
     if (mAudioSink.get() != NULL) {
-        mAudioSink->start();
+        err = mAudioSink->start();
     } else {
-        mAudioTrack->start();
+        err = mAudioTrack->start();
     }
+
+    if (err == OK) {
+        mPlaying = true;
+    }
+
+    return err;
 }
 
 void AudioPlayer::reset() {
     CHECK(mStarted);
 
+    ALOGV("reset: mPlaying=%d mReachedEOS=%d useOffload=%d",
+                                mPlaying, mReachedEOS, useOffload() );
+
     if (mAudioSink.get() != NULL) {
         mAudioSink->stop();
+        // If we're closing and have reached EOS, we don't want to flush
+        // the track because if it is offloaded there could be a small
+        // amount of residual data in the hardware buffer which we must
+        // play to give gapless playback.
+        // But if we're resetting when paused or before we've reached EOS
+        // we can't be doing a gapless playback and there could be a large
+        // amount of data queued in the hardware if the track is offloaded,
+        // so we must flush to prevent a track switch being delayed playing
+        // the buffered data that we don't want now
+        if (!mPlaying || !mReachedEOS) {
+            mAudioSink->flush();
+        }
+
         mAudioSink->close();
     } else {
         mAudioTrack->stop();
 
+        if (!mPlaying || !mReachedEOS) {
+            mAudioTrack->flush();
+        }
+
         mAudioTrack.clear();
     }
 
@@ -256,10 +345,16 @@
     // The following hack is necessary to ensure that the OMX
     // component is completely released by the time we may try
     // to instantiate it again.
-    wp<MediaSource> tmp = mSource;
-    mSource.clear();
-    while (tmp.promote() != NULL) {
-        usleep(1000);
+    // When offloading, the OMX component is not used so this hack
+    // is not needed
+    if (!useOffload()) {
+        wp<MediaSource> tmp = mSource;
+        mSource.clear();
+        while (tmp.promote() != NULL) {
+            usleep(1000);
+        }
+    } else {
+        mSource.clear();
     }
     IPCThreadState::self()->flushCommands();
 
@@ -271,6 +366,8 @@
     mReachedEOS = false;
     mFinalStatus = OK;
     mStarted = false;
+    mPlaying = false;
+    mStartPosUs = 0;
 }
 
 // static
@@ -291,6 +388,15 @@
     return mReachedEOS;
 }
 
+void AudioPlayer::notifyAudioEOS() {
+    ALOGV("AudioPlayer@0x%p notifyAudioEOS", this);
+
+    if (mObserver != NULL) {
+        mObserver->postAudioEOS(0);
+        ALOGV("Notified observer of EOS!");
+    }
+}
+
 status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
     if (mAudioSink.get() != NULL) {
         return mAudioSink->setPlaybackRatePermille(ratePermille);
@@ -304,21 +410,44 @@
 // static
 size_t AudioPlayer::AudioSinkCallback(
         MediaPlayerBase::AudioSink *audioSink,
-        void *buffer, size_t size, void *cookie) {
+        void *buffer, size_t size, void *cookie,
+        MediaPlayerBase::AudioSink::cb_event_t event) {
     AudioPlayer *me = (AudioPlayer *)cookie;
 
-    return me->fillBuffer(buffer, size);
+    switch(event) {
+    case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
+        return me->fillBuffer(buffer, size);
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
+        ALOGV("AudioSinkCallback: stream end");
+        me->mReachedEOS = true;
+        me->notifyAudioEOS();
+        break;
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
+        ALOGV("AudioSinkCallback: Tear down event");
+        me->mObserver->postAudioTearDown();
+        break;
+    }
+
+    return 0;
 }
 
 void AudioPlayer::AudioCallback(int event, void *info) {
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
+    switch (event) {
+    case AudioTrack::EVENT_MORE_DATA:
+        {
+        AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+        size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
+        buffer->size = numBytesWritten;
+        }
+        break;
+
+    case AudioTrack::EVENT_STREAM_END:
+        mReachedEOS = true;
+        notifyAudioEOS();
+        break;
     }
-
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-
-    buffer->size = numBytesWritten;
 }
 
 uint32_t AudioPlayer::getNumFramesPendingPlayout() const {
@@ -358,6 +487,7 @@
     size_t size_remaining = size;
     while (size_remaining > 0) {
         MediaSource::ReadOptions options;
+        bool refreshSeekTime = false;
 
         {
             Mutex::Autolock autoLock(mLock);
@@ -372,6 +502,7 @@
                 }
 
                 options.setSeekTo(mSeekTimeUs);
+                refreshSeekTime = true;
 
                 if (mInputBuffer != NULL) {
                     mInputBuffer->release();
@@ -404,43 +535,56 @@
             Mutex::Autolock autoLock(mLock);
 
             if (err != OK) {
-                if (mObserver && !mReachedEOS) {
-                    // We don't want to post EOS right away but only
-                    // after all frames have actually been played out.
-
-                    // These are the number of frames submitted to the
-                    // AudioTrack that you haven't heard yet.
-                    uint32_t numFramesPendingPlayout =
-                        getNumFramesPendingPlayout();
-
-                    // These are the number of frames we're going to
-                    // submit to the AudioTrack by returning from this
-                    // callback.
-                    uint32_t numAdditionalFrames = size_done / mFrameSize;
-
-                    numFramesPendingPlayout += numAdditionalFrames;
-
-                    int64_t timeToCompletionUs =
-                        (1000000ll * numFramesPendingPlayout) / mSampleRate;
-
-                    ALOGV("total number of frames played: %lld (%lld us)",
-                            (mNumFramesPlayed + numAdditionalFrames),
-                            1000000ll * (mNumFramesPlayed + numAdditionalFrames)
-                                / mSampleRate);
-
-                    ALOGV("%d frames left to play, %lld us (%.2f secs)",
-                         numFramesPendingPlayout,
-                         timeToCompletionUs, timeToCompletionUs / 1E6);
-
-                    postEOS = true;
-                    if (mAudioSink->needsTrailingPadding()) {
-                        postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                if (!mReachedEOS) {
+                    if (useOffload()) {
+                        // no more buffers to push - stop() and wait for STREAM_END
+                        // don't set mReachedEOS until stream end received
+                        if (mAudioSink != NULL) {
+                            mAudioSink->stop();
+                        } else {
+                            mAudioTrack->stop();
+                        }
                     } else {
-                        postEOSDelayUs = 0;
+                        if (mObserver) {
+                            // We don't want to post EOS right away but only
+                            // after all frames have actually been played out.
+
+                            // These are the number of frames submitted to the
+                            // AudioTrack that you haven't heard yet.
+                            uint32_t numFramesPendingPlayout =
+                                getNumFramesPendingPlayout();
+
+                            // These are the number of frames we're going to
+                            // submit to the AudioTrack by returning from this
+                            // callback.
+                            uint32_t numAdditionalFrames = size_done / mFrameSize;
+
+                            numFramesPendingPlayout += numAdditionalFrames;
+
+                            int64_t timeToCompletionUs =
+                                (1000000ll * numFramesPendingPlayout) / mSampleRate;
+
+                            ALOGV("total number of frames played: %lld (%lld us)",
+                                    (mNumFramesPlayed + numAdditionalFrames),
+                                    1000000ll * (mNumFramesPlayed + numAdditionalFrames)
+                                        / mSampleRate);
+
+                            ALOGV("%d frames left to play, %lld us (%.2f secs)",
+                                 numFramesPendingPlayout,
+                                 timeToCompletionUs, timeToCompletionUs / 1E6);
+
+                            postEOS = true;
+                            if (mAudioSink->needsTrailingPadding()) {
+                                postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                            } else {
+                                postEOSDelayUs = 0;
+                            }
+                        }
+
+                        mReachedEOS = true;
                     }
                 }
 
-                mReachedEOS = true;
                 mFinalStatus = err;
                 break;
             }
@@ -451,17 +595,34 @@
                 mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
             }
 
-            CHECK(mInputBuffer->meta_data()->findInt64(
+            if(mInputBuffer->range_length() != 0) {
+                CHECK(mInputBuffer->meta_data()->findInt64(
                         kKeyTime, &mPositionTimeMediaUs));
+            }
 
-            mPositionTimeRealUs =
-                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
-                    / mSampleRate;
+            // need to adjust the mStartPosUs for offload decoding since parser
+            // might not be able to get the exact seek time requested.
+            if (refreshSeekTime && useOffload()) {
+                if (postSeekComplete) {
+                    ALOGV("fillBuffer is going to post SEEK_COMPLETE");
+                    mObserver->postAudioSeekComplete();
+                    postSeekComplete = false;
+                }
 
-            ALOGV("buffer->size() = %d, "
-                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
-                 mInputBuffer->range_length(),
-                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+                mStartPosUs = mPositionTimeMediaUs;
+                ALOGV("adjust seek time to: %.2f", mStartPosUs/ 1E6);
+            }
+
+            if (!useOffload()) {
+                mPositionTimeRealUs =
+                    ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                        / mSampleRate;
+                ALOGV("buffer->size() = %d, "
+                     "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+                     mInputBuffer->range_length(),
+                     mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+            }
+
         }
 
         if (mInputBuffer->range_length() == 0) {
@@ -487,6 +648,13 @@
         size_remaining -= copy;
     }
 
+    if (useOffload()) {
+        // We must ask the hardware what it has played
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+             mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+    }
+
     {
         Mutex::Autolock autoLock(mLock);
         mNumFramesPlayed += size_done / mFrameSize;
@@ -535,9 +703,36 @@
     return result + diffUs;
 }
 
+int64_t AudioPlayer::getOutputPlayPositionUs_l() const
+{
+    uint32_t playedSamples = 0;
+    if (mAudioSink != NULL) {
+        mAudioSink->getPosition(&playedSamples);
+    } else {
+        mAudioTrack->getPosition(&playedSamples);
+    }
+
+    const int64_t playedUs = (static_cast<int64_t>(playedSamples) * 1000000 ) / mSampleRate;
+
+    // HAL position is relative to the first buffer we sent at mStartPosUs
+    const int64_t renderedDuration = mStartPosUs + playedUs;
+    ALOGV("getOutputPlayPositionUs_l %lld", renderedDuration);
+    return renderedDuration;
+}
+
 int64_t AudioPlayer::getMediaTimeUs() {
     Mutex::Autolock autoLock(mLock);
 
+    if (useOffload()) {
+        if (mSeeking) {
+            return mSeekTimeUs;
+        }
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %lld",
+              mPositionTimeRealUs);
+        return mPositionTimeRealUs;
+    }
+
     if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
         if (mSeeking) {
             return mSeekTimeUs;
@@ -546,6 +741,11 @@
         return 0;
     }
 
+    if (useOffload()) {
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        return mPositionTimeRealUs;
+    }
+
     int64_t realTimeOffset = getRealTimeUsLocked() - mPositionTimeRealUs;
     if (realTimeOffset < 0) {
         realTimeOffset = 0;
@@ -567,19 +767,34 @@
 status_t AudioPlayer::seekTo(int64_t time_us) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("seekTo( %lld )", time_us);
+
     mSeeking = true;
     mPositionTimeRealUs = mPositionTimeMediaUs = -1;
     mReachedEOS = false;
     mSeekTimeUs = time_us;
+    mStartPosUs = time_us;
 
     // Flush resets the number of played frames
     mNumFramesPlayed = 0;
     mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
 
     if (mAudioSink != NULL) {
+        if (mPlaying) {
+            mAudioSink->pause();
+        }
         mAudioSink->flush();
+        if (mPlaying) {
+            mAudioSink->start();
+        }
     } else {
+        if (mPlaying) {
+            mAudioTrack->pause();
+        }
         mAudioTrack->flush();
+        if (mPlaying) {
+            mAudioTrack->start();
+        }
     }
 
     return OK;
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 6c197e2..79f2c91 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -47,6 +47,7 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
 
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
@@ -65,6 +66,11 @@
 static const size_t kLowWaterMarkBytes = 40000;
 static const size_t kHighWaterMarkBytes = 200000;
 
+// maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
+// is destroyed to allow the audio DSP to power down.
+static int64_t kOffloadPauseMaxUs = 60000000ll;
+
+
 struct AwesomeEvent : public TimedEventQueue::Event {
     AwesomeEvent(
             AwesomePlayer *player,
@@ -194,7 +200,9 @@
       mVideoBuffer(NULL),
       mDecryptHandle(NULL),
       mLastVideoTimeUs(-1),
-      mTextDriver(NULL) {
+      mTextDriver(NULL),
+      mOffloadAudio(false),
+      mAudioTearDown(false) {
     CHECK_EQ(mClient.connect(), (status_t)OK);
 
     DataSource::RegisterDefaultSniffers();
@@ -206,13 +214,17 @@
     mBufferingEvent = new AwesomeEvent(this, &AwesomePlayer::onBufferingUpdate);
     mBufferingEventPending = false;
     mVideoLagEvent = new AwesomeEvent(this, &AwesomePlayer::onVideoLagUpdate);
-    mVideoEventPending = false;
+    mVideoLagEventPending = false;
 
     mCheckAudioStatusEvent = new AwesomeEvent(
             this, &AwesomePlayer::onCheckAudioStatus);
 
     mAudioStatusEventPending = false;
 
+    mAudioTearDownEvent = new AwesomeEvent(this,
+                              &AwesomePlayer::onAudioTearDownEvent);
+    mAudioTearDownEventPending = false;
+
     reset();
 }
 
@@ -232,6 +244,11 @@
     mQueue.cancelEvent(mVideoLagEvent->eventID());
     mVideoLagEventPending = false;
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!keepNotifications) {
         mQueue.cancelEvent(mStreamDoneEvent->eventID());
         mStreamDoneEventPending = false;
@@ -518,7 +535,7 @@
     mVideoTrack.clear();
     mExtractor.clear();
 
-    // Shutdown audio first, so that the respone to the reset request
+    // Shutdown audio first, so that the response to the reset request
     // appears to happen instantaneously as far as the user is concerned
     // If we did this later, audio would continue playing while we
     // shutdown the video-related resources and the player appear to
@@ -531,6 +548,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -586,7 +604,7 @@
 }
 
 void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
-    if (mListener != NULL) {
+    if ((mListener != NULL) && !mAudioTearDown) {
         sp<MediaPlayerBase> listener = mListener.promote();
 
         if (listener != NULL) {
@@ -617,7 +635,7 @@
 bool AwesomePlayer::getCachedDuration_l(int64_t *durationUs, bool *eos) {
     int64_t bitrate;
 
-    if (mCachedSource != NULL && getBitrate(&bitrate)) {
+    if (mCachedSource != NULL && getBitrate(&bitrate) && (bitrate > 0)) {
         status_t finalStatus;
         size_t cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
         *durationUs = cachedDataRemaining * 8000000ll / bitrate;
@@ -842,6 +860,13 @@
 
         pause_l(true /* at eos */);
 
+        // If audio hasn't completed MEDIA_SEEK_COMPLETE yet,
+        // notify MEDIA_SEEK_COMPLETE to observer immediately for state persistence.
+        if (mWatchForAudioSeekComplete) {
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+            mWatchForAudioSeekComplete = false;
+        }
+
         modifyFlags(AT_EOS, SET);
     }
 }
@@ -883,41 +908,42 @@
 
     if (mAudioSource != NULL) {
         if (mAudioPlayer == NULL) {
-            if (mAudioSink != NULL) {
-                bool allowDeepBuffering;
-                int64_t cachedDurationUs;
-                bool eos;
-                if (mVideoSource == NULL
-                        && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
-                        (getCachedDuration_l(&cachedDurationUs, &eos) &&
-                        cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
-                    allowDeepBuffering = true;
-                } else {
-                    allowDeepBuffering = false;
-                }
-
-                mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
-                mAudioPlayer->setSource(mAudioSource);
-
-                mTimeSource = mAudioPlayer;
-
-                // If there was a seek request before we ever started,
-                // honor the request now.
-                // Make sure to do this before starting the audio player
-                // to avoid a race condition.
-                seekAudioIfNecessary_l();
-            }
+            createAudioPlayer_l();
         }
 
         CHECK(!(mFlags & AUDIO_RUNNING));
 
         if (mVideoSource == NULL) {
+
             // We don't want to post an error notification at this point,
             // the error returned from MediaPlayer::start() will suffice.
 
             status_t err = startAudioPlayer_l(
                     false /* sendErrorNotification */);
 
+            if ((err != OK) && mOffloadAudio) {
+                ALOGI("play_l() cannot create offload output, fallback to sw decode");
+                delete mAudioPlayer;
+                mAudioPlayer = NULL;
+                // if the player was started it will take care of stopping the source when destroyed
+                if (!(mFlags & AUDIOPLAYER_STARTED)) {
+                    mAudioSource->stop();
+                }
+                modifyFlags((AUDIO_RUNNING | AUDIOPLAYER_STARTED), CLEAR);
+                mOffloadAudio = false;
+                mAudioSource = mOmxSource;
+                if (mAudioSource != NULL) {
+                    err = mAudioSource->start();
+
+                    if (err != OK) {
+                        mAudioSource.clear();
+                    } else {
+                        createAudioPlayer_l();
+                        err = startAudioPlayer_l(false);
+                    }
+                }
+            }
+
             if (err != OK) {
                 delete mAudioPlayer;
                 mAudioPlayer = NULL;
@@ -966,19 +992,58 @@
     return OK;
 }
 
+void AwesomePlayer::createAudioPlayer_l()
+{
+    uint32_t flags = 0;
+    int64_t cachedDurationUs;
+    bool eos;
+
+    if (mOffloadAudio) {
+        flags |= AudioPlayer::USE_OFFLOAD;
+    } else if (mVideoSource == NULL
+            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
+            (getCachedDuration_l(&cachedDurationUs, &eos) &&
+            cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
+        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
+    }
+    if (isStreamingHTTP()) {
+        flags |= AudioPlayer::IS_STREAMING;
+    }
+    if (mVideoSource != NULL) {
+        flags |= AudioPlayer::HAS_VIDEO;
+    }
+
+    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
+    mAudioPlayer->setSource(mAudioSource);
+
+    mTimeSource = mAudioPlayer;
+
+    // If there was a seek request before we ever started,
+    // honor the request now.
+    // Make sure to do this before starting the audio player
+    // to avoid a race condition.
+    seekAudioIfNecessary_l();
+}
+
 status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
     CHECK(!(mFlags & AUDIO_RUNNING));
+    status_t err = OK;
 
     if (mAudioSource == NULL || mAudioPlayer == NULL) {
         return OK;
     }
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!(mFlags & AUDIOPLAYER_STARTED)) {
         bool wasSeeking = mAudioPlayer->isSeeking();
 
         // We've already started the MediaSource in order to enable
         // the prefetcher to read its data.
-        status_t err = mAudioPlayer->start(
+        err = mAudioPlayer->start(
                 true /* sourceAlreadyStarted */);
 
         if (err != OK) {
@@ -998,14 +1063,16 @@
             postAudioSeekComplete();
         }
     } else {
-        mAudioPlayer->resume();
+        err = mAudioPlayer->resume();
     }
 
-    modifyFlags(AUDIO_RUNNING, SET);
+    if (err == OK) {
+        modifyFlags(AUDIO_RUNNING, SET);
 
-    mWatchForAudioEOS = true;
+        mWatchForAudioEOS = true;
+    }
 
-    return OK;
+    return err;
 }
 
 void AwesomePlayer::notifyVideoSize_l() {
@@ -1137,15 +1204,14 @@
     cancelPlayerEvents(true /* keepNotifications */);
 
     if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-        if (at_eos) {
-            // If we played the audio stream to completion we
-            // want to make sure that all samples remaining in the audio
-            // track's queue are played out.
-            mAudioPlayer->pause(true /* playPendingSamples */);
-        } else {
-            mAudioPlayer->pause();
+        // If we played the audio stream to completion we
+        // want to make sure that all samples remaining in the audio
+        // track's queue are played out.
+        mAudioPlayer->pause(at_eos /* playPendingSamples */);
+        // send us a reminder to tear down the AudioPlayer if paused for too long.
+        if (mOffloadAudio) {
+            postAudioTearDownEvent(kOffloadPauseMaxUs);
         }
-
         modifyFlags(AUDIO_RUNNING, CLEAR);
     }
 
@@ -1290,7 +1356,6 @@
     } else {
         *positionUs = 0;
     }
-
     return OK;
 }
 
@@ -1385,14 +1450,29 @@
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
+    // Check whether there is a hardware codec for this stream
+    // This doesn't guarantee that the hardware has a free stream
+    // but it avoids us attempting to open (and re-open) an offload
+    // stream to hardware that doesn't have the necessary codec
+    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP());
 
     if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+        ALOGV("createAudioPlayer: bypass OMX (raw)");
         mAudioSource = mAudioTrack;
     } else {
-        mAudioSource = OMXCodec::Create(
+        // If offloading we still create an OMX decoder as a fall-back
+        // but we don't start it
+        mOmxSource = OMXCodec::Create(
                 mClient.interface(), mAudioTrack->getFormat(),
                 false, // createEncoder
                 mAudioTrack);
+
+        if (mOffloadAudio) {
+            ALOGV("createAudioPlayer: bypass OMX (offload)");
+            mAudioSource = mAudioTrack;
+        } else {
+            mAudioSource = mOmxSource;
+        }
     }
 
     if (mAudioSource != NULL) {
@@ -1408,6 +1488,7 @@
 
         if (err != OK) {
             mAudioSource.clear();
+            mOmxSource.clear();
             return err;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
@@ -1885,6 +1966,15 @@
     mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
 }
 
+void AwesomePlayer::postAudioTearDownEvent(int64_t delayUs) {
+    Mutex::Autolock autoLock(mAudioLock);
+    if (mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = true;
+    mQueue.postEventWithDelay(mAudioTearDownEvent, delayUs);
+}
+
 void AwesomePlayer::onCheckAudioStatus() {
     {
         Mutex::Autolock autoLock(mAudioLock);
@@ -2200,7 +2290,10 @@
 
 void AwesomePlayer::onPrepareAsyncEvent() {
     Mutex::Autolock autoLock(mLock);
+    beginPrepareAsync_l();
+}
 
+void AwesomePlayer::beginPrepareAsync_l() {
     if (mFlags & PREPARE_CANCELLED) {
         ALOGI("prepare was cancelled before doing anything");
         abortPrepare(UNKNOWN_ERROR);
@@ -2273,6 +2366,10 @@
     postCheckAudioStatusEvent(0);
 }
 
+void AwesomePlayer::postAudioTearDown() {
+    postAudioTearDownEvent(0);
+}
+
 status_t AwesomePlayer::setParameter(int key, const Parcel &request) {
     switch (key) {
         case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
@@ -2404,6 +2501,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -2660,4 +2758,66 @@
     }
 }
 
+void AwesomePlayer::onAudioTearDownEvent() {
+
+    Mutex::Autolock autoLock(mLock);
+    if (!mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = false;
+
+    ALOGV("onAudioTearDownEvent");
+
+    // stream info is cleared by reset_l() so copy what we need
+    const bool wasPlaying = (mFlags & PLAYING);
+    KeyedVector<String8, String8> uriHeaders(mUriHeaders);
+    sp<DataSource> fileSource(mFileSource);
+
+    mStatsLock.lock();
+    String8 uri(mStats.mURI);
+    mStatsLock.unlock();
+
+    // get current position so we can start recreated stream from here
+    int64_t position = 0;
+    getPosition(&position);
+
+    // Reset and recreate
+    reset_l();
+    mFlags |= PREPARING;
+
+    status_t err;
+
+    if (fileSource != NULL) {
+        mFileSource = fileSource;
+        err = setDataSource_l(fileSource);
+    } else {
+        err = setDataSource_l(uri, &uriHeaders);
+    }
+
+    if ( err != OK ) {
+        // This will force beginPrepareAsync_l() to notify
+        // a MEDIA_ERROR to the client and abort the prepare
+        mFlags |= PREPARE_CANCELLED;
+    }
+
+    mAudioTearDown = true;
+    mIsAsyncPrepare = true;
+
+    // Call prepare for the host decoding
+    beginPrepareAsync_l();
+
+    if (mPrepareResult == OK) {
+        if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+            seekTo_l(position);
+        }
+
+        if (wasPlaying) {
+            modifyFlags(CACHE_UNDERRUN, CLEAR);
+            play_l();
+        }
+    }
+
+    mAudioTearDown = false;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index d2cc6c2..5fa4b6f 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -30,6 +30,8 @@
 #include <cutils/properties.h>
 #include <cutils/qtaguid.h>
 
+#include <ConnectivityManager.h>
+
 namespace android {
 
 HTTPBase::HTTPBase()
@@ -164,4 +166,14 @@
     }
 }
 
+// static
+void HTTPBase::RegisterSocketUserMark(int sockfd, uid_t uid) {
+    ConnectivityManager::markSocketAsUser(sockfd, uid);
+}
+
+// static
+void HTTPBase::UnRegisterSocketUserMark(int sockfd) {
+    RegisterSocketUserMark(sockfd, geteuid());
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 42a9c7a..ad985ee 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1924,13 +1924,13 @@
         mtime = U64_AT(&buffer[12]);
         id = U32_AT(&buffer[20]);
         duration = U64_AT(&buffer[28]);
-    } else {
-        CHECK_EQ((unsigned)version, 0u);
-
+    } else if (version == 0) {
         ctime = U32_AT(&buffer[4]);
         mtime = U32_AT(&buffer[8]);
         id = U32_AT(&buffer[12]);
         duration = U32_AT(&buffer[20]);
+    } else {
+        return ERROR_UNSUPPORTED;
     }
 
     mLastTrack->meta->setInt32(kKeyTrackID, id);
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index a01ec97..ae6ae2d 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -282,6 +282,7 @@
     if (!usesReservoir()) {
         if (u.ext_data) {
             free(u.ext_data);
+            u.ext_data = NULL;
         }
     }
 
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 1822f07..810d88f 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -113,6 +113,13 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index);
 
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size);
+
 private:
     mutable Mutex mLock;
 
@@ -331,6 +338,15 @@
     return getOMX(node)->getExtensionIndex(node, parameter_name, index);
 }
 
+status_t MuxOMX::setInternalOption(
+        node_id node,
+        OMX_U32 port_index,
+        InternalOptionType type,
+        const void *data,
+        size_t size) {
+    return getOMX(node)->setInternalOption(node, port_index, type, data, size);
+}
+
 OMXClient::OMXClient() {
 }
 
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 71b6569..befd4cc 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -21,7 +21,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <OMX_IVCommon.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
 
 #include <ui/GraphicBuffer.h>
 #include <gui/ISurfaceComposer.h>
@@ -54,9 +54,8 @@
         ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
     }
 
-    mBufferQueue = new BufferQueue(true);
+    mBufferQueue = new BufferQueue();
     mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setSynchronousMode(true);
     mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
@@ -71,7 +70,7 @@
     listener = static_cast<BufferQueue::ConsumerListener*>(this);
     proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    status_t err = mBufferQueue->consumerConnect(proxy);
+    status_t err = mBufferQueue->consumerConnect(proxy, false);
     if (err != NO_ERROR) {
         ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
                 strerror(-err), err);
@@ -293,7 +292,7 @@
     // wait here till the frames come in from the client side
     while (mStarted) {
 
-        status_t err = mBufferQueue->acquireBuffer(&item);
+        status_t err = mBufferQueue->acquireBuffer(&item, 0);
         if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
             // wait for a buffer to be queued
             mFrameAvailableCondition.wait(mMutex);
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index b0df379..4db8e80 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -26,7 +26,12 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/AudioSystem.h>
+#include <media/MediaPlayerInterface.h>
+#include <hardware/audio.h>
 #include <media/stagefright/Utils.h>
+#include <media/AudioParameter.h>
 
 namespace android {
 
@@ -471,5 +476,132 @@
     return ua;
 }
 
+status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
+                           const sp<MetaData>& meta)
+{
+    int32_t sampleRate = 0;
+    int32_t bitRate = 0;
+    int32_t channelMask = 0;
+    int32_t delaySamples = 0;
+    int32_t paddingSamples = 0;
+
+    AudioParameter param = AudioParameter();
+
+    if (meta->findInt32(kKeySampleRate, &sampleRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_SAMPLE_RATE), sampleRate);
+    }
+    if (meta->findInt32(kKeyChannelMask, &channelMask)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_NUM_CHANNEL), channelMask);
+    }
+    if (meta->findInt32(kKeyBitRate, &bitRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
+    }
+    if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
+    }
+    if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
+    }
+
+    ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d,"
+          "delaySample %d, paddingSample %d", bitRate, sampleRate,
+          channelMask, delaySamples, paddingSamples);
+
+    sink->setParameters(param.toString());
+    return OK;
+}
+
+struct mime_conv_t {
+    const char* mime;
+    audio_format_t format;
+};
+
+static const struct mime_conv_t mimeLookup[] = {
+    { MEDIA_MIMETYPE_AUDIO_MPEG,        AUDIO_FORMAT_MP3 },
+    { MEDIA_MIMETYPE_AUDIO_RAW,         AUDIO_FORMAT_PCM_16_BIT },
+    { MEDIA_MIMETYPE_AUDIO_AMR_NB,      AUDIO_FORMAT_AMR_NB },
+    { MEDIA_MIMETYPE_AUDIO_AMR_WB,      AUDIO_FORMAT_AMR_WB },
+    { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
+    { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
+    { 0, AUDIO_FORMAT_INVALID }
+};
+
+status_t mapMimeToAudioFormat( audio_format_t& format, const char* mime )
+{
+const struct mime_conv_t* p = &mimeLookup[0];
+    while (p->mime != NULL) {
+        if (0 == strcasecmp(mime, p->mime)) {
+            format = p->format;
+            return OK;
+        }
+        ++p;
+    }
+
+    return BAD_VALUE;
+}
+
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)
+{
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
+
+    info.format = AUDIO_FORMAT_INVALID;
+    if (mapMimeToAudioFormat(info.format, mime) != OK) {
+        ALOGE(" Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format !", mime);
+        return false;
+    } else {
+        ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
+    }
+
+    if (AUDIO_FORMAT_INVALID == info.format) {
+        // can't offload if we don't know what the source format is
+        ALOGE("mime type \"%s\" not a known audio format", mime);
+        return false;
+    }
+
+    int32_t srate = -1;
+    if (!meta->findInt32(kKeySampleRate, &srate)) {
+        ALOGV("track of type '%s' does not publish sample rate", mime);
+    }
+    info.sample_rate = srate;
+
+    int32_t cmask = 0;
+    if (!meta->findInt32(kKeyChannelMask, &cmask)) {
+        ALOGV("track of type '%s' does not publish channel mask", mime);
+
+        // Try a channel count instead
+        int32_t channelCount;
+        if (!meta->findInt32(kKeyChannelCount, &channelCount)) {
+            ALOGV("track of type '%s' does not publish channel count", mime);
+        } else {
+            cmask = audio_channel_out_mask_from_count(channelCount);
+        }
+    }
+    info.channel_mask = cmask;
+
+    int64_t duration = 0;
+    if (!meta->findInt64(kKeyDuration, &duration)) {
+        ALOGV("track of type '%s' does not publish duration", mime);
+    }
+    info.duration_us = duration;
+
+    int32_t brate = -1;
+    if (!meta->findInt32(kKeyBitRate, &brate)) {
+        ALOGV("track of type '%s' does not publish bitrate", mime);
+     }
+    info.bit_rate = brate;
+
+
+    info.stream_type = AUDIO_STREAM_MUSIC;
+    info.has_video = hasVideo;
+    info.is_streaming = isStreaming;
+
+    // Check if offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming
+    return AudioSystem::isOffloadSupported(info);
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/chromium_http/support.cpp b/media/libstagefright/chromium_http/support.cpp
index 741cb1d..0a8e3e3 100644
--- a/media/libstagefright/chromium_http/support.cpp
+++ b/media/libstagefright/chromium_http/support.cpp
@@ -150,7 +150,7 @@
 }
 
 net::NetLog::LogLevel SfNetLog::GetLogLevel() const {
-    return LOG_ALL;
+    return LOG_BASIC;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index a92d376..4060a0a 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -12,11 +12,16 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax \
 
+ifeq ($(TARGET_DEVICE), manta)
+    LOCAL_CFLAGS += -DSURFACE_IS_BGR32
+endif
+
 LOCAL_STATIC_LIBRARIES := \
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+        libhardware \
 
 LOCAL_MODULE := libstagefright_soft_vpxenc
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 74d6df5..5f2b5c8 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -20,6 +20,8 @@
 
 #include <utils/Log.h>
 
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaDefs.h>
 
@@ -81,6 +83,52 @@
     }
 }
 
+static void ConvertRGB32ToPlanar(
+        const uint8_t *src, uint8_t *dstY, int32_t width, int32_t height) {
+    CHECK((width & 1) == 0);
+    CHECK((height & 1) == 0);
+
+    uint8_t *dstU = dstY + width * height;
+    uint8_t *dstV = dstU + (width / 2) * (height / 2);
+
+    for (int32_t y = 0; y < height; ++y) {
+        for (int32_t x = 0; x < width; ++x) {
+#ifdef SURFACE_IS_BGR32
+            unsigned blue = src[4 * x];
+            unsigned green = src[4 * x + 1];
+            unsigned red= src[4 * x + 2];
+#else
+            unsigned red= src[4 * x];
+            unsigned green = src[4 * x + 1];
+            unsigned blue = src[4 * x + 2];
+#endif
+
+            unsigned luma =
+                ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+
+            dstY[x] = luma;
+
+            if ((x & 1) == 0 && (y & 1) == 0) {
+                unsigned U =
+                    ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+
+                unsigned V =
+                    ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+
+                dstU[x / 2] = U;
+                dstV[x / 2] = V;
+            }
+        }
+
+        if ((y & 1) == 0) {
+            dstU += width / 2;
+            dstV += width / 2;
+        }
+
+        src += 4 * width;
+        dstY += width;
+    }
+}
 
 SoftVPXEncoder::SoftVPXEncoder(const char *name,
                                const OMX_CALLBACKTYPE *callbacks,
@@ -99,8 +147,10 @@
       mErrorResilience(OMX_FALSE),
       mColorFormat(OMX_COLOR_FormatYUV420Planar),
       mLevel(OMX_VIDEO_VP8Level_Version0),
-      mConversionBuffer(NULL) {
-
+      mConversionBuffer(NULL),
+      mInputDataIsMeta(false),
+      mGrallocModule(NULL),
+      mKeyFrameRequested(false) {
     initPorts();
 }
 
@@ -247,7 +297,7 @@
         return UNKNOWN_ERROR;
     }
 
-    if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+    if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
         if (mConversionBuffer == NULL) {
             mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
             if (mConversionBuffer == NULL) {
@@ -427,9 +477,17 @@
                 (const OMX_VIDEO_PARAM_BITRATETYPE *)param);
 
         case OMX_IndexParamPortDefinition:
-            return internalSetPortParams(
+        {
+            OMX_ERRORTYPE err = internalSetPortParams(
                 (const OMX_PARAM_PORTDEFINITIONTYPE *)param);
 
+            if (err != OMX_ErrorNone) {
+                return err;
+            }
+
+            return SimpleSoftOMXComponent::internalSetParameter(index, param);
+        }
+
         case OMX_IndexParamVideoPortFormat:
             return internalSetFormatParams(
                 (const OMX_VIDEO_PARAM_PORTFORMATTYPE *)param);
@@ -442,11 +500,47 @@
             return internalSetProfileLevel(
                 (const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param);
 
+        case OMX_IndexVendorStartUnused:
+        {
+            // storeMetaDataInBuffers
+            const StoreMetaDataInBuffersParams *storeParam =
+                (const StoreMetaDataInBuffersParams *)param;
+
+            if (storeParam->nPortIndex != kInputPortIndex) {
+                return OMX_ErrorBadPortIndex;
+            }
+
+            mInputDataIsMeta = (storeParam->bStoreMetaData == OMX_TRUE);
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalSetParameter(index, param);
     }
 }
 
+OMX_ERRORTYPE SoftVPXEncoder::setConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params) {
+    switch (index) {
+        case OMX_IndexConfigVideoIntraVOPRefresh:
+        {
+            OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
+                (OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
+
+            if (params->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadPortIndex;
+            }
+
+            mKeyFrameRequested = params->IntraRefreshVOP;
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::setConfig(index, _params);
+    }
+}
+
 OMX_ERRORTYPE SoftVPXEncoder::internalSetProfileLevel(
         const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel) {
     if (profileAndLevel->nPortIndex != kOutputPortIndex) {
@@ -507,6 +601,10 @@
             format->eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
             format->eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
             mColorFormat = format->eColorFormat;
+
+            OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+            def->format.video.eColorFormat = mColorFormat;
+
             return OMX_ErrorNone;
         } else {
             ALOGE("Unsupported color format %i", format->eColorFormat);
@@ -552,11 +650,17 @@
         if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
-                mColorFormat = port->format.video.eColorFormat;
+            mColorFormat = port->format.video.eColorFormat;
         } else {
             return OMX_ErrorUnsupportedSetting;
         }
 
+        OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+        def->format.video.nFrameWidth = mWidth;
+        def->format.video.nFrameHeight = mHeight;
+        def->format.video.xFramerate = port->format.video.xFramerate;
+        def->format.video.eColorFormat = mColorFormat;
+
         return OMX_ErrorNone;
     } else if (port->nPortIndex == kOutputPortIndex) {
         mBitrate = port->format.video.nBitrate;
@@ -625,24 +729,63 @@
             return;
         }
 
-        uint8_t* source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
+        uint8_t *source =
+            inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
 
-        // NOTE: As much as nothing is known about color format
-        // when it is denoted as AndroidOpaque, it is at least
-        // assumed to be planar.
-        if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
-            ConvertSemiPlanarToPlanar(source, mConversionBuffer, mWidth, mHeight);
+        if (mInputDataIsMeta) {
+            CHECK_GE(inputBufferHeader->nFilledLen,
+                     4 + sizeof(buffer_handle_t));
+
+            uint32_t bufferType = *(uint32_t *)source;
+            CHECK_EQ(bufferType, kMetadataBufferTypeGrallocSource);
+
+            if (mGrallocModule == NULL) {
+                CHECK_EQ(0, hw_get_module(
+                            GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
+            }
+
+            const gralloc_module_t *grmodule =
+                (const gralloc_module_t *)mGrallocModule;
+
+            buffer_handle_t handle = *(buffer_handle_t *)(source + 4);
+
+            void *bits;
+            CHECK_EQ(0,
+                     grmodule->lock(
+                         grmodule, handle,
+                         GRALLOC_USAGE_SW_READ_OFTEN
+                            | GRALLOC_USAGE_SW_WRITE_NEVER,
+                         0, 0, mWidth, mHeight, &bits));
+
+            ConvertRGB32ToPlanar(
+                    (const uint8_t *)bits, mConversionBuffer, mWidth, mHeight);
+
+            source = mConversionBuffer;
+
+            CHECK_EQ(0, grmodule->unlock(grmodule, handle));
+        } else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+            ConvertSemiPlanarToPlanar(
+                    source, mConversionBuffer, mWidth, mHeight);
+
             source = mConversionBuffer;
         }
         vpx_image_t raw_frame;
         vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
                      kInputBufferAlignment, source);
-        codec_return = vpx_codec_encode(mCodecContext,
-                                        &raw_frame,
-                                        inputBufferHeader->nTimeStamp,  // in timebase units
-                                        mFrameDurationUs,  // frame duration in timebase units
-                                        0,  // frame flags
-                                        VPX_DL_REALTIME);  // encoding deadline
+
+        vpx_enc_frame_flags_t flags = 0;
+        if (mKeyFrameRequested) {
+            flags |= VPX_EFLAG_FORCE_KF;
+            mKeyFrameRequested = false;
+        }
+
+        codec_return = vpx_codec_encode(
+                mCodecContext,
+                &raw_frame,
+                inputBufferHeader->nTimeStamp,  // in timebase units
+                mFrameDurationUs,  // frame duration in timebase units
+                flags,  // frame flags
+                VPX_DL_REALTIME);  // encoding deadline
         if (codec_return != VPX_CODEC_OK) {
             ALOGE("vpx encoder failed to encode frame");
             notify(OMX_EventError,
@@ -676,6 +819,17 @@
         notifyEmptyBufferDone(inputBufferHeader);
     }
 }
+
+OMX_ERRORTYPE SoftVPXEncoder::getExtensionIndex(
+        const char *name, OMX_INDEXTYPE *index) {
+    if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
+        *index = OMX_IndexVendorStartUnused;
+        return OMX_ErrorNone;
+    }
+
+    return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
 }  // namespace android
 
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index a0a8ee6..4ee5e51 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -23,6 +23,8 @@
 #include <OMX_VideoExt.h>
 #include <OMX_IndexExt.h>
 
+#include <hardware/gralloc.h>
+
 #include "vpx/vpx_encoder.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vp8cx.h"
@@ -57,14 +59,13 @@
 //    - OMX timestamps are in microseconds, therefore
 // encoder timebase is fixed to 1/1000000
 
-class SoftVPXEncoder : public SimpleSoftOMXComponent {
- public:
+struct SoftVPXEncoder : public SimpleSoftOMXComponent {
     SoftVPXEncoder(const char *name,
                    const OMX_CALLBACKTYPE *callbacks,
                    OMX_PTR appData,
                    OMX_COMPONENTTYPE **component);
 
- protected:
+protected:
     virtual ~SoftVPXEncoder();
 
     // Returns current values for requested OMX
@@ -77,13 +78,19 @@
     virtual OMX_ERRORTYPE internalSetParameter(
             OMX_INDEXTYPE index, const OMX_PTR param);
 
+    virtual OMX_ERRORTYPE setConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
     // OMX callback when buffers available
     // Note that both an input and output buffer
     // is expected to be available to carry out
     // encoding of the frame
     virtual void onQueueFilled(OMX_U32 portIndex);
 
- private:
+    virtual OMX_ERRORTYPE getExtensionIndex(
+            const char *name, OMX_INDEXTYPE *index);
+
+private:
     // number of buffers allocated per port
     static const uint32_t kNumBuffers = 4;
 
@@ -156,6 +163,11 @@
     // indeed YUV420SemiPlanar.
     uint8_t* mConversionBuffer;
 
+    bool mInputDataIsMeta;
+    const hw_module_t *mGrallocModule;
+
+    bool mKeyFrameRequested;
+
     // Initializes input and output OMX ports with sensible
     // default values.
     void initPorts();
diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
similarity index 85%
rename from media/libstagefright/wifi-display/ANetworkSession.cpp
rename to media/libstagefright/foundation/ANetworkSession.cpp
index 938d601..e629588 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -34,10 +34,21 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/Utils.h>
 
 namespace android {
 
+static uint16_t U16_AT(const uint8_t *ptr) {
+    return ptr[0] << 8 | ptr[1];
+}
+
+static uint32_t U32_AT(const uint8_t *ptr) {
+    return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
+}
+
+static uint64_t U64_AT(const uint8_t *ptr) {
+    return ((uint64_t)U32_AT(ptr)) << 32 | U32_AT(ptr + 4);
+}
+
 static const size_t kMaxUDPSize = 1500;
 static const int32_t kMaxUDPRetries = 200;
 
@@ -56,6 +67,12 @@
 };
 
 struct ANetworkSession::Session : public RefBase {
+    enum Mode {
+        MODE_RTSP,
+        MODE_DATAGRAM,
+        MODE_WEBSOCKET,
+    };
+
     enum State {
         CONNECTING,
         CONNECTED,
@@ -85,7 +102,9 @@
     status_t sendRequest(
             const void *data, ssize_t size, bool timeValid, int64_t timeUs);
 
-    void setIsRTSPConnection(bool yesno);
+    void setMode(Mode mode);
+
+    status_t switchToWebSocketMode();
 
 protected:
     virtual ~Session();
@@ -102,7 +121,7 @@
 
     int32_t mSessionID;
     State mState;
-    bool mIsRTSPConnection;
+    Mode mMode;
     int mSocket;
     sp<AMessage> mNotify;
     bool mSawReceiveFailure, mSawSendFailure;
@@ -145,7 +164,7 @@
         const sp<AMessage> &notify)
     : mSessionID(sessionID),
       mState(state),
-      mIsRTSPConnection(false),
+      mMode(MODE_DATAGRAM),
       mSocket(s),
       mNotify(notify),
       mSawReceiveFailure(false),
@@ -209,8 +228,18 @@
     return mSocket;
 }
 
-void ANetworkSession::Session::setIsRTSPConnection(bool yesno) {
-    mIsRTSPConnection = yesno;
+void ANetworkSession::Session::setMode(Mode mode) {
+    mMode = mode;
+}
+
+status_t ANetworkSession::Session::switchToWebSocketMode() {
+    if (mState != CONNECTED || mMode != MODE_RTSP) {
+        return INVALID_OPERATION;
+    }
+
+    mMode = MODE_WEBSOCKET;
+
+    return OK;
 }
 
 sp<AMessage> ANetworkSession::Session::getNotificationMessage() const {
@@ -238,6 +267,8 @@
 
 status_t ANetworkSession::Session::readMore() {
     if (mState == DATAGRAM) {
+        CHECK_EQ(mMode, MODE_DATAGRAM);
+
         status_t err;
         do {
             sp<ABuffer> buf = new ABuffer(kMaxUDPSize);
@@ -326,7 +357,7 @@
         err = -ECONNRESET;
     }
 
-    if (!mIsRTSPConnection) {
+    if (mMode == MODE_DATAGRAM) {
         // TCP stream carrying 16-bit length-prefixed datagrams.
 
         while (mInBuffer.size() >= 2) {
@@ -350,7 +381,7 @@
 
             mInBuffer.erase(0, packetSize + 2);
         }
-    } else {
+    } else if (mMode == MODE_RTSP) {
         for (;;) {
             size_t length;
 
@@ -417,6 +448,69 @@
                 break;
             }
         }
+    } else {
+        CHECK_EQ(mMode, MODE_WEBSOCKET);
+
+        const uint8_t *data = (const uint8_t *)mInBuffer.c_str();
+        // hexdump(data, mInBuffer.size());
+
+        while (mInBuffer.size() >= 2) {
+            size_t offset = 2;
+
+            unsigned payloadLen = data[1] & 0x7f;
+            if (payloadLen == 126) {
+                if (offset + 2 > mInBuffer.size()) {
+                    break;
+                }
+
+                payloadLen = U16_AT(&data[offset]);
+                offset += 2;
+            } else if (payloadLen == 127) {
+                if (offset + 8 > mInBuffer.size()) {
+                    break;
+                }
+
+                payloadLen = U64_AT(&data[offset]);
+                offset += 8;
+            }
+
+            uint32_t mask = 0;
+            if (data[1] & 0x80) {
+                // MASK==1
+                if (offset + 4 > mInBuffer.size()) {
+                    break;
+                }
+
+                mask = U32_AT(&data[offset]);
+                offset += 4;
+            }
+
+            if (offset + payloadLen > mInBuffer.size()) {
+                break;
+            }
+
+            // We have the full message.
+
+            sp<ABuffer> packet = new ABuffer(payloadLen);
+            memcpy(packet->data(), &data[offset], payloadLen);
+
+            if (mask != 0) {
+                for (size_t i = 0; i < payloadLen; ++i) {
+                    packet->data()[i] =
+                        data[offset + i]
+                            ^ ((mask >> (8 * (3 - (i % 4)))) & 0xff);
+                }
+            }
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("sessionID", mSessionID);
+            notify->setInt32("reason", kWhatWebSocketMessage);
+            notify->setBuffer("data", packet);
+            notify->setInt32("headerByte", data[0]);
+            notify->post();
+
+            mInBuffer.erase(0, offset + payloadLen);
+        }
     }
 
     if (err != OK) {
@@ -608,13 +702,61 @@
 
     sp<ABuffer> buffer;
 
-    if (mState == CONNECTED && !mIsRTSPConnection) {
+    if (mState == CONNECTED && mMode == MODE_DATAGRAM) {
         CHECK_LE(size, 65535);
 
         buffer = new ABuffer(size + 2);
         buffer->data()[0] = size >> 8;
         buffer->data()[1] = size & 0xff;
         memcpy(buffer->data() + 2, data, size);
+    } else if (mState == CONNECTED && mMode == MODE_WEBSOCKET) {
+        static const bool kUseMask = false;  // Chromium doesn't like it.
+
+        size_t numHeaderBytes = 2 + (kUseMask ? 4 : 0);
+        if (size > 65535) {
+            numHeaderBytes += 8;
+        } else if (size > 125) {
+            numHeaderBytes += 2;
+        }
+
+        buffer = new ABuffer(numHeaderBytes + size);
+        buffer->data()[0] = 0x81;  // FIN==1 | opcode=1 (text)
+        buffer->data()[1] = kUseMask ? 0x80 : 0x00;
+
+        if (size > 65535) {
+            buffer->data()[1] |= 127;
+            buffer->data()[2] = 0x00;
+            buffer->data()[3] = 0x00;
+            buffer->data()[4] = 0x00;
+            buffer->data()[5] = 0x00;
+            buffer->data()[6] = (size >> 24) & 0xff;
+            buffer->data()[7] = (size >> 16) & 0xff;
+            buffer->data()[8] = (size >> 8) & 0xff;
+            buffer->data()[9] = size & 0xff;
+        } else if (size > 125) {
+            buffer->data()[1] |= 126;
+            buffer->data()[2] = (size >> 8) & 0xff;
+            buffer->data()[3] = size & 0xff;
+        } else {
+            buffer->data()[1] |= size;
+        }
+
+        if (kUseMask) {
+            uint32_t mask = rand();
+
+            buffer->data()[numHeaderBytes - 4] = (mask >> 24) & 0xff;
+            buffer->data()[numHeaderBytes - 3] = (mask >> 16) & 0xff;
+            buffer->data()[numHeaderBytes - 2] = (mask >> 8) & 0xff;
+            buffer->data()[numHeaderBytes - 1] = mask & 0xff;
+
+            for (size_t i = 0; i < (size_t)size; ++i) {
+                buffer->data()[numHeaderBytes + i] =
+                    ((const uint8_t *)data)[i]
+                        ^ ((mask >> (8 * (3 - (i % 4)))) & 0xff);
+            }
+        } else {
+            memcpy(buffer->data() + numHeaderBytes, data, size);
+        }
     } else {
         buffer = new ABuffer(size);
         memcpy(buffer->data(), data, size);
@@ -1001,9 +1143,9 @@
             notify);
 
     if (mode == kModeCreateTCPDatagramSessionActive) {
-        session->setIsRTSPConnection(false);
+        session->setMode(Session::MODE_DATAGRAM);
     } else if (mode == kModeCreateRTSPClient) {
-        session->setIsRTSPConnection(true);
+        session->setMode(Session::MODE_RTSP);
     }
 
     mSessions.add(session->sessionID(), session);
@@ -1080,6 +1222,19 @@
     return err;
 }
 
+status_t ANetworkSession::switchToWebSocketMode(int32_t sessionID) {
+    Mutex::Autolock autoLock(mLock);
+
+    ssize_t index = mSessions.indexOfKey(sessionID);
+
+    if (index < 0) {
+        return -ENOENT;
+    }
+
+    const sp<Session> session = mSessions.valueAt(index);
+    return session->switchToWebSocketMode();
+}
+
 void ANetworkSession::interrupt() {
     static const char dummy = 0;
 
@@ -1213,8 +1368,10 @@
                                         clientSocket,
                                         session->getNotificationMessage());
 
-                            clientSession->setIsRTSPConnection(
-                                    session->isRTSPServer());
+                            clientSession->setMode(
+                                    session->isRTSPServer()
+                                        ? Session::MODE_RTSP
+                                        : Session::MODE_DATAGRAM);
 
                             sessionsToAdd.push_back(clientSession);
                         }
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index d65e213..ad2dab5 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -10,7 +10,9 @@
     ALooper.cpp                   \
     ALooperRoster.cpp             \
     AMessage.cpp                  \
+    ANetworkSession.cpp           \
     AString.cpp                   \
+    ParsedMessage.cpp             \
     base64.cpp                    \
     hexdump.cpp
 
diff --git a/media/libstagefright/wifi-display/ParsedMessage.cpp b/media/libstagefright/foundation/ParsedMessage.cpp
similarity index 89%
rename from media/libstagefright/wifi-display/ParsedMessage.cpp
rename to media/libstagefright/foundation/ParsedMessage.cpp
index c0e60c3..049c9ad 100644
--- a/media/libstagefright/wifi-display/ParsedMessage.cpp
+++ b/media/libstagefright/foundation/ParsedMessage.cpp
@@ -19,6 +19,7 @@
 #include <ctype.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
 
 namespace android {
 
@@ -89,6 +90,7 @@
     ssize_t lastDictIndex = -1;
 
     size_t offset = 0;
+    bool headersComplete = false;
     while (offset < size) {
         size_t lineEndOffset = offset;
         while (lineEndOffset + 1 < size
@@ -113,6 +115,8 @@
         }
 
         if (lineEndOffset == offset) {
+            // An empty line separates headers from body.
+            headersComplete = true;
             offset += 2;
             break;
         }
@@ -146,12 +150,17 @@
         offset = lineEndOffset + 2;
     }
 
+    if (!headersComplete && (!noMoreData || offset == 0)) {
+        // We either saw the empty line separating headers from body
+        // or we saw at least the status line and know that no more data
+        // is going to follow.
+        return -1;
+    }
+
     for (size_t i = 0; i < mDict.size(); ++i) {
         mDict.editValueAt(i).trim();
     }
 
-    // Found the end of headers.
-
     int32_t contentLength;
     if (!findInt32("content-length", &contentLength) || contentLength < 0) {
         contentLength = 0;
@@ -168,13 +177,17 @@
     return totalLength;
 }
 
-void ParsedMessage::getRequestField(size_t index, AString *field) const {
+bool ParsedMessage::getRequestField(size_t index, AString *field) const {
     AString line;
     CHECK(findString("_", &line));
 
     size_t prevOffset = 0;
     size_t offset = 0;
     for (size_t i = 0; i <= index; ++i) {
+        if (offset >= line.size()) {
+            return false;
+        }
+
         ssize_t spacePos = line.find(" ", offset);
 
         if (spacePos < 0) {
@@ -186,11 +199,16 @@
     }
 
     field->setTo(line, prevOffset, offset - prevOffset - 1);
+
+    return true;
 }
 
 bool ParsedMessage::getStatusCode(int32_t *statusCode) const {
     AString statusCodeString;
-    getRequestField(1, &statusCodeString);
+    if (!getRequestField(1, &statusCodeString)) {
+        *statusCode = 0;
+        return false;
+    }
 
     char *end;
     *statusCode = strtol(statusCodeString.c_str(), &end, 10);
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 2306f31..d3c74e2 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -25,6 +25,7 @@
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/TimeSource.h>
+#include <media/stagefright/MetaData.h>
 #include <utils/threads.h>
 #include <drm/DrmManagerClient.h>
 
@@ -100,7 +101,7 @@
 
     void postAudioEOS(int64_t delayUs = 0ll);
     void postAudioSeekComplete();
-
+    void postAudioTearDown();
     status_t dump(int fd, const Vector<String16> &args) const;
 
 private:
@@ -171,6 +172,7 @@
 
     ssize_t mActiveAudioTrackIndex;
     sp<MediaSource> mAudioTrack;
+    sp<MediaSource> mOmxSource;
     sp<MediaSource> mAudioSource;
     AudioPlayer *mAudioPlayer;
     int64_t mDurationUs;
@@ -211,7 +213,8 @@
     bool mAudioStatusEventPending;
     sp<TimedEventQueue::Event> mVideoLagEvent;
     bool mVideoLagEventPending;
-
+    sp<TimedEventQueue::Event> mAudioTearDownEvent;
+    bool mAudioTearDownEventPending;
     sp<TimedEventQueue::Event> mAsyncPrepareEvent;
     Condition mPreparedCondition;
     bool mIsAsyncPrepare;
@@ -223,6 +226,8 @@
     void postStreamDoneEvent_l(status_t status);
     void postCheckAudioStatusEvent(int64_t delayUs);
     void postVideoLagEvent_l();
+    void postAudioTearDownEvent(int64_t delayUs);
+
     status_t play_l();
 
     MediaBuffer *mVideoBuffer;
@@ -257,6 +262,7 @@
     void setAudioSource(sp<MediaSource> source);
     status_t initAudioDecoder();
 
+
     void setVideoSource(sp<MediaSource> source);
     status_t initVideoDecoder(uint32_t flags = 0);
 
@@ -273,6 +279,9 @@
     void abortPrepare(status_t err);
     void finishAsyncPrepare_l();
     void onVideoLagUpdate();
+    void onAudioTearDownEvent();
+
+    void beginPrepareAsync_l();
 
     bool getCachedDuration_l(int64_t *durationUs, bool *eos);
 
@@ -285,6 +294,7 @@
     void finishSeekIfNecessary(int64_t videoTimeUs);
     void ensureCacheIsFetching_l();
 
+    void createAudioPlayer_l();
     status_t startAudioPlayer_l(bool sendErrorNotification = true);
 
     void shutdownVideoDecoder_l();
@@ -327,6 +337,9 @@
         Vector<TrackStat> mTracks;
     } mStats;
 
+    bool    mOffloadAudio;
+    bool    mAudioTearDown;
+
     status_t setVideoScalingMode(int32_t mode);
     status_t setVideoScalingMode_l(int32_t mode);
     status_t getTrackInfo(Parcel* reply) const;
diff --git a/media/libstagefright/include/ESDS.h b/media/libstagefright/include/ESDS.h
index 3a79951..2f40dae 100644
--- a/media/libstagefright/include/ESDS.h
+++ b/media/libstagefright/include/ESDS.h
@@ -33,6 +33,9 @@
 
     status_t getObjectTypeIndication(uint8_t *objectTypeIndication) const;
     status_t getCodecSpecificInfo(const void **data, size_t *size) const;
+    status_t getCodecSpecificOffset(size_t *offset, size_t *size) const;
+    status_t getBitRate(uint32_t *brateMax, uint32_t *brateAvg) const;
+    status_t getStreamType(uint8_t *streamType) const;
 
 private:
     enum {
@@ -49,6 +52,9 @@
     size_t mDecoderSpecificOffset;
     size_t mDecoderSpecificLength;
     uint8_t mObjectTypeIndication;
+    uint8_t mStreamType;
+    uint32_t mBitRateMax;
+    uint32_t mBitRateAvg;
 
     status_t skipDescriptorHeader(
             size_t offset, size_t size,
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index c2dc351..d4b7f9f 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -59,6 +59,9 @@
     static void RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag);
     static void UnRegisterSocketUserTag(int sockfd);
 
+    static void RegisterSocketUserMark(int sockfd, uid_t uid);
+    static void UnRegisterSocketUserMark(int sockfd);
+
 protected:
     void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
 
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 24b8d98..7fed7d4 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -109,6 +109,13 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index);
 
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size);
+
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
     OMX_ERRORTYPE OnEvent(
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 67aba6b..f6ae376 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -96,6 +96,12 @@
     status_t getExtensionIndex(
             const char *parameterName, OMX_INDEXTYPE *index);
 
+    status_t setInternalOption(
+            OMX_U32 portIndex,
+            IOMX::InternalOptionType type,
+            const void *data,
+            size_t size);
+
     void onMessage(const omx_message &msg);
     void onObserverDied(OMXMaster *master);
     void onGetHandleFailed();
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index b3a8463..d6fd95b 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -18,12 +18,12 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <GraphicBufferSource.h>
+#include "GraphicBufferSource.h"
 
 #include <OMX_Core.h>
 #include <media/stagefright/foundation/ADebug.h>
 
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
 
 namespace android {
@@ -36,6 +36,7 @@
     mInitCheck(UNKNOWN_ERROR),
     mNodeInstance(nodeInstance),
     mExecuting(false),
+    mSuspended(false),
     mNumFramesAvailable(0),
     mEndOfStream(false),
     mEndOfStreamSent(false) {
@@ -51,10 +52,9 @@
 
     String8 name("GraphicBufferSource");
 
-    mBufferQueue = new BufferQueue(true);
+    mBufferQueue = new BufferQueue();
     mBufferQueue->setConsumerName(name);
     mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setSynchronousMode(true);
     mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
@@ -75,7 +75,7 @@
     sp<BufferQueue::ConsumerListener> proxy;
     proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    mInitCheck = mBufferQueue->consumerConnect(proxy);
+    mInitCheck = mBufferQueue->consumerConnect(proxy, false);
     if (mInitCheck != NO_ERROR) {
         ALOGE("Error connecting to BufferQueue: %s (%d)",
                 strerror(-mInitCheck), mInitCheck);
@@ -130,10 +130,12 @@
 
 void GraphicBufferSource::omxLoaded(){
     Mutex::Autolock autoLock(mMutex);
-    ALOGV("--> loaded");
-    CHECK(mExecuting);
+    if (!mExecuting) {
+        // This can happen if something failed very early.
+        ALOGW("Dropped back down to Loaded without Executing");
+    }
 
-    ALOGV("Dropped down to loaded, avail=%d eos=%d eosSent=%d",
+    ALOGV("--> loaded; avail=%d eos=%d eosSent=%d",
             mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
 
     // Codec is no longer executing.  Discard all codec-related state.
@@ -237,9 +239,43 @@
     return;
 }
 
+void GraphicBufferSource::suspend(bool suspend) {
+    Mutex::Autolock autoLock(mMutex);
+
+    if (suspend) {
+        mSuspended = true;
+
+        while (mNumFramesAvailable > 0) {
+            BufferQueue::BufferItem item;
+            status_t err = mBufferQueue->acquireBuffer(&item, 0);
+
+            if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+                // shouldn't happen.
+                ALOGW("suspend: frame was not available");
+                break;
+            } else if (err != OK) {
+                ALOGW("suspend: acquireBuffer returned err=%d", err);
+                break;
+            }
+
+            --mNumFramesAvailable;
+
+            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+        }
+        return;
+    }
+
+    mSuspended = false;
+}
+
 bool GraphicBufferSource::fillCodecBuffer_l() {
     CHECK(mExecuting && mNumFramesAvailable > 0);
 
+    if (mSuspended) {
+        return false;
+    }
+
     int cbi = findAvailableCodecBuffer_l();
     if (cbi < 0) {
         // No buffers available, bail.
@@ -251,7 +287,7 @@
     ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
             mNumFramesAvailable);
     BufferQueue::BufferItem item;
-    status_t err = mBufferQueue->acquireBuffer(&item);
+    status_t err = mBufferQueue->acquireBuffer(&item, 0);
     if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
         // shouldn't happen
         ALOGW("fillCodecBuffer_l: frame was not available");
@@ -416,13 +452,18 @@
     ALOGV("onFrameAvailable exec=%d avail=%d",
             mExecuting, mNumFramesAvailable);
 
-    if (mEndOfStream) {
-        // This should only be possible if a new buffer was queued after
-        // EOS was signaled, i.e. the app is misbehaving.
-        ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+    if (mEndOfStream || mSuspended) {
+        if (mEndOfStream) {
+            // This should only be possible if a new buffer was queued after
+            // EOS was signaled, i.e. the app is misbehaving.
+
+            ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+        } else {
+            ALOGV("onFrameAvailable: suspended, ignoring frame");
+        }
 
         BufferQueue::BufferItem item;
-        status_t err = mBufferQueue->acquireBuffer(&item);
+        status_t err = mBufferQueue->acquireBuffer(&item, 0);
         if (err == OK) {
             mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
                     EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 8c6b470..ac73770 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -85,6 +85,10 @@
     // have a codec buffer ready, we just set the mEndOfStream flag.
     status_t signalEndOfInputStream();
 
+    // If suspend is true, all incoming buffers (including those currently
+    // in the BufferQueue) will be discarded until the suspension is lifted.
+    void suspend(bool suspend);
+
 protected:
     // BufferQueue::ConsumerListener interface, called when a new frame of
     // data is available.  If we're executing and a codec buffer is
@@ -155,6 +159,8 @@
     // Set by omxExecuting() / omxIdling().
     bool mExecuting;
 
+    bool mSuspended;
+
     // We consume graphic buffers from this.
     sp<BufferQueue> mBufferQueue;
 
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 3987ead..4b1dbe6 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -396,6 +396,15 @@
             parameter_name, index);
 }
 
+status_t OMX::setInternalOption(
+        node_id node,
+        OMX_U32 port_index,
+        InternalOptionType type,
+        const void *data,
+        size_t size) {
+    return findInstance(node)->setInternalOption(port_index, type, data, size);
+}
+
 OMX_ERRORTYPE OMX::OnEvent(
         node_id node,
         OMX_IN OMX_EVENTTYPE eEvent,
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index a9eb94f..525e18d 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -238,6 +238,18 @@
 
 status_t OMXNodeInstance::sendCommand(
         OMX_COMMANDTYPE cmd, OMX_S32 param) {
+    const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
+    if (bufferSource != NULL
+            && cmd == OMX_CommandStateSet
+            && param == OMX_StateLoaded) {
+        // Initiating transition from Executing -> Loaded
+        // Buffers are about to be freed.
+        bufferSource->omxLoaded();
+        setGraphicBufferSource(NULL);
+
+        // fall through
+    }
+
     Mutex::Autolock autoLock(mLock);
 
     OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL);
@@ -584,7 +596,8 @@
     CHECK(oerr == OMX_ErrorNone);
 
     if (def.format.video.eColorFormat != OMX_COLOR_FormatAndroidOpaque) {
-        ALOGE("createInputSurface requires AndroidOpaque color format");
+        ALOGE("createInputSurface requires COLOR_FormatSurface "
+              "(AndroidOpaque) color format");
         return INVALID_OPERATION;
     }
 
@@ -769,6 +782,36 @@
     return StatusFromOMXError(err);
 }
 
+status_t OMXNodeInstance::setInternalOption(
+        OMX_U32 portIndex,
+        IOMX::InternalOptionType type,
+        const void *data,
+        size_t size) {
+    switch (type) {
+        case IOMX::INTERNAL_OPTION_SUSPEND:
+        {
+            const sp<GraphicBufferSource> &bufferSource =
+                getGraphicBufferSource();
+
+            if (bufferSource == NULL || portIndex != kPortIndexInput) {
+                return ERROR_UNSUPPORTED;
+            }
+
+            if (size != sizeof(bool)) {
+                return INVALID_OPERATION;
+            }
+
+            bool suspend = *(bool *)data;
+            bufferSource->suspend(suspend);
+
+            return OK;
+        }
+
+        default:
+            return ERROR_UNSUPPORTED;
+    }
+}
+
 void OMXNodeInstance::onMessage(const omx_message &msg) {
     if (msg.type == omx_message::FILL_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
@@ -818,16 +861,11 @@
         OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
     const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
 
-    if (bufferSource != NULL && event == OMX_EventCmdComplete &&
-            arg1 == OMX_CommandStateSet) {
-        if (arg2 == OMX_StateExecuting) {
-            bufferSource->omxExecuting();
-        } else if (arg2 == OMX_StateLoaded) {
-            // Must be shutting down -- won't have a GraphicBufferSource
-            // on the way up.
-            bufferSource->omxLoaded();
-            setGraphicBufferSource(NULL);
-        }
+    if (bufferSource != NULL
+            && event == OMX_EventCmdComplete
+            && arg1 == OMX_CommandStateSet
+            && arg2 == OMX_StateExecuting) {
+        bufferSource->omxExecuting();
     }
 }
 
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 3068541..906aef3 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -60,6 +60,7 @@
         ALOGE("Connection is still open, closing the socket.");
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -214,6 +215,7 @@
     if (mState != DISCONNECTED) {
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -266,6 +268,7 @@
     if (mUIDValid) {
         HTTPBase::RegisterSocketUserTag(mSocket, mUID,
                                         (uint32_t)*(uint32_t*) "RTSP");
+        HTTPBase::RegisterSocketUserMark(mSocket, mUID);
     }
 
     MakeSocketBlocking(mSocket, false);
@@ -295,6 +298,7 @@
 
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -312,6 +316,7 @@
 void ARTSPConnection::performDisconnect() {
     if (mUIDValid) {
         HTTPBase::UnRegisterSocketUserTag(mSocket);
+        HTTPBase::UnRegisterSocketUserMark(mSocket);
     }
     close(mSocket);
     mSocket = -1;
@@ -385,6 +390,7 @@
         mState = DISCONNECTED;
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index e067e20..946f602 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -127,7 +127,8 @@
           mKeepAliveTimeoutUs(kDefaultKeepAliveTimeoutUs),
           mKeepAliveGeneration(0),
           mPausing(false),
-          mPauseGeneration(0) {
+          mPauseGeneration(0),
+          mPlayResponseParsed(false) {
         mNetLooper->setName("rtsp net");
         mNetLooper->start(false /* runOnCallingThread */,
                           false /* canCallJava */,
@@ -711,7 +712,9 @@
                             // Clear the tag
                             if (mUIDValid) {
                                 HTTPBase::UnRegisterSocketUserTag(track->mRTPSocket);
+                                HTTPBase::UnRegisterSocketUserMark(track->mRTPSocket);
                                 HTTPBase::UnRegisterSocketUserTag(track->mRTCPSocket);
+                                HTTPBase::UnRegisterSocketUserMark(track->mRTCPSocket);
                             }
 
                             close(track->mRTPSocket);
@@ -842,7 +845,9 @@
                         // Clear the tag
                         if (mUIDValid) {
                             HTTPBase::UnRegisterSocketUserTag(info->mRTPSocket);
+                            HTTPBase::UnRegisterSocketUserMark(info->mRTPSocket);
                             HTTPBase::UnRegisterSocketUserTag(info->mRTCPSocket);
+                            HTTPBase::UnRegisterSocketUserMark(info->mRTCPSocket);
                         }
 
                         close(info->mRTPSocket);
@@ -1371,6 +1376,7 @@
     }
 
     void parsePlayResponse(const sp<ARTSPResponse> &response) {
+        mPlayResponseParsed = true;
         if (mTracks.size() == 0) {
             ALOGV("parsePlayResponse: late packets ignored.");
             return;
@@ -1524,6 +1530,8 @@
 
     Vector<TrackInfo> mTracks;
 
+    bool mPlayResponseParsed;
+
     void setupTrack(size_t index) {
         sp<APacketSource> source =
             new APacketSource(mSessionDesc, index);
@@ -1595,6 +1603,8 @@
                                                 (uint32_t)*(uint32_t*) "RTP_");
                 HTTPBase::RegisterSocketUserTag(info->mRTCPSocket, mUID,
                                                 (uint32_t)*(uint32_t*) "RTP_");
+                HTTPBase::RegisterSocketUserMark(info->mRTPSocket, mUID);
+                HTTPBase::RegisterSocketUserMark(info->mRTCPSocket, mUID);
             }
 
             request.append("Transport: RTP/AVP/UDP;unicast;client_port=");
@@ -1728,6 +1738,13 @@
             int32_t trackIndex, const sp<ABuffer> &accessUnit) {
         ALOGV("onAccessUnitComplete track %d", trackIndex);
 
+        if(!mPlayResponseParsed){
+            ALOGI("play response is not parsed, storing accessunit");
+            TrackInfo *track = &mTracks.editItemAt(trackIndex);
+            track->mPackets.push_back(accessUnit);
+            return;
+        }
+
         if (mFirstAccessUnit) {
             sp<AMessage> msg = mNotify->dup();
             msg->setInt32("what", kWhatConnected);
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index 404b41e..c7d107e 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -3,11 +3,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-        ANetworkSession.cpp             \
         MediaReceiver.cpp               \
         MediaSender.cpp                 \
         Parameters.cpp                  \
-        ParsedMessage.cpp               \
         rtp/RTPAssembler.cpp            \
         rtp/RTPReceiver.cpp             \
         rtp/RTPSender.cpp               \
diff --git a/media/libstagefright/wifi-display/MediaReceiver.cpp b/media/libstagefright/wifi-display/MediaReceiver.cpp
index 364acb9..5524235 100644
--- a/media/libstagefright/wifi-display/MediaReceiver.cpp
+++ b/media/libstagefright/wifi-display/MediaReceiver.cpp
@@ -20,13 +20,13 @@
 
 #include "MediaReceiver.h"
 
-#include "ANetworkSession.h"
 #include "AnotherPacketSource.h"
 #include "rtp/RTPReceiver.h"
 
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index a230cd8..b1cdec0 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -20,7 +20,6 @@
 
 #include "MediaSender.h"
 
-#include "ANetworkSession.h"
 #include "rtp/RTPSender.h"
 #include "source/TSPacketizer.h"
 
@@ -31,6 +30,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <ui/GraphicBuffer.h>
 
 namespace android {
diff --git a/media/libstagefright/wifi-display/TimeSyncer.cpp b/media/libstagefright/wifi-display/TimeSyncer.cpp
index cb429bc..0f4d93a 100644
--- a/media/libstagefright/wifi-display/TimeSyncer.cpp
+++ b/media/libstagefright/wifi-display/TimeSyncer.cpp
@@ -20,13 +20,12 @@
 
 #include "TimeSyncer.h"
 
-#include "ANetworkSession.h"
-
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
diff --git a/media/libstagefright/wifi-display/nettest.cpp b/media/libstagefright/wifi-display/nettest.cpp
index 0779bf5..73c0d80 100644
--- a/media/libstagefright/wifi-display/nettest.cpp
+++ b/media/libstagefright/wifi-display/nettest.cpp
@@ -18,7 +18,6 @@
 #define LOG_TAG "nettest"
 #include <utils/Log.h>
 
-#include "ANetworkSession.h"
 #include "TimeSyncer.h"
 
 #include <binder/ProcessState.h>
@@ -27,6 +26,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
diff --git a/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
index 2d22e79..3b3bd63 100644
--- a/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
@@ -21,11 +21,10 @@
 #include "RTPAssembler.h"
 #include "RTPReceiver.h"
 
-#include "ANetworkSession.h"
-
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index 6bbe650..1887b8b 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -20,11 +20,10 @@
 
 #include "RTPSender.h"
 
-#include "ANetworkSession.h"
-
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
diff --git a/media/libstagefright/wifi-display/rtptest.cpp b/media/libstagefright/wifi-display/rtptest.cpp
index 764a38b..b902f29 100644
--- a/media/libstagefright/wifi-display/rtptest.cpp
+++ b/media/libstagefright/wifi-display/rtptest.cpp
@@ -18,7 +18,6 @@
 #define LOG_TAG "rtptest"
 #include <utils/Log.h>
 
-#include "ANetworkSession.h"
 #include "rtp/RTPSender.h"
 #include "rtp/RTPReceiver.h"
 #include "TimeSyncer.h"
@@ -29,6 +28,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
index 15f9c88..cdb2267 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -29,9 +29,8 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
 
 namespace android {
 
@@ -488,12 +487,38 @@
             break;
         }
 
+        case kWhatQueueAccessUnit:
+            onQueueAccessUnit(msg);
+            break;
+
+        case kWhatSetFormat:
+            onSetFormat(msg);
+            break;
+
         default:
             TRESPASS();
     }
 }
 
 void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+    sp<AMessage> msg = new AMessage(kWhatSetFormat, id());
+    msg->setSize("trackIndex", trackIndex);
+    msg->setMessage("format", format);
+    msg->post();
+}
+
+void DirectRenderer::onSetFormat(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<AMessage> format;
+    CHECK(msg->findMessage("format", &format));
+
+    internalSetFormat(trackIndex, format);
+}
+
+void DirectRenderer::internalSetFormat(
+        size_t trackIndex, const sp<AMessage> &format) {
     CHECK_LT(trackIndex, 2u);
 
     CHECK(mDecoderContext[trackIndex] == NULL);
@@ -517,18 +542,21 @@
 
 void DirectRenderer::queueAccessUnit(
         size_t trackIndex, const sp<ABuffer> &accessUnit) {
+    sp<AMessage> msg = new AMessage(kWhatQueueAccessUnit, id());
+    msg->setSize("trackIndex", trackIndex);
+    msg->setBuffer("accessUnit", accessUnit);
+    msg->post();
+}
+
+void DirectRenderer::onQueueAccessUnit(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<ABuffer> accessUnit;
+    CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
     CHECK_LT(trackIndex, 2u);
-
-    if (mDecoderContext[trackIndex] == NULL) {
-        CHECK_EQ(trackIndex, 0u);
-
-        sp<AMessage> format = new AMessage;
-        format->setString("mime", "video/avc");
-        format->setInt32("width", 640);
-        format->setInt32("height", 360);
-
-        setFormat(trackIndex, format);
-    }
+    CHECK(mDecoderContext[trackIndex] != NULL);
 
     mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
 }
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
index c5a4a83..07c2170 100644
--- a/media/libstagefright/wifi-display/sink/DirectRenderer.h
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -23,9 +23,7 @@
 namespace android {
 
 struct ABuffer;
-struct AudioTrack;
 struct IGraphicBufferProducer;
-struct MediaCodec;
 
 // Renders audio and video data queued by calls to "queueAccessUnit".
 struct DirectRenderer : public AHandler {
@@ -45,6 +43,8 @@
     enum {
         kWhatDecoderNotify,
         kWhatRenderVideo,
+        kWhatQueueAccessUnit,
+        kWhatSetFormat,
     };
 
     struct OutputInfo {
@@ -74,6 +74,11 @@
     void scheduleVideoRenderIfNecessary();
     void onRenderVideo();
 
+    void onSetFormat(const sp<AMessage> &msg);
+    void onQueueAccessUnit(const sp<AMessage> &msg);
+
+    void internalSetFormat(size_t trackIndex, const sp<AMessage> &format);
+
     DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
 };
 
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
index 5db2099..bc88f1e 100644
--- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
@@ -22,13 +22,13 @@
 
 #include "DirectRenderer.h"
 #include "MediaReceiver.h"
-#include "ParsedMessage.h"
 #include "TimeSyncer.h"
 
 #include <cutils/properties.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ParsedMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
index adb9d89..dc1fc32 100644
--- a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
@@ -18,12 +18,11 @@
 
 #define WIFI_DISPLAY_SINK_H_
 
-#include "ANetworkSession.h"
-
 #include "VideoFormats.h"
 
 #include <gui/Surface.h>
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 namespace android {
 
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 0214520..6f23854 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -21,6 +21,7 @@
 #include "Converter.h"
 
 #include "MediaPuller.h"
+#include "include/avc_utils.h"
 
 #include <cutils/properties.h>
 #include <gui/Surface.h>
@@ -33,6 +34,8 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 
+#include <arpa/inet.h>
+
 #include <OMX_Video.h>
 
 namespace android {
@@ -40,12 +43,14 @@
 Converter::Converter(
         const sp<AMessage> &notify,
         const sp<ALooper> &codecLooper,
-        const sp<AMessage> &outputFormat)
-    : mInitCheck(NO_INIT),
-      mNotify(notify),
+        const sp<AMessage> &outputFormat,
+        uint32_t flags)
+    : mNotify(notify),
       mCodecLooper(codecLooper),
       mOutputFormat(outputFormat),
+      mFlags(flags),
       mIsVideo(false),
+      mIsH264(false),
       mIsPCMAudio(false),
       mNeedToManuallyPrependSPSPPS(false),
       mDoMoreWorkPending(false)
@@ -55,21 +60,18 @@
 #endif
       ,mPrevVideoBitrate(-1)
       ,mNumFramesToDrop(0)
+      ,mEncodingSuspended(false)
     {
     AString mime;
     CHECK(mOutputFormat->findString("mime", &mime));
 
     if (!strncasecmp("video/", mime.c_str(), 6)) {
         mIsVideo = true;
+
+        mIsH264 = !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC);
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime.c_str())) {
         mIsPCMAudio = true;
     }
-
-    mInitCheck = initEncoder();
-
-    if (mInitCheck != OK) {
-        releaseEncoder();
-    }
 }
 
 static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) {
@@ -118,8 +120,19 @@
     (new AMessage(kWhatShutdown, id()))->post();
 }
 
-status_t Converter::initCheck() const {
-    return mInitCheck;
+status_t Converter::init() {
+    status_t err = initEncoder();
+
+    if (err != OK) {
+        releaseEncoder();
+    }
+
+    return err;
+}
+
+sp<IGraphicBufferProducer> Converter::getGraphicBufferProducer() {
+    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+    return mGraphicBufferProducer;
 }
 
 size_t Converter::getInputBufferCount() const {
@@ -244,6 +257,16 @@
         return err;
     }
 
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        CHECK(mIsVideo);
+
+        err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
     err = mEncoder->start();
 
     if (err != OK) {
@@ -256,7 +279,17 @@
         return err;
     }
 
-    return mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+    err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        scheduleDoMoreWork();
+    }
+
+    return OK;
 }
 
 void Converter::notifyError(status_t err) {
@@ -312,9 +345,12 @@
                 sp<ABuffer> accessUnit;
                 CHECK(msg->findBuffer("accessUnit", &accessUnit));
 
-                if (mIsVideo && mNumFramesToDrop) {
-                    --mNumFramesToDrop;
-                    ALOGI("dropping frame.");
+                if (mNumFramesToDrop > 0 || mEncodingSuspended) {
+                    if (mNumFramesToDrop > 0) {
+                        --mNumFramesToDrop;
+                        ALOGI("dropping frame.");
+                    }
+
                     ReleaseMediaBufferReference(accessUnit);
                     break;
                 }
@@ -396,7 +432,7 @@
             }
 
             if (mIsVideo) {
-                ALOGI("requesting IDR frame");
+                ALOGV("requesting IDR frame");
                 mEncoder->requestIDRFrame();
             }
             break;
@@ -411,6 +447,10 @@
             AString mime;
             CHECK(mOutputFormat->findString("mime", &mime));
             ALOGI("encoder (%s) shut down.", mime.c_str());
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatShutdownCompleted);
+            notify->post();
             break;
         }
 
@@ -431,6 +471,21 @@
             break;
         }
 
+        case kWhatSuspendEncoding:
+        {
+            int32_t suspend;
+            CHECK(msg->findInt32("suspend", &suspend));
+
+            mEncodingSuspended = suspend;
+
+            if (mFlags & FLAG_USE_SURFACE_INPUT) {
+                sp<AMessage> params = new AMessage;
+                params->setInt32("drop-input-frames",suspend);
+                mEncoder->setParameters(params);
+            }
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -616,22 +671,39 @@
     return OK;
 }
 
+sp<ABuffer> Converter::prependCSD(const sp<ABuffer> &accessUnit) const {
+    CHECK(mCSD0 != NULL);
+
+    sp<ABuffer> dup = new ABuffer(accessUnit->size() + mCSD0->size());
+    memcpy(dup->data(), mCSD0->data(), mCSD0->size());
+    memcpy(dup->data() + mCSD0->size(), accessUnit->data(), accessUnit->size());
+
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+    dup->meta()->setInt64("timeUs", timeUs);
+
+    return dup;
+}
+
 status_t Converter::doMoreWork() {
     status_t err;
 
-    for (;;) {
-        size_t bufferIndex;
-        err = mEncoder->dequeueInputBuffer(&bufferIndex);
+    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+        for (;;) {
+            size_t bufferIndex;
+            err = mEncoder->dequeueInputBuffer(&bufferIndex);
 
-        if (err != OK) {
-            break;
+            if (err != OK) {
+                break;
+            }
+
+            mAvailEncoderInputIndices.push_back(bufferIndex);
         }
 
-        mAvailEncoderInputIndices.push_back(bufferIndex);
+        feedEncoderInputBuffers();
     }
 
-    feedEncoderInputBuffers();
-
     for (;;) {
         size_t bufferIndex;
         size_t offset;
@@ -705,9 +777,19 @@
 
             if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
                 if (!handle) {
+                    if (mIsH264) {
+                        mCSD0 = buffer;
+                    }
                     mOutputFormat->setBuffer("csd-0", buffer);
                 }
             } else {
+                if (mNeedToManuallyPrependSPSPPS
+                        && mIsH264
+                        && (mFlags & FLAG_PREPEND_CSD_IF_NECESSARY)
+                        && IsIDR(buffer)) {
+                    buffer = prependCSD(buffer);
+                }
+
                 sp<AMessage> notify = mNotify->dup();
                 notify->setInt32("what", kWhatAccessUnit);
                 notify->setBuffer("accessUnit", buffer);
@@ -732,9 +814,18 @@
 }
 
 void Converter::dropAFrame() {
+    // Unsupported in surface input mode.
+    CHECK(!(mFlags & FLAG_USE_SURFACE_INPUT));
+
     (new AMessage(kWhatDropAFrame, id()))->post();
 }
 
+void Converter::suspendEncoding(bool suspend) {
+    sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, id());
+    msg->setInt32("suspend", suspend);
+    msg->post();
+}
+
 int32_t Converter::getVideoBitrate() const {
     return mPrevVideoBitrate;
 }
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 76c8b19..5876e07 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -18,13 +18,12 @@
 
 #define CONVERTER_H_
 
-#include "WifiDisplaySource.h"
-
 #include <media/stagefright/foundation/AHandler.h>
 
 namespace android {
 
 struct ABuffer;
+struct IGraphicBufferProducer;
 struct MediaCodec;
 
 #define ENABLE_SILENCE_DETECTION        0
@@ -33,11 +32,25 @@
 // media access unit of a different format.
 // Right now this'll convert raw video into H.264 and raw audio into AAC.
 struct Converter : public AHandler {
+    enum {
+        kWhatAccessUnit,
+        kWhatEOS,
+        kWhatError,
+        kWhatShutdownCompleted,
+    };
+
+    enum FlagBits {
+        FLAG_USE_SURFACE_INPUT          = 1,
+        FLAG_PREPEND_CSD_IF_NECESSARY   = 2,
+    };
     Converter(const sp<AMessage> &notify,
               const sp<ALooper> &codecLooper,
-              const sp<AMessage> &outputFormat);
+              const sp<AMessage> &outputFormat,
+              uint32_t flags = 0);
 
-    status_t initCheck() const;
+    status_t init();
+
+    sp<IGraphicBufferProducer> getGraphicBufferProducer();
 
     size_t getInputBufferCount() const;
 
@@ -50,22 +63,7 @@
     void requestIDRFrame();
 
     void dropAFrame();
-
-    enum {
-        kWhatAccessUnit,
-        kWhatEOS,
-        kWhatError,
-    };
-
-    enum {
-        kWhatDoMoreWork,
-        kWhatRequestIDRFrame,
-        kWhatShutdown,
-        kWhatMediaPullerNotify,
-        kWhatEncoderActivity,
-        kWhatDropAFrame,
-        kWhatReleaseOutputBuffer,
-    };
+    void suspendEncoding(bool suspend);
 
     void shutdownAsync();
 
@@ -74,22 +72,40 @@
 
     static int32_t GetInt32Property(const char *propName, int32_t defaultValue);
 
+    enum {
+        // MUST not conflict with private enums below.
+        kWhatMediaPullerNotify = 'pulN',
+    };
+
 protected:
     virtual ~Converter();
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
-    status_t mInitCheck;
+    enum {
+        kWhatDoMoreWork,
+        kWhatRequestIDRFrame,
+        kWhatSuspendEncoding,
+        kWhatShutdown,
+        kWhatEncoderActivity,
+        kWhatDropAFrame,
+        kWhatReleaseOutputBuffer,
+    };
+
     sp<AMessage> mNotify;
     sp<ALooper> mCodecLooper;
     sp<AMessage> mOutputFormat;
+    uint32_t mFlags;
     bool mIsVideo;
+    bool mIsH264;
     bool mIsPCMAudio;
     bool mNeedToManuallyPrependSPSPPS;
 
     sp<MediaCodec> mEncoder;
     sp<AMessage> mEncoderActivityNotify;
 
+    sp<IGraphicBufferProducer> mGraphicBufferProducer;
+
     Vector<sp<ABuffer> > mEncoderInputBuffers;
     Vector<sp<ABuffer> > mEncoderOutputBuffers;
 
@@ -97,6 +113,8 @@
 
     List<sp<ABuffer> > mInputBufferQueue;
 
+    sp<ABuffer> mCSD0;
+
     bool mDoMoreWorkPending;
 
 #if ENABLE_SILENCE_DETECTION
@@ -109,6 +127,7 @@
     int32_t mPrevVideoBitrate;
 
     int32_t mNumFramesToDrop;
+    bool mEncodingSuspended;
 
     status_t initEncoder();
     void releaseEncoder();
@@ -127,6 +146,8 @@
 
     static bool IsSilence(const sp<ABuffer> &accessUnit);
 
+    sp<ABuffer> prependCSD(const sp<ABuffer> &accessUnit) const;
+
     DISALLOW_EVIL_CONSTRUCTORS(Converter);
 };
 
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 189bea3..7e8891d 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -93,6 +93,9 @@
                 err = mSource->start(params.get());
             } else {
                 err = mSource->start();
+                if (err != OK) {
+                    ALOGE("source failed to start w/ err %d", err);
+                }
             }
 
             if (err == OK) {
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index a15fbac..0aa4ee5 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -521,7 +521,7 @@
                 if (mTracks.isEmpty()) {
                     ALOGI("Reached EOS");
                 }
-            } else {
+            } else if (what != Converter::kWhatShutdownCompleted) {
                 CHECK_EQ(what, Converter::kWhatError);
 
                 status_t err;
@@ -957,14 +957,16 @@
 
     sp<Converter> converter = new Converter(notify, codecLooper, format);
 
-    err = converter->initCheck();
+    looper()->registerHandler(converter);
+
+    err = converter->init();
     if (err != OK) {
         ALOGE("%s converter returned err %d", isVideo ? "video" : "audio", err);
+
+        looper()->unregisterHandler(converter->id());
         return err;
     }
 
-    looper()->registerHandler(converter);
-
     notify = new AMessage(Converter::kWhatMediaPullerNotify, converter->id());
     notify->setSize("trackIndex", trackIndex);
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index b421b35..4b59e62 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -21,7 +21,6 @@
 #include "WifiDisplaySource.h"
 #include "PlaybackSession.h"
 #include "Parameters.h"
-#include "ParsedMessage.h"
 #include "rtp/RTPSender.h"
 #include "TimeSyncer.h"
 
@@ -33,6 +32,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ParsedMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 64186fc..4f11712 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -18,10 +18,10 @@
 
 #define WIFI_DISPLAY_SOURCE_H_
 
-#include "ANetworkSession.h"
 #include "VideoFormats.h"
 
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 #include <netinet/in.h>
 
diff --git a/media/libstagefright/wifi-display/udptest.cpp b/media/libstagefright/wifi-display/udptest.cpp
index 111846d..61eb9f9 100644
--- a/media/libstagefright/wifi-display/udptest.cpp
+++ b/media/libstagefright/wifi-display/udptest.cpp
@@ -18,11 +18,11 @@
 #define LOG_TAG "udptest"
 #include <utils/Log.h>
 
-#include "ANetworkSession.h"
 #include "TimeSyncer.h"
 
 #include <binder/ProcessState.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 namespace android {
 
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index 9fee4d0..4607606 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -175,7 +175,8 @@
     iface.append(StringPrintf(":%d", port).c_str());
 
     sp<RemoteDisplayClient> client = new RemoteDisplayClient;
-    sp<IRemoteDisplay> display = service->listenForRemoteDisplay(client, iface);
+    sp<IRemoteDisplay> display =
+        service->listenForRemoteDisplay(client, iface);
 
     client->waitUntilDone();
 
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 714854e..54377f1 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -27,9 +27,6 @@
 
 LOCAL_SRC_FILES += StateQueue.cpp
 
-# uncomment for debugging timing problems related to StateQueue::push()
-LOCAL_CFLAGS += -DSTATE_QUEUE_DUMP
-
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
@@ -56,24 +53,10 @@
 
 LOCAL_MODULE:= libaudioflinger
 
-LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp
-
-LOCAL_CFLAGS += -DFAST_MIXER_STATISTICS
-
-# uncomment to display CPU load adjusted for CPU frequency
-# LOCAL_CFLAGS += -DCPU_FREQUENCY_STATISTICS
+LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp
 
 LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
 
-LOCAL_CFLAGS += -UFAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
-
-# uncomment to allow tee sink debugging to be enabled by property
-# LOCAL_CFLAGS += -DTEE_SINK
-
-# uncomment to enable the audio watchdog
-# LOCAL_SRC_FILES += AudioWatchdog.cpp
-# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
-
 # Define ANDROID_SMP appropriately. Used to get inline tracing fast-path.
 ifeq ($(TARGET_CPU_SMP),true)
     LOCAL_CFLAGS += -DANDROID_SMP=1
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index a6edb77..b30e2cf 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <dirent.h>
 #include <math.h>
 #include <signal.h>
@@ -36,10 +37,6 @@
 
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
-#include <cutils/compiler.h>
-
-//#include <private/media/AudioTrackShared.h>
-//#include <private/media/AudioEffectShared.h>
 
 #include <system/audio.h>
 #include <hardware/audio.h>
@@ -58,12 +55,13 @@
 #include <powermanager/PowerManager.h>
 
 #include <common_time/cc_helper.h>
-//#include <common_time/local_clock.h>
 
 #include <media/IMediaLogService.h>
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
+#include <media/AudioParameter.h>
+#include <private/android_filesystem_config.h>
 
 // ----------------------------------------------------------------------------
 
@@ -141,7 +139,9 @@
       mMasterMute(false),
       mNextUniqueId(1),
       mMode(AUDIO_MODE_INVALID),
-      mBtNrecIsOff(false)
+      mBtNrecIsOff(false),
+      mIsLowRamDevice(true),
+      mIsDeviceTypeKnown(false)
 {
     getpid_cached = getpid();
     char value[PROPERTY_VALUE_MAX];
@@ -981,11 +981,12 @@
 
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
-    struct audio_config config = {
-        sample_rate: sampleRate,
-        channel_mask: channelMask,
-        format: format,
-    };
+    struct audio_config config;
+    memset(&config, 0, sizeof(config));
+    config.sample_rate = sampleRate;
+    config.channel_mask = channelMask;
+    config.format = format;
+
     audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
     size_t size = dev->get_input_buffer_size(dev, &config);
     mHardwareStatus = AUDIO_HW_IDLE;
@@ -1382,31 +1383,53 @@
 
 // ----------------------------------------------------------------------------
 
+status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice)
+{
+    uid_t uid = IPCThreadState::self()->getCallingUid();
+    if (uid != AID_SYSTEM) {
+        return PERMISSION_DENIED;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mIsDeviceTypeKnown) {
+        return INVALID_OPERATION;
+    }
+    mIsLowRamDevice = isLowRamDevice;
+    mIsDeviceTypeKnown = true;
+    return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+
 audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
                                            audio_devices_t *pDevices,
                                            uint32_t *pSamplingRate,
                                            audio_format_t *pFormat,
                                            audio_channel_mask_t *pChannelMask,
                                            uint32_t *pLatencyMs,
-                                           audio_output_flags_t flags)
+                                           audio_output_flags_t flags,
+                                           const audio_offload_info_t *offloadInfo)
 {
-    status_t status;
     PlaybackThread *thread = NULL;
-    struct audio_config config = {
-        sample_rate: pSamplingRate ? *pSamplingRate : 0,
-        channel_mask: pChannelMask ? *pChannelMask : 0,
-        format: pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT,
-    };
+    struct audio_config config;
+    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
+    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
+    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
+    if (offloadInfo) {
+        config.offload_info = *offloadInfo;
+    }
+
     audio_stream_out_t *outStream = NULL;
     AudioHwDevice *outHwDev;
 
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
+    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
               config.sample_rate,
               config.format,
               config.channel_mask,
               flags);
+    ALOGV("openOutput(), offloadInfo %p version 0x%04x",
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version );
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1423,7 +1446,7 @@
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
-    status = hwDevHal->open_output_stream(hwDevHal,
+    status_t status = hwDevHal->open_output_stream(hwDevHal,
                                           id,
                                           *pDevices,
                                           (audio_output_flags_t)flags,
@@ -1431,7 +1454,7 @@
                                           &outStream);
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, "
+    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, "
             "Channels %x, status %d",
             outStream,
             config.sample_rate,
@@ -1440,9 +1463,12 @@
             status);
 
     if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
+        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
-        if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
+        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+            thread = new OffloadThread(this, output, id, *pDevices);
+            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
+        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
             (config.format != AUDIO_FORMAT_PCM_16_BIT) ||
             (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
             thread = new DirectOutputThread(this, output, id, *pDevices);
@@ -1532,11 +1558,28 @@
                     DuplicatingThread *dupThread =
                             (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
                     dupThread->removeOutputTrack((MixerThread *)thread.get());
+
+                }
+            }
+        }
+
+
+        mPlaybackThreads.removeItem(output);
+        // save all effects to the default thread
+        if (mPlaybackThreads.size()) {
+            PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
+            if (dstThread != NULL) {
+                // audioflinger lock is held here so the acquisition order of thread locks does not
+                // matter
+                Mutex::Autolock _dl(dstThread->mLock);
+                Mutex::Autolock _sl(thread->mLock);
+                Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    moveEffectChain_l(effectChains[i]->sessionId(), thread.get(), dstThread, true);
                 }
             }
         }
         audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
-        mPlaybackThreads.removeItem(output);
     }
     thread->exit();
     // The thread entity (active unit of execution) is no longer running here,
@@ -1591,11 +1634,11 @@
 {
     status_t status;
     RecordThread *thread = NULL;
-    struct audio_config config = {
-        sample_rate: pSamplingRate ? *pSamplingRate : 0,
-        channel_mask: pChannelMask ? *pChannelMask : 0,
-        format: pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT,
-    };
+    struct audio_config config;
+    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
+    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
+    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
+
     uint32_t reqSamplingRate = config.sample_rate;
     audio_format_t reqFormat = config.format;
     audio_channel_mask_t reqChannels = config.channel_mask;
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 05dbab1..eee5da5 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -160,7 +160,8 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags);
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo);
 
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                                   audio_io_handle_t output2);
@@ -219,6 +220,8 @@
     virtual uint32_t getPrimaryOutputSamplingRate();
     virtual size_t getPrimaryOutputFrameCount();
 
+    virtual status_t setLowRamDevice(bool isLowRamDevice);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -362,7 +365,9 @@
     class PlaybackThread;
     class MixerThread;
     class DirectOutputThread;
+    class OffloadThread;
     class DuplicatingThread;
+    class AsyncCallbackThread;
     class Track;
     class RecordTrack;
     class EffectModule;
@@ -404,8 +409,11 @@
                                              int64_t pts);
         virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
                                                   int target);
+        virtual status_t    setParameters(const String8& keyValuePairs);
+
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+
     private:
         const sp<PlaybackThread::Track> mTrack;
     };
@@ -427,6 +435,7 @@
         void                stop_nonvirtual();
     };
 
+
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
@@ -493,11 +502,12 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
+        audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
-        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out) :
-            audioHwDev(dev), stream(out) {}
+        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out, audio_output_flags_t flags) :
+            audioHwDev(dev), stream(out), flags(flags) {}
     };
 
     struct AudioStreamIn {
@@ -591,12 +601,11 @@
     status_t    closeOutput_nonvirtual(audio_io_handle_t output);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
 
-// do not use #ifdef here, since AudioFlinger.h is included by more than one module
-//#ifdef TEE_SINK
+#ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
     sp<NBAIO_Sink>   mRecordTeeSink;
     sp<NBAIO_Source> mRecordTeeSource;
-//#endif
+#endif
 
 public:
 
@@ -621,6 +630,15 @@
     static const size_t kTeeSinkTrackFramesDefault = 0x1000;
 #endif
 
+    // This method reads from a variable without mLock, but the variable is updated under mLock.  So
+    // we might read a stale value, or a value that's inconsistent with respect to other variables.
+    // In this case, it's safe because the return value isn't used for making an important decision.
+    // The reason we don't want to take mLock is because it could block the caller for a long time.
+    bool    isLowRamDevice() const { return mIsLowRamDevice; }
+
+private:
+    bool    mIsLowRamDevice;
+    bool    mIsDeviceTypeKnown;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7d38f80..df4e029 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 2706880..900b411 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "AudioPolicyService"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #undef __STRICT_ANSI__
 #define __STDINT_LIMITS
 #define __STDC_LIMIT_MACROS
@@ -40,6 +41,7 @@
 #include <system/audio_policy.h>
 #include <hardware/audio_policy.h>
 #include <audio_effects/audio_effects_conf.h>
+#include <media/AudioParameter.h>
 
 namespace android {
 
@@ -49,7 +51,7 @@
 static const int kDumpLockRetries = 50;
 static const int kDumpLockSleepUs = 20000;
 
-static const nsecs_t kAudioCommandTimeout = 3000000000; // 3 seconds
+static const nsecs_t kAudioCommandTimeout = 3000000000LL; // 3 seconds
 
 namespace {
     extern struct audio_policy_service_ops aps_ops;
@@ -68,10 +70,11 @@
     Mutex::Autolock _l(mLock);
 
     // start tone playback thread
-    mTonePlaybackThread = new AudioCommandThread(String8(""));
+    mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
     // start audio commands thread
-    mAudioCommandThread = new AudioCommandThread(String8("ApmCommand"));
-
+    mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
+    // start output activity command thread
+    mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
     if (rc)
@@ -222,15 +225,16 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags)
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
 {
     if (mpAudioPolicy == NULL) {
         return 0;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask,
-                                        flags);
+    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
 }
 
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -253,6 +257,15 @@
         return NO_INIT;
     }
     ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
 }
@@ -263,6 +276,12 @@
         return;
     }
     ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     mpAudioPolicy->release_output(mpAudioPolicy, output);
 }
@@ -638,8 +657,9 @@
 
 // -----------  AudioPolicyService::AudioCommandThread implementation ----------
 
-AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name)
-    : Thread(false), mName(name)
+AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name,
+                                                           const wp<AudioPolicyService>& service)
+    : Thread(false), mName(name), mService(service)
 {
     mpToneGenerator = NULL;
 }
@@ -647,7 +667,7 @@
 
 AudioPolicyService::AudioCommandThread::~AudioCommandThread()
 {
-    if (mName != "" && !mAudioCommands.isEmpty()) {
+    if (!mAudioCommands.isEmpty()) {
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
@@ -656,11 +676,7 @@
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
 {
-    if (mName != "") {
-        run(mName.string(), ANDROID_PRIORITY_AUDIO);
-    } else {
-        run("AudioCommand", ANDROID_PRIORITY_AUDIO);
-    }
+    run(mName.string(), ANDROID_PRIORITY_AUDIO);
 }
 
 bool AudioPolicyService::AudioCommandThread::threadLoop()
@@ -735,6 +751,32 @@
                     }
                     delete data;
                     }break;
+                case STOP_OUTPUT: {
+                    StopOutputData *data = (StopOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing stop output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+                    mLock.lock();
+                    delete data;
+                    }break;
+                case RELEASE_OUTPUT: {
+                    ReleaseOutputData *data = (ReleaseOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing release output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doReleaseOutput(data->mIO);
+                    mLock.lock();
+                    delete data;
+                    }break;
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -746,7 +788,7 @@
             }
         }
         // release delayed commands wake lock
-        if (mName != "" && mAudioCommands.isEmpty()) {
+        if (mAudioCommands.isEmpty()) {
             release_wake_lock(mName.string());
         }
         ALOGV("AudioCommandThread() going to sleep");
@@ -890,17 +932,46 @@
     return status;
 }
 
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
+                                                               audio_stream_type_t stream,
+                                                               int session)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = STOP_OUTPUT;
+    StopOutputData *data = new StopOutputData();
+    data->mIO = output;
+    data->mStream = stream;
+    data->mSession = session;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding stop output %d", output);
+    mWaitWorkCV.signal();
+}
+
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = RELEASE_OUTPUT;
+    ReleaseOutputData *data = new ReleaseOutputData();
+    data->mIO = output;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding release output %d", output);
+    mWaitWorkCV.signal();
+}
+
 // insertCommand_l() must be called with mLock held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
 {
     ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
-
     nsecs_t time = 0;
     command->mTime = systemTime() + milliseconds(delayMs);
 
     // acquire wake lock to make sure delayed commands are processed
-    if (mName != "" && mAudioCommands.isEmpty()) {
+    if (mAudioCommands.isEmpty()) {
         acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
     }
 
@@ -1055,6 +1126,21 @@
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
 }
 
+bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+{
+    if (mpAudioPolicy == NULL) {
+        ALOGV("mpAudioPolicy == NULL");
+        return false;
+    }
+
+    if (mpAudioPolicy->is_offload_supported == NULL) {
+        ALOGV("HAL does not implement is_offload_supported");
+        return false;
+    }
+
+    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
+}
+
 // ----------------------------------------------------------------------------
 // Audio pre-processing configuration
 // ----------------------------------------------------------------------------
@@ -1387,7 +1473,8 @@
                                                    audio_format_t *pFormat,
                                                    audio_channel_mask_t *pChannelMask,
                                                    uint32_t *pLatencyMs,
-                                                   audio_output_flags_t flags)
+                                                   audio_output_flags_t flags,
+                                                   const audio_offload_info_t *offloadInfo)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -1395,7 +1482,7 @@
         return 0;
     }
     return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
+                          pLatencyMs, flags, offloadInfo);
 }
 
 static audio_io_handle_t aps_open_dup_output(void *service,
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 53238fa..ae053a9 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -67,7 +67,8 @@
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
                                         audio_output_flags_t flags =
-                                                AUDIO_OUTPUT_FLAG_NONE);
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0);
@@ -136,6 +137,12 @@
     virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
     virtual status_t stopTone();
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+    virtual bool isOffloadSupported(const audio_offload_info_t &config);
+
+            status_t doStopOutput(audio_io_handle_t output,
+                                  audio_stream_type_t stream,
+                                  int session = 0);
+            void doReleaseOutput(audio_io_handle_t output);
 
 private:
                         AudioPolicyService() ANDROID_API;
@@ -159,10 +166,12 @@
             STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
-            SET_VOICE_VOLUME
+            SET_VOICE_VOLUME,
+            STOP_OUTPUT,
+            RELEASE_OUTPUT
         };
 
-        AudioCommandThread (String8 name);
+        AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
         virtual             ~AudioCommandThread();
 
                     status_t    dump(int fd);
@@ -180,6 +189,11 @@
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
                                             const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
+                    void        stopOutputCommand(audio_io_handle_t output,
+                                                  audio_stream_type_t stream,
+                                                  int session);
+                    void        releaseOutputCommand(audio_io_handle_t output);
+
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
     private:
@@ -224,12 +238,25 @@
             float mVolume;
         };
 
+        class StopOutputData {
+        public:
+            audio_io_handle_t mIO;
+            audio_stream_type_t mStream;
+            int mSession;
+        };
+
+        class ReleaseOutputData {
+        public:
+            audio_io_handle_t mIO;
+        };
+
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector <AudioCommand *> mAudioCommands; // list of pending commands
         ToneGenerator *mpToneGenerator;     // the tone generator
         AudioCommand mLastCommand;          // last processed command (used by dump)
         String8 mName;                      // string used by wake lock fo delayed commands
+        wp<AudioPolicyService> mService;
     };
 
     class EffectDesc {
@@ -314,6 +341,7 @@
                             // device connection state  or routing
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
+    sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
diff --git a/services/audioflinger/AudioWatchdog.cpp b/services/audioflinger/AudioWatchdog.cpp
index 8f328ee..93d185e 100644
--- a/services/audioflinger/AudioWatchdog.cpp
+++ b/services/audioflinger/AudioWatchdog.cpp
@@ -17,9 +17,12 @@
 #define LOG_TAG "AudioWatchdog"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <utils/Log.h>
 #include "AudioWatchdog.h"
 
+#ifdef AUDIO_WATCHDOG
+
 namespace android {
 
 void AudioWatchdogDump::dump(int fd)
@@ -132,3 +135,5 @@
 }
 
 }   // namespace android
+
+#endif // AUDIO_WATCHDOG
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
new file mode 100644
index 0000000..bc2038a
--- /dev/null
+++ b/services/audioflinger/Configuration.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Put build-time configuration options here rather than Android.mk,
+// so that the instantiate for AudioFlinger service will pick up the same options.
+
+#ifndef ANDROID_AUDIOFLINGER_CONFIGURATION_H
+#define ANDROID_AUDIOFLINGER_CONFIGURATION_H
+
+// uncomment to enable detailed battery usage reporting (not debugged)
+//#define ADD_BATTERY_DATA
+
+// uncomment to enable the audio watchdog
+//#define AUDIO_WATCHDOG
+
+// uncomment to display CPU load adjusted for CPU frequency
+//#define CPU_FREQUENCY_STATISTICS
+
+// uncomment to enable fast mixer to take performance samples for later statistical analysis
+#define FAST_MIXER_STATISTICS
+
+// uncomment to allow fast tracks at non-native sample rate
+//#define FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
+
+// uncomment for debugging timing problems related to StateQueue::push()
+//#define STATE_QUEUE_DUMP
+
+// uncomment to allow tee sink debugging to be enabled by property
+//#define TEE_SINK
+
+// uncomment to log CPU statistics every n wall clock seconds
+//#define DEBUG_CPU_USAGE 10
+
+#endif // ANDROID_AUDIOFLINGER_CONFIGURATION_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 942ea35..d5a21a7 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <utils/Log.h>
 #include <audio_effects/effect_visualizer.h>
 #include <audio_utils/primitives.h>
@@ -94,16 +95,7 @@
 {
     ALOGV("Destructor %p", this);
     if (mEffectInterface != NULL) {
-        if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-                (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
-            sp<ThreadBase> thread = mThread.promote();
-            if (thread != 0) {
-                audio_stream_t *stream = thread->stream();
-                if (stream != NULL) {
-                    stream->remove_audio_effect(stream, mEffectInterface);
-                }
-            }
-        }
+        remove_effect_from_hal_l();
         // release effect engine
         EffectRelease(mEffectInterface);
     }
@@ -487,7 +479,7 @@
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
-    status_t cmdStatus;
+    status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
     status_t status = (*mEffectInterface)->command(mEffectInterface,
                                                    EFFECT_CMD_DISABLE,
@@ -495,12 +487,19 @@
                                                    NULL,
                                                    &size,
                                                    &cmdStatus);
-    if (status == 0) {
+    if (status == NO_ERROR) {
         status = cmdStatus;
     }
-    if (status == 0 &&
-            ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+    if (status == NO_ERROR) {
+        status = remove_effect_from_hal_l();
+    }
+    return status;
+}
+
+status_t AudioFlinger::EffectModule::remove_effect_from_hal_l()
+{
+    if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
             audio_stream_t *stream = thread->stream();
@@ -509,7 +508,7 @@
             }
         }
     }
-    return status;
+    return NO_ERROR;
 }
 
 status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
@@ -594,6 +593,17 @@
                 h->setEnabled(enabled);
             }
         }
+//EL_FIXME not sure why this is needed?
+//        sp<ThreadBase> thread = mThread.promote();
+//        if (thread == 0) {
+//            return NO_ERROR;
+//        }
+//
+//        if ((thread->type() == ThreadBase::OFFLOAD) && (enabled)) {
+//            PlaybackThread *p = (PlaybackThread *)thread.get();
+//            ALOGV("setEnabled: Offload, invalidate tracks");
+//            p->invalidateTracks(AUDIO_STREAM_MUSIC);
+//        }
     }
     return NO_ERROR;
 }
@@ -1217,9 +1227,7 @@
 // Must be called with EffectChain::mLock locked
 void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
 {
-    size_t numSamples = thread->frameCount() * thread->channelCount();
-    memset(mInBuffer, 0, numSamples * sizeof(int16_t));
-
+    memset(mInBuffer, 0, thread->frameCount() * thread->frameSize());
 }
 
 // Must be called with EffectChain::mLock locked
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 91303ee..0b7fb83 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -126,6 +126,7 @@
 
     status_t start_l();
     status_t stop_l();
+    status_t remove_effect_from_hal_l();
 
 mutable Mutex               mLock;      // mutex for process, commands and handles list protection
     wp<ThreadBase>      mThread;    // parent thread
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 21df1d7..ad9f4f2 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -25,6 +25,7 @@
 
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
+#include "Configuration.h"
 #include <sys/atomics.h>
 #include <time.h>
 #include <utils/Log.h>
@@ -44,6 +45,8 @@
 #define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
 
+#define FCC_2                       2   // fixed channel count assumption
+
 namespace android {
 
 // Fast mixer thread
@@ -82,7 +85,7 @@
     struct timespec oldLoad = {0, 0};    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
     bool oldLoadValid = false;  // whether oldLoad is valid
     uint32_t bounds = 0;
-    bool full = false;      // whether we have collected at least kSamplingN samples
+    bool full = false;      // whether we have collected at least mSamplingN samples
 #ifdef CPU_FREQUENCY_STATISTICS
     ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
 #endif
@@ -142,7 +145,9 @@
                     preIdle = *current;
                     current = &preIdle;
                     oldTsValid = false;
+#ifdef FAST_MIXER_STATISTICS
                     oldLoadValid = false;
+#endif
                     ignoreNextOverrun = true;
                 }
                 previous = current;
@@ -182,8 +187,10 @@
                 warmupCycles = 0;
                 sleepNs = -1;
                 coldGen = current->mColdGen;
+#ifdef FAST_MIXER_STATISTICS
                 bounds = 0;
                 full = false;
+#endif
                 oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
             } else {
                 sleepNs = FAST_HOT_IDLE_NS;
@@ -220,7 +227,7 @@
                 } else {
                     format = outputSink->format();
                     sampleRate = Format_sampleRate(format);
-                    ALOG_ASSERT(Format_channelCount(format) == 2);
+                    ALOG_ASSERT(Format_channelCount(format) == FCC_2);
                 }
                 dumpState->mSampleRate = sampleRate;
             }
@@ -236,7 +243,7 @@
                     //       implementation; it would be better to have normal mixer allocate for us
                     //       to avoid blocking here and to prevent possible priority inversion
                     mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-                    mixBuffer = new short[frameCount * 2];
+                    mixBuffer = new short[frameCount * FCC_2];
                     periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
                     underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
                     overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
@@ -433,7 +440,7 @@
         //bool didFullWrite = false;    // dumpsys could display a count of partial writes
         if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
             if (mixBufferState == UNDEFINED) {
-                memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
+                memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short));
                 mixBufferState = ZEROED;
             }
             if (teeSink != NULL) {
@@ -498,91 +505,91 @@
                     }
                 }
                 sleepNs = -1;
-              if (isWarm) {
-                if (sec > 0 || nsec > underrunNs) {
-                    ATRACE_NAME("underrun");
-                    // FIXME only log occasionally
-                    ALOGV("underrun: time since last cycle %d.%03ld sec",
-                            (int) sec, nsec / 1000000L);
-                    dumpState->mUnderruns++;
-                    ignoreNextOverrun = true;
-                } else if (nsec < overrunNs) {
-                    if (ignoreNextOverrun) {
-                        ignoreNextOverrun = false;
-                    } else {
+                if (isWarm) {
+                    if (sec > 0 || nsec > underrunNs) {
+                        ATRACE_NAME("underrun");
                         // FIXME only log occasionally
-                        ALOGV("overrun: time since last cycle %d.%03ld sec",
+                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                 (int) sec, nsec / 1000000L);
-                        dumpState->mOverruns++;
-                    }
-                    // This forces a minimum cycle time. It:
-                    //   - compensates for an audio HAL with jitter due to sample rate conversion
-                    //   - works with a variable buffer depth audio HAL that never pulls at a rate
-                    //     < than overrunNs per buffer.
-                    //   - recovers from overrun immediately after underrun
-                    // It doesn't work with a non-blocking audio HAL.
-                    sleepNs = forceNs - nsec;
-                } else {
-                    ignoreNextOverrun = false;
-                }
-              }
-#ifdef FAST_MIXER_STATISTICS
-              if (isWarm) {
-                // advance the FIFO queue bounds
-                size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
-                bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
-                if (full) {
-                    bounds += 0x10000;
-                } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
-                    full = true;
-                }
-                // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
-                uint32_t monotonicNs = nsec;
-                if (sec > 0 && sec < 4) {
-                    monotonicNs += sec * 1000000000;
-                }
-                // compute the raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
-                uint32_t loadNs = 0;
-                struct timespec newLoad;
-                rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
-                if (rc == 0) {
-                    if (oldLoadValid) {
-                        sec = newLoad.tv_sec - oldLoad.tv_sec;
-                        nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
-                        if (nsec < 0) {
-                            --sec;
-                            nsec += 1000000000;
+                        dumpState->mUnderruns++;
+                        ignoreNextOverrun = true;
+                    } else if (nsec < overrunNs) {
+                        if (ignoreNextOverrun) {
+                            ignoreNextOverrun = false;
+                        } else {
+                            // FIXME only log occasionally
+                            ALOGV("overrun: time since last cycle %d.%03ld sec",
+                                    (int) sec, nsec / 1000000L);
+                            dumpState->mOverruns++;
                         }
-                        loadNs = nsec;
-                        if (sec > 0 && sec < 4) {
-                            loadNs += sec * 1000000000;
-                        }
+                        // This forces a minimum cycle time. It:
+                        //  - compensates for an audio HAL with jitter due to sample rate conversion
+                        //  - works with a variable buffer depth audio HAL that never pulls at a
+                        //    rate < than overrunNs per buffer.
+                        //  - recovers from overrun immediately after underrun
+                        // It doesn't work with a non-blocking audio HAL.
+                        sleepNs = forceNs - nsec;
                     } else {
-                        // first time through the loop
-                        oldLoadValid = true;
+                        ignoreNextOverrun = false;
                     }
-                    oldLoad = newLoad;
                 }
+#ifdef FAST_MIXER_STATISTICS
+                if (isWarm) {
+                    // advance the FIFO queue bounds
+                    size_t i = bounds & (dumpState->mSamplingN - 1);
+                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
+                    if (full) {
+                        bounds += 0x10000;
+                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
+                        full = true;
+                    }
+                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
+                    uint32_t monotonicNs = nsec;
+                    if (sec > 0 && sec < 4) {
+                        monotonicNs += sec * 1000000000;
+                    }
+                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+                    uint32_t loadNs = 0;
+                    struct timespec newLoad;
+                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
+                    if (rc == 0) {
+                        if (oldLoadValid) {
+                            sec = newLoad.tv_sec - oldLoad.tv_sec;
+                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+                            if (nsec < 0) {
+                                --sec;
+                                nsec += 1000000000;
+                            }
+                            loadNs = nsec;
+                            if (sec > 0 && sec < 4) {
+                                loadNs += sec * 1000000000;
+                            }
+                        } else {
+                            // first time through the loop
+                            oldLoadValid = true;
+                        }
+                        oldLoad = newLoad;
+                    }
 #ifdef CPU_FREQUENCY_STATISTICS
-                // get the absolute value of CPU clock frequency in kHz
-                int cpuNum = sched_getcpu();
-                uint32_t kHz = tcu.getCpukHz(cpuNum);
-                kHz = (kHz << 4) | (cpuNum & 0xF);
+                    // get the absolute value of CPU clock frequency in kHz
+                    int cpuNum = sched_getcpu();
+                    uint32_t kHz = tcu.getCpukHz(cpuNum);
+                    kHz = (kHz << 4) | (cpuNum & 0xF);
 #endif
-                // save values in FIFO queues for dumpsys
-                // these stores #1, #2, #3 are not atomic with respect to each other,
-                // or with respect to store #4 below
-                dumpState->mMonotonicNs[i] = monotonicNs;
-                dumpState->mLoadNs[i] = loadNs;
+                    // save values in FIFO queues for dumpsys
+                    // these stores #1, #2, #3 are not atomic with respect to each other,
+                    // or with respect to store #4 below
+                    dumpState->mMonotonicNs[i] = monotonicNs;
+                    dumpState->mLoadNs[i] = loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
-                dumpState->mCpukHz[i] = kHz;
+                    dumpState->mCpukHz[i] = kHz;
 #endif
-                // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
-                // the newest open and oldest closed halves are atomic with respect to each other
-                dumpState->mBounds = bounds;
-                ATRACE_INT("cycle_ms", monotonicNs / 1000000);
-                ATRACE_INT("load_us", loadNs / 1000);
-              }
+                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
+                    // the newest open & oldest closed halves are atomic with respect to each other
+                    dumpState->mBounds = bounds;
+                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
+                    ATRACE_INT("load_us", loadNs / 1000);
+                }
 #endif
             } else {
                 // first time through the loop
@@ -603,26 +610,44 @@
     // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
 }
 
-FastMixerDumpState::FastMixerDumpState() :
+FastMixerDumpState::FastMixerDumpState(
+#ifdef FAST_MIXER_STATISTICS
+        uint32_t samplingN
+#endif
+        ) :
     mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
     mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
     mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0),
     mTrackMask(0)
 #ifdef FAST_MIXER_STATISTICS
-    , mBounds(0)
+    , mSamplingN(0), mBounds(0)
 #endif
 {
     mMeasuredWarmupTs.tv_sec = 0;
     mMeasuredWarmupTs.tv_nsec = 0;
-    // sample arrays aren't accessed atomically with respect to the bounds,
-    // so clearing reduces chance for dumpsys to read random uninitialized samples
-    memset(&mMonotonicNs, 0, sizeof(mMonotonicNs));
-    memset(&mLoadNs, 0, sizeof(mLoadNs));
-#ifdef CPU_FREQUENCY_STATISTICS
-    memset(&mCpukHz, 0, sizeof(mCpukHz));
+#ifdef FAST_MIXER_STATISTICS
+    increaseSamplingN(samplingN);
 #endif
 }
 
+#ifdef FAST_MIXER_STATISTICS
+void FastMixerDumpState::increaseSamplingN(uint32_t samplingN)
+{
+    if (samplingN <= mSamplingN || samplingN > kSamplingN || roundup(samplingN) != samplingN) {
+        return;
+    }
+    uint32_t additional = samplingN - mSamplingN;
+    // sample arrays aren't accessed atomically with respect to the bounds,
+    // so clearing reduces chance for dumpsys to read random uninitialized samples
+    memset(&mMonotonicNs[mSamplingN], 0, sizeof(mMonotonicNs[0]) * additional);
+    memset(&mLoadNs[mSamplingN], 0, sizeof(mLoadNs[0]) * additional);
+#ifdef CPU_FREQUENCY_STATISTICS
+    memset(&mCpukHz[mSamplingN], 0, sizeof(mCpukHz[0]) * additional);
+#endif
+    mSamplingN = samplingN;
+}
+#endif
+
 FastMixerDumpState::~FastMixerDumpState()
 {
 }
@@ -641,7 +666,7 @@
     }
 }
 
-void FastMixerDumpState::dump(int fd)
+void FastMixerDumpState::dump(int fd) const
 {
     if (mCommand == FastMixerState::INITIAL) {
         fdprintf(fd, "FastMixer not initialized\n");
@@ -692,9 +717,9 @@
     uint32_t newestOpen = bounds & 0xFFFF;
     uint32_t oldestClosed = bounds >> 16;
     uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
-    if (n > kSamplingN) {
+    if (n > mSamplingN) {
         ALOGE("too many samples %u", n);
-        n = kSamplingN;
+        n = mSamplingN;
     }
     // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
     // and adjusted CPU load in MHz normalized for CPU clock frequency
@@ -710,7 +735,7 @@
     uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
     // loop over all the samples
     for (uint32_t j = 0; j < n; ++j) {
-        size_t i = oldestClosed++ & (kSamplingN - 1);
+        size_t i = oldestClosed++ & (mSamplingN - 1);
         uint32_t wallNs = mMonotonicNs[i];
         if (tail != NULL) {
             tail[j] = wallNs;
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 2ab1d04..6158925 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -85,10 +85,14 @@
 // Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
 // It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
 struct FastMixerDumpState {
-    FastMixerDumpState();
+    FastMixerDumpState(
+#ifdef FAST_MIXER_STATISTICS
+            uint32_t samplingN = kSamplingNforLowRamDevice
+#endif
+            );
     /*virtual*/ ~FastMixerDumpState();
 
-    void dump(int fd);          // should only be called on a stable copy, not the original
+    void dump(int fd) const;    // should only be called on a stable copy, not the original
 
     FastMixerState::Command mCommand;   // current command
     uint32_t mWriteSequence;    // incremented before and after each write()
@@ -106,8 +110,15 @@
 
 #ifdef FAST_MIXER_STATISTICS
     // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
-    // kSamplingN is the size of the sampling frame, and must be a power of 2 <= 0x8000.
+    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
+    // The sample arrays are virtually allocated based on this compile-time constant,
+    // but are only initialized and used based on the runtime parameter mSamplingN.
     static const uint32_t kSamplingN = 0x8000;
+    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
+    // This value was chosen such that each array uses 1 small page (4 Kbytes).
+    static const uint32_t kSamplingNforLowRamDevice = 0x400;
+    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
+    uint32_t mSamplingN;
     // The bounds define the interval of valid samples, and are represented as follows:
     //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
     //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
@@ -119,6 +130,8 @@
 #ifdef CPU_FREQUENCY_STATISTICS
     uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
 #endif
+    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
+    void    increaseSamplingN(uint32_t samplingN);
 #endif
 };
 
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index c45c81b..737de97 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "Configuration.h"
 #include "FastMixerState.h"
 
 namespace android {
diff --git a/services/audioflinger/ISchedulingPolicyService.cpp b/services/audioflinger/ISchedulingPolicyService.cpp
index 0079968..f55bc02 100644
--- a/services/audioflinger/ISchedulingPolicyService.cpp
+++ b/services/audioflinger/ISchedulingPolicyService.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "SchedulingPolicyService"
+#define LOG_TAG "ISchedulingPolicyService"
 //#define LOG_NDEBUG 0
 
 #include <binder/Parcel.h>
@@ -45,9 +45,17 @@
         data.writeInt32(tid);
         data.writeInt32(prio);
         uint32_t flags = asynchronous ? IBinder::FLAG_ONEWAY : 0;
-        remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply, flags);
-        // fail on exception
-        if (reply.readExceptionCode() != 0) return -1;
+        status_t status = remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply, flags);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        if (asynchronous) {
+            return NO_ERROR;
+        }
+        // fail on exception: force binder reconnection
+        if (reply.readExceptionCode() != 0) {
+            return DEAD_OBJECT;
+        }
         return reply.readInt32();
     }
 };
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index b1286d3..8b7433c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -51,6 +51,8 @@
             audio_stream_type_t streamType() const {
                 return mStreamType;
             }
+            bool        isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; }
+            status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
             int32_t     *auxBuffer() const { return mAuxBuffer; }
@@ -68,6 +70,7 @@
     friend class PlaybackThread;
     friend class MixerThread;
     friend class DirectOutputThread;
+    friend class OffloadThread;
 
                         Track(const Track&);
                         Track& operator = (const Track&);
@@ -142,6 +145,7 @@
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
     AudioTrackServerProxy*  mAudioTrackServerProxy;
+    bool                mResumeToStopping; // track was paused in stopping state.
 };  // end of Track
 
 class TimedTrack : public Track {
diff --git a/services/audioflinger/SchedulingPolicyService.cpp b/services/audioflinger/SchedulingPolicyService.cpp
index 36e62e9..70a3f1a 100644
--- a/services/audioflinger/SchedulingPolicyService.cpp
+++ b/services/audioflinger/SchedulingPolicyService.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "SchedulingPolicyService"
+//#define LOG_NDEBUG 0
+
 #include <binder/IServiceManager.h>
 #include <utils/Mutex.h>
 #include "ISchedulingPolicyService.h"
@@ -28,25 +31,32 @@
 int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous)
 {
     // FIXME merge duplicated code related to service lookup, caching, and error recovery
-    sp<ISchedulingPolicyService> sps;
+    int ret;
     for (;;) {
         sMutex.lock();
-        sps = sSchedulingPolicyService;
+        sp<ISchedulingPolicyService> sps = sSchedulingPolicyService;
         sMutex.unlock();
-        if (sps != 0) {
-            break;
-        }
-        sp<IBinder> binder = defaultServiceManager()->checkService(_scheduling_policy);
-        if (binder != 0) {
+        if (sps == 0) {
+            sp<IBinder> binder = defaultServiceManager()->checkService(_scheduling_policy);
+            if (binder == 0) {
+                sleep(1);
+                continue;
+            }
             sps = interface_cast<ISchedulingPolicyService>(binder);
             sMutex.lock();
             sSchedulingPolicyService = sps;
             sMutex.unlock();
+        }
+        ret = sps->requestPriority(pid, tid, prio, asynchronous);
+        if (ret != DEAD_OBJECT) {
             break;
         }
-        sleep(1);
+        ALOGW("SchedulingPolicyService died");
+        sMutex.lock();
+        sSchedulingPolicyService.clear();
+        sMutex.unlock();
     }
-    return sps->requestPriority(pid, tid, prio, asynchronous);
+    return ret;
 }
 
 }   // namespace android
diff --git a/services/audioflinger/StateQueue.cpp b/services/audioflinger/StateQueue.cpp
index 3e891a5..c2d3bbd 100644
--- a/services/audioflinger/StateQueue.cpp
+++ b/services/audioflinger/StateQueue.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "StateQueue"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <time.h>
 #include <cutils/atomic.h>
 #include <utils/Log.h>
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index e33b3c6..9cde642 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -31,8 +31,14 @@
 //        and this may result in an audible artifact
 //      needs read-only access to a recent stable state,
 //        but not necessarily the most current one
+//      only allocate and free memory when configuration changes
+//      avoid conventional logging, as this is a form of I/O and could block
+//      defer computation to other threads when feasible; for example
+//        cycle times are collected by fast mixer thread but the floating-point
+//        statistical calculations on these cycle times are computed by normal mixer
+//      these requirements also apply to callouts such as AudioBufferProvider and VolumeProvider
 //  Normal mixer thread:
-//      periodic with typical period ~40 ms
+//      periodic with typical period ~20 ms
 //      SCHED_OTHER scheduling policy and nice priority == urgent audio
 //      ok to block, but prefer to avoid as much as possible
 //      needs read/write access to state
diff --git a/services/audioflinger/StateQueueInstantiations.cpp b/services/audioflinger/StateQueueInstantiations.cpp
index 077582f..0d5cd0c 100644
--- a/services/audioflinger/StateQueueInstantiations.cpp
+++ b/services/audioflinger/StateQueueInstantiations.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "Configuration.h"
 #include "FastMixerState.h"
 #include "StateQueue.h"
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3b5727b..62e2e1e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -20,11 +20,12 @@
 //#define LOG_NDEBUG 0
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
+#include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <cutils/properties.h>
-#include <cutils/compiler.h>
+#include <media/AudioParameter.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -53,14 +54,11 @@
 #include "ServiceUtilities.h"
 #include "SchedulingPolicyService.h"
 
-#undef ADD_BATTERY_DATA
-
 #ifdef ADD_BATTERY_DATA
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaDeathNotifier.h>
 #endif
 
-// #define DEBUG_CPU_USAGE 10  // log statistics every n wall clock seconds
 #ifdef DEBUG_CPU_USAGE
 #include <cpustats/CentralTendencyStatistics.h>
 #include <cpustats/ThreadCpuUsage.h>
@@ -267,10 +265,9 @@
         audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
     :   Thread(false /*canCallJava*/),
         mType(type),
-        mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
-        // mChannelMask
-        mChannelCount(0),
-        mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+        mAudioFlinger(audioFlinger),
+        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
+        // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
         mParamStatus(NO_ERROR),
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
@@ -281,6 +278,12 @@
 
 AudioFlinger::ThreadBase::~ThreadBase()
 {
+    // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
+    for (size_t i = 0; i < mConfigEvents.size(); i++) {
+        delete mConfigEvents[i];
+    }
+    mConfigEvents.clear();
+
     mParamCond.broadcast();
     // do not lock the mutex in destructor
     releaseWakeLock_l();
@@ -420,9 +423,7 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
     result.append(buffer);
-    snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount);
+    snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
     result.append(buffer);
     snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
     result.append(buffer);
@@ -927,13 +928,19 @@
                                              audio_devices_t device,
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
-        mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mNormalFrameCount(0), mMixBuffer(NULL),
+        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
         mMixerStatusIgnoringFastTracks(MIXER_IDLE),
         standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+        mBytesRemaining(0),
+        mCurrentWriteLength(0),
+        mUseAsyncWrite(false),
+        mWriteBlocked(false),
+        mDraining(false),
         mScreenState(AudioFlinger::mScreenState),
         // index 0 is reserved for normal mixer's submix
         mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
@@ -976,7 +983,7 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mMixBuffer;
+    delete [] mAllocMixBuffer;
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1044,6 +1051,8 @@
 
     snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
     result.append(buffer);
+    snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
+    result.append(buffer);
     snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
             ns2ms(systemTime() - mLastWriteTime));
     result.append(buffer);
@@ -1182,7 +1191,22 @@
                 goto Exit;
             }
         }
+    } else if (mType == OFFLOAD) {
+        if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
+            ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
+                    "for output %p with format %d",
+                    sampleRate, format, channelMask, mOutput, mFormat);
+            lStatus = BAD_VALUE;
+            goto Exit;
+        }
     } else {
+        if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) {
+                ALOGE("createTrack_l() Bad parameter: format %d \""
+                        "for output %p with format %d",
+                        format, mOutput, mFormat);
+                lStatus = BAD_VALUE;
+                goto Exit;
+        }
         // Resampler implementation limits input sampling rate to 2 x output sampling rate.
         if (sampleRate > mSampleRate*2) {
             ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
@@ -1228,6 +1252,7 @@
             lStatus = NO_MEMORY;
             goto Exit;
         }
+
         mTracks.add(track);
 
         sp<EffectChain> chain = getEffectChain_l(sessionId);
@@ -1302,12 +1327,14 @@
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].volume = value;
+    signal_l();
 }
 
 void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].mute = muted;
+    signal_l();
 }
 
 float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
@@ -1327,6 +1354,30 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
+        if (!track->isOutputTrack()) {
+            TrackBase::track_state state = track->mState;
+            mLock.unlock();
+            status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
+            mLock.lock();
+            // abort track was stopped/paused while we released the lock
+            if (state != track->mState) {
+                if (status == NO_ERROR) {
+                    mLock.unlock();
+                    AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+                    mLock.lock();
+                }
+                return INVALID_OPERATION;
+            }
+            // abort if start is rejected by audio policy manager
+            if (status != NO_ERROR) {
+                return PERMISSION_DENIED;
+            }
+#ifdef ADD_BATTERY_DATA
+            // to track the speaker usage
+            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
+#endif
+        }
+
         track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
@@ -1347,14 +1398,19 @@
     return status;
 }
 
-// destroyTrack_l() must be called with ThreadBase::mLock held
-void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
+bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
     // active tracks are removed by threadLoop()
-    if (mActiveTracks.indexOf(track) < 0) {
+    bool trackActive = (mActiveTracks.indexOf(track) >= 0);
+    track->mState = TrackBase::STOPPED;
+    if (!trackActive) {
         removeTrack_l(track);
+    } else if (track->isFastTrack() || track->isOffloaded()) {
+        track->mState = TrackBase::STOPPING_1;
     }
+
+    return trackActive;
 }
 
 void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
@@ -1378,18 +1434,25 @@
     }
 }
 
+void AudioFlinger::PlaybackThread::signal_l()
+{
+    // Thread could be blocked waiting for async
+    // so signal it to handle state changes immediately
+    // If threadLoop is currently unlocked a signal of mWaitWorkCV will
+    // be lost so we also flag to prevent it blocking on mWaitWorkCV
+    mSignalPending = true;
+    mWaitWorkCV.signal();
+}
+
 String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
 {
-    String8 out_s8 = String8("");
-    char *s;
-
     Mutex::Autolock _l(mLock);
     if (initCheck() != NO_ERROR) {
-        return out_s8;
+        return String8();
     }
 
-    s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
-    out_s8 = String8(s);
+    char *s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
+    const String8 out_s8(s);
     free(s);
     return out_s8;
 }
@@ -1405,7 +1468,7 @@
     switch (event) {
     case AudioSystem::OUTPUT_OPENED:
     case AudioSystem::OUTPUT_CONFIG_CHANGED:
-        desc.channels = mChannelMask;
+        desc.channelMask = mChannelMask;
         desc.samplingRate = mSampleRate;
         desc.format = mFormat;
         desc.frameCount = mNormalFrameCount; // FIXME see
@@ -1423,12 +1486,78 @@
     mAudioFlinger->audioConfigChanged_l(event, mId, param2);
 }
 
+void AudioFlinger::PlaybackThread::writeCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setWriteBlocked(false);
+}
+
+void AudioFlinger::PlaybackThread::drainCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setDraining(false);
+}
+
+void AudioFlinger::PlaybackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::PlaybackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+// static
+int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
+                                                void *param,
+                                                void *cookie)
+{
+    AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
+    ALOGV("asyncCallback() event %d", event);
+    switch (event) {
+    case STREAM_CBK_EVENT_WRITE_READY:
+        me->writeCallback();
+        break;
+    case STREAM_CBK_EVENT_DRAIN_READY:
+        me->drainCallback();
+        break;
+    default:
+        ALOGW("asyncCallback() unknown event %d", event);
+        break;
+    }
+    return 0;
+}
+
 void AudioFlinger::PlaybackThread::readOutputParameters()
 {
+    // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
     mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
-    mChannelCount = (uint16_t)popcount(mChannelMask);
+    if (!audio_is_output_channel(mChannelMask)) {
+        LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
+        LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
+                "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
+    }
+    mChannelCount = popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+    if (!audio_is_valid_format(mFormat)) {
+        LOG_FATAL("HAL format %d not valid for output", mFormat);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+        LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
+                mFormat);
+    }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
     mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
     if (mFrameCount & 15) {
@@ -1436,6 +1565,14 @@
                 mFrameCount);
     }
 
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
+            (mOutput->stream->set_callback != NULL)) {
+        if (mOutput->stream->set_callback(mOutput->stream,
+                                      AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
+            mUseAsyncWrite = true;
+        }
+    }
+
     // Calculate size of normal mix buffer relative to the HAL output buffer size
     double multiplier = 1.0;
     if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -1478,9 +1615,11 @@
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mMixBuffer;
-    mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
-    memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+    delete[] mAllocMixBuffer;
+    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
+    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
+    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
+    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
@@ -1614,16 +1753,21 @@
         const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (CC_UNLIKELY(count)) {
+    if (count) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
-            if ((track->sharedBuffer() != 0) &&
-                    (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) {
+            if (!track->isOutputTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+#ifdef ADD_BATTERY_DATA
+                // to track the speaker usage
+                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+                if (track->isTerminated()) {
+                    AudioSystem::releaseOutput(mId);
+                }
             }
         }
     }
-
 }
 
 void AudioFlinger::PlaybackThread::checkSilentMode_l()
@@ -1644,17 +1788,18 @@
 }
 
 // shared by MIXER and DIRECT, overridden by DUPLICATING
-void AudioFlinger::PlaybackThread::threadLoop_write()
+ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
 {
     // FIXME rewrite to reduce number of system calls
     mLastWriteTime = systemTime();
     mInWrite = true;
-    int bytesWritten;
+    ssize_t bytesWritten;
 
     // If an NBAIO sink is present, use it to write the normal mixer's submix
     if (mNormalSink != 0) {
 #define mBitShift 2 // FIXME
-        size_t count = mixBufferSize >> mBitShift;
+        size_t count = mBytesRemaining >> mBitShift;
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1;
         ATRACE_BEGIN("write");
         // update the setpoint when AudioFlinger::mScreenState changes
         uint32_t screenState = AudioFlinger::mScreenState;
@@ -1666,7 +1811,7 @@
                         (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
             }
         }
-        ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
+        ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count);
         ATRACE_END();
         if (framesWritten > 0) {
             bytesWritten = framesWritten << mBitShift;
@@ -1675,15 +1820,48 @@
         }
     // otherwise use the HAL / AudioStreamOut directly
     } else {
-        // Direct output thread.
-        bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+        // Direct output and offload threads
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) / sizeof(int16_t);
+        if (mUseAsyncWrite) {
+            mWriteBlocked = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(true);
+        }
+        bytesWritten = mOutput->stream->write(mOutput->stream,
+                                                   mMixBuffer + offset, mBytesRemaining);
+        if (mUseAsyncWrite &&
+                ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
+            // do not wait for async callback in case of error or full write
+            mWriteBlocked = false;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(false);
+        }
     }
 
-    if (bytesWritten > 0) {
-        mBytesWritten += mixBufferSize;
-    }
     mNumWrites++;
     mInWrite = false;
+
+    return bytesWritten;
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_drain()
+{
+    if (mOutput->stream->drain) {
+        ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
+        if (mUseAsyncWrite) {
+            mDraining = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setDraining(true);
+        }
+        mOutput->stream->drain(mOutput->stream,
+            (mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
+                                                : AUDIO_DRAIN_ALL);
+    }
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_exit()
+{
+    // Default implementation has nothing to do
 }
 
 /*
@@ -1924,10 +2102,29 @@
 
             saveOutputTracks();
 
-            // put audio hardware into standby after short delay
-            if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
-                        isSuspended())) {
-                if (!mStandby) {
+            if (mSignalPending) {
+                // A signal was raised while we were unlocked
+                mSignalPending = false;
+            } else if (waitingAsyncCallback_l()) {
+                if (exitPending()) {
+                    break;
+                }
+                releaseWakeLock_l();
+                ALOGV("wait async completion");
+                mWaitWorkCV.wait(mLock);
+                ALOGV("async completion/wake");
+                acquireWakeLock_l();
+                if (exitPending()) {
+                    break;
+                }
+                if (!mActiveTracks.size() && (systemTime() > standbyTime)) {
+                    continue;
+                }
+                sleepTime = 0;
+            } else if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
+                                   isSuspended()) {
+                // put audio hardware into standby after short delay
+                if (shouldStandby_l()) {
 
                     threadLoop_standby();
 
@@ -1954,7 +2151,7 @@
                     mMixerStatus = MIXER_IDLE;
                     mMixerStatusIgnoringFastTracks = MIXER_IDLE;
                     mBytesWritten = 0;
-
+                    mBytesRemaining = 0;
                     checkSilentMode_l();
 
                     standbyTime = systemTime() + standbyDelay;
@@ -1976,50 +2173,73 @@
             lockEffectChains_l(effectChains);
         }
 
-        if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
-            threadLoop_mix();
-        } else {
-            threadLoop_sleepTime();
-        }
+        if (mBytesRemaining == 0) {
+            mCurrentWriteLength = 0;
+            if (mMixerStatus == MIXER_TRACKS_READY) {
+                // threadLoop_mix() sets mCurrentWriteLength
+                threadLoop_mix();
+            } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
+                        && (mMixerStatus != MIXER_DRAIN_ALL)) {
+                // threadLoop_sleepTime sets sleepTime to 0 if data
+                // must be written to HAL
+                threadLoop_sleepTime();
+                if (sleepTime == 0) {
+                    mCurrentWriteLength = mixBufferSize;
+                }
+            }
+            mBytesRemaining = mCurrentWriteLength;
+            if (isSuspended()) {
+                sleepTime = suspendSleepTimeUs();
+                // simulate write to HAL when suspended
+                mBytesWritten += mixBufferSize;
+                mBytesRemaining = 0;
+            }
 
-        if (isSuspended()) {
-            sleepTime = suspendSleepTimeUs();
-            mBytesWritten += mixBufferSize;
-        }
-
-        // only process effects if we're going to write
-        if (sleepTime == 0) {
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
+            // only process effects if we're going to write
+            if (sleepTime == 0) {
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    effectChains[i]->process_l();
+                }
             }
         }
 
         // enable changes in effect chain
         unlockEffectChains(effectChains);
 
-        // sleepTime == 0 means we must write to audio hardware
-        if (sleepTime == 0) {
-
-            threadLoop_write();
-
-if (mType == MIXER) {
-            // write blocked detection
-            nsecs_t now = systemTime();
-            nsecs_t delta = now - mLastWriteTime;
-            if (!mStandby && delta > maxPeriod) {
-                mNumDelayedWrites++;
-                if ((now - lastWarning) > kWarningThrottleNs) {
-                    ATRACE_NAME("underrun");
-                    ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
-                            ns2ms(delta), mNumDelayedWrites, this);
-                    lastWarning = now;
+        if (!waitingAsyncCallback()) {
+            // sleepTime == 0 means we must write to audio hardware
+            if (sleepTime == 0) {
+                if (mBytesRemaining) {
+                    ssize_t ret = threadLoop_write();
+                    if (ret < 0) {
+                        mBytesRemaining = 0;
+                    } else {
+                        mBytesWritten += ret;
+                        mBytesRemaining -= ret;
+                    }
+                } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
+                        (mMixerStatus == MIXER_DRAIN_ALL)) {
+                    threadLoop_drain();
                 }
-            }
+if (mType == MIXER) {
+                // write blocked detection
+                nsecs_t now = systemTime();
+                nsecs_t delta = now - mLastWriteTime;
+                if (!mStandby && delta > maxPeriod) {
+                    mNumDelayedWrites++;
+                    if ((now - lastWarning) > kWarningThrottleNs) {
+                        ATRACE_NAME("underrun");
+                        ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
+                                ns2ms(delta), mNumDelayedWrites, this);
+                        lastWarning = now;
+                    }
+                }
 }
 
-            mStandby = false;
-        } else {
-            usleep(sleepTime);
+                mStandby = false;
+            } else {
+                usleep(sleepTime);
+            }
         }
 
         // Finally let go of removed track(s), without the lock held
@@ -2041,8 +2261,10 @@
         // is now local to this block, but will keep it for now (at least until merge done).
     }
 
+    threadLoop_exit();
+
     // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
-    if (mType == MIXER || mType == DIRECT) {
+    if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
         // put output stream into standby mode
         if (!mStandby) {
             mOutput->stream->common.standby(&mOutput->stream->common);
@@ -2055,6 +2277,28 @@
     return false;
 }
 
+// removeTracks_l() must be called with ThreadBase::mLock held
+void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
+{
+    size_t count = tracksToRemove.size();
+    if (count) {
+        for (size_t i=0 ; i<count ; i++) {
+            const sp<Track>& track = tracksToRemove.itemAt(i);
+            mActiveTracks.remove(track);
+            ALOGV("removeTracks_l removing track on session %d", track->sessionId());
+            sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+            if (chain != 0) {
+                ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
+                        track->sessionId());
+                chain->decActiveTrackCnt();
+            }
+            if (track->isTerminated()) {
+                removeTrack_l(track);
+            }
+        }
+    }
+
+}
 
 // ----------------------------------------------------------------------------
 
@@ -2069,7 +2313,7 @@
         // mNormalSink below
 {
     ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
-    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%u, "
+    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
             "mFrameCount=%d, mNormalFrameCount=%d",
             mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
             mNormalFrameCount);
@@ -2259,7 +2503,7 @@
     PlaybackThread::threadLoop_removeTracks(tracksToRemove);
 }
 
-void AudioFlinger::MixerThread::threadLoop_write()
+ssize_t AudioFlinger::MixerThread::threadLoop_write()
 {
     // FIXME we should only do one push per cycle; confirm this is true
     // Start the fast mixer if it's not already running
@@ -2280,6 +2524,8 @@
 #endif
             }
             state->mCommand = FastMixerState::MIX_WRITE;
+            mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
+                    FastMixerDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
             sq->end();
             sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
             if (kUseFastMixer == FastMixer_Dynamic) {
@@ -2289,7 +2535,7 @@
             sq->end(false /*didModify*/);
         }
     }
-    PlaybackThread::threadLoop_write();
+    return PlaybackThread::threadLoop_write();
 }
 
 void AudioFlinger::MixerThread::threadLoop_standby()
@@ -2321,11 +2567,40 @@
     PlaybackThread::threadLoop_standby();
 }
 
+// Empty implementation for standard mixer
+// Overridden for offloaded playback
+void AudioFlinger::PlaybackThread::flushOutput_l()
+{
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
+{
+    return false;
+}
+
+bool AudioFlinger::PlaybackThread::shouldStandby_l()
+{
+    return !mStandby;
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
 // shared by MIXER and DIRECT, overridden by DUPLICATING
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
     mOutput->stream->common.standby(&mOutput->stream->common);
+    if (mUseAsyncWrite != 0) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
 }
 
 void AudioFlinger::MixerThread::threadLoop_mix()
@@ -2346,6 +2621,7 @@
 
     // mix buffers...
     mAudioMixer->process(pts);
+    mCurrentWriteLength = mixBufferSize;
     // increase sleep time progressively when application underrun condition clears.
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
     // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -2473,7 +2749,7 @@
             switch (track->mState) {
             case TrackBase::STOPPING_1:
                 // track stays active in STOPPING_1 state until first underrun
-                if (recentUnderruns > 0) {
+                if (recentUnderruns > 0 || track->isTerminated()) {
                     track->mState = TrackBase::STOPPING_2;
                 }
                 break;
@@ -2515,7 +2791,6 @@
                 // fall through
             case TrackBase::STOPPING_2:
             case TrackBase::PAUSED:
-            case TrackBase::TERMINATED:
             case TrackBase::STOPPED:
             case TrackBase::FLUSHED:   // flush() while active
                 // Check for presentation complete if track is inactive
@@ -2628,8 +2903,7 @@
         if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
-            ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
-                    this);
+            ALOGVV("track %d s=%08x [OK] on thread %p", name, cblk->server, this);
 
             mixedTracks++;
 
@@ -2703,6 +2977,7 @@
                 }
                 va = (uint32_t)(v * sendLevel);
             }
+
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
@@ -2794,8 +3069,7 @@
                 chain->clearInputBuffer();
             }
 
-            ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user,
-                    cblk->server, this);
+            ALOGVV("track %d s=%08x [NOT READY] on thread %p", name, cblk->server, this);
             if ((track->sharedBuffer() != 0) || track->isTerminated() ||
                     track->isStopped() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
@@ -2881,30 +3155,13 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i=0 ; i<count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (track->mainBuffer() != mMixBuffer) {
-                chain = getEffectChain_l(track->sessionId());
-                if (chain != 0) {
-                    ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
-                            track->sessionId());
-                    chain->decActiveTrackCnt();
-                }
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     // mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to
     // mix buffer and track effects will accumulate into it
-    if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
-            (mixedTracks == 0 && fastTracks > 0)) {
+    if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            (mixedTracks == 0 && fastTracks > 0))) {
         // FIXME as a performance optimization, should remember previous zero status
         memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
     }
@@ -2968,7 +3225,7 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-            if (value != AUDIO_CHANNEL_OUT_STEREO) {
+            if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
                 status = BAD_VALUE;
             } else {
                 reconfig = true;
@@ -3029,10 +3286,8 @@
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
-                delete mAudioMixer;
-                // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
-                mAudioMixer = NULL;
                 readOutputParameters();
+                delete mAudioMixer;
                 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
                 for (size_t i = 0; i < mTracks.size() ; i++) {
                     int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId);
@@ -3081,7 +3336,7 @@
     write(fd, result.string(), result.size());
 
     // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
-    FastMixerDumpState copy = mFastMixerDumpState;
+    const FastMixerDumpState copy(mFastMixerDumpState);
     copy.dump(fd);
 
 #ifdef STATE_QUEUE_DUMP
@@ -3136,10 +3391,63 @@
 {
 }
 
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
+        ThreadBase::type_t type)
+    :   PlaybackThread(audioFlinger, output, id, device, type)
+        // mLeftVolFloat, mRightVolFloat
+{
+}
+
 AudioFlinger::DirectOutputThread::~DirectOutputThread()
 {
 }
 
+void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
+{
+    audio_track_cblk_t* cblk = track->cblk();
+    float left, right;
+
+    if (mMasterMute || mStreamTypes[track->streamType()].mute) {
+        left = right = 0;
+    } else {
+        float typeVolume = mStreamTypes[track->streamType()].volume;
+        float v = mMasterVolume * typeVolume;
+        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+        uint32_t vlr = proxy->getVolumeLR();
+        float v_clamped = v * (vlr & 0xFFFF);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        left = v_clamped/MAX_GAIN;
+        v_clamped = v * (vlr >> 16);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        right = v_clamped/MAX_GAIN;
+    }
+
+    if (lastTrack) {
+        if (left != mLeftVolFloat || right != mRightVolFloat) {
+            mLeftVolFloat = left;
+            mRightVolFloat = right;
+
+            // Convert volumes from float to 8.24
+            uint32_t vl = (uint32_t)(left * (1 << 24));
+            uint32_t vr = (uint32_t)(right * (1 << 24));
+
+            // Delegate volume control to effect in track effect chain if needed
+            // only one effect chain can be present on DirectOutputThread, so if
+            // there is one, the track is connected to it
+            if (!mEffectChains.isEmpty()) {
+                mEffectChains[0]->setVolume_l(&vl, &vr);
+                left = (float)vl / (1 << 24);
+                right = (float)vr / (1 << 24);
+            }
+            if (mOutput->stream->set_volume) {
+                mOutput->stream->set_volume(mOutput->stream, left, right);
+            }
+        }
+    }
+}
+
+
 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
     Vector< sp<Track> > *tracksToRemove
 )
@@ -3166,6 +3474,12 @@
         } else {
             minFrames = 1;
         }
+        // Only consider last track started for volume and mixer state control.
+        // This is the last entry in mActiveTracks unless a track underruns.
+        // As we only care about the transition phase between two tracks on a
+        // direct output, it is not a problem to ignore the underrun case.
+        bool last = (i == (count - 1));
+
         if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
@@ -3180,52 +3494,8 @@
             }
 
             // compute volume for this track
-            float left, right;
-            if (mMasterMute || track->isPausing() || mStreamTypes[track->streamType()].mute) {
-                left = right = 0;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
-            } else {
-                float typeVolume = mStreamTypes[track->streamType()].volume;
-                float v = mMasterVolume * typeVolume;
-                uint32_t vlr = track->mAudioTrackServerProxy->getVolumeLR();
-                float v_clamped = v * (vlr & 0xFFFF);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                left = v_clamped/MAX_GAIN;
-                v_clamped = v * (vlr >> 16);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                right = v_clamped/MAX_GAIN;
-            }
-            // Only consider last track started for volume and mixer state control.
-            // This is the last entry in mActiveTracks unless a track underruns.
-            // As we only care about the transition phase between two tracks on a
-            // direct output, it is not a problem to ignore the underrun case.
-            if (i == (count - 1)) {
-                if (left != mLeftVolFloat || right != mRightVolFloat) {
-                    mLeftVolFloat = left;
-                    mRightVolFloat = right;
-
-                    // Convert volumes from float to 8.24
-                    uint32_t vl = (uint32_t)(left * (1 << 24));
-                    uint32_t vr = (uint32_t)(right * (1 << 24));
-
-                    // Delegate volume control to effect in track effect chain if needed
-                    // only one effect chain can be present on DirectOutputThread, so if
-                    // there is one, the track is connected to it
-                    if (!mEffectChains.isEmpty()) {
-                        // Do not ramp volume if volume is controlled by effect
-                        mEffectChains[0]->setVolume_l(&vl, &vr);
-                        left = (float)vl / (1 << 24);
-                        right = (float)vr / (1 << 24);
-                    }
-                    mOutput->stream->set_volume(mOutput->stream, left, right);
-                }
-
+            processVolume_l(track, last);
+            if (last) {
                 // reset retry count
                 track->mRetryCount = kMaxTrackRetriesDirect;
                 mActiveTrack = t;
@@ -3259,7 +3529,7 @@
                 if (--(track->mRetryCount) <= 0) {
                     ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
                     tracksToRemove->add(track);
-                } else if (i == (count -1)){
+                } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
             }
@@ -3267,35 +3537,21 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i = 0 ; i < count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (!mEffectChains.isEmpty()) {
-                ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
-                      track->sessionId());
-                mEffectChains[0]->decActiveTrackCnt();
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     return mixerStatus;
 }
 
 void AudioFlinger::DirectOutputThread::threadLoop_mix()
 {
-    AudioBufferProvider::Buffer buffer;
     size_t frameCount = mFrameCount;
     int8_t *curBuf = (int8_t *)mMixBuffer;
     // output audio to hardware
     while (frameCount) {
+        AudioBufferProvider::Buffer buffer;
         buffer.frameCount = frameCount;
         mActiveTrack->getNextBuffer(&buffer);
-        if (CC_UNLIKELY(buffer.raw == NULL)) {
+        if (buffer.raw == NULL) {
             memset(curBuf, 0, frameCount * mFrameSize);
             break;
         }
@@ -3304,10 +3560,10 @@
         curBuf += buffer.frameCount * mFrameSize;
         mActiveTrack->releaseBuffer(&buffer);
     }
+    mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer;
     sleepTime = 0;
     standbyTime = systemTime() + standbyDelay;
     mActiveTrack.clear();
-
 }
 
 void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
@@ -3428,6 +3684,307 @@
 
 // ----------------------------------------------------------------------------
 
+AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
+        const sp<AudioFlinger::OffloadThread>& offloadThread)
+    :   Thread(false /*canCallJava*/),
+        mOffloadThread(offloadThread),
+        mWriteBlocked(false),
+        mDraining(false)
+{
+}
+
+AudioFlinger::AsyncCallbackThread::~AsyncCallbackThread()
+{
+}
+
+void AudioFlinger::AsyncCallbackThread::onFirstRef()
+{
+    run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+bool AudioFlinger::AsyncCallbackThread::threadLoop()
+{
+    while (!exitPending()) {
+        bool writeBlocked;
+        bool draining;
+
+        {
+            Mutex::Autolock _l(mLock);
+            mWaitWorkCV.wait(mLock);
+            if (exitPending()) {
+                break;
+            }
+            writeBlocked = mWriteBlocked;
+            draining = mDraining;
+            ALOGV("AsyncCallbackThread mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+        }
+        {
+            sp<AudioFlinger::OffloadThread> offloadThread = mOffloadThread.promote();
+            if (offloadThread != 0) {
+                if (writeBlocked == false) {
+                    offloadThread->setWriteBlocked(false);
+                }
+                if (draining == false) {
+                    offloadThread->setDraining(false);
+                }
+            }
+        }
+    }
+    return false;
+}
+
+void AudioFlinger::AsyncCallbackThread::exit()
+{
+    ALOGV("AsyncCallbackThread::exit");
+    Mutex::Autolock _l(mLock);
+    requestExit();
+    mWaitWorkCV.broadcast();
+}
+
+void AudioFlinger::AsyncCallbackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::AsyncCallbackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+
+// ----------------------------------------------------------------------------
+AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
+    :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
+        mHwPaused(false),
+        mPausedBytesRemaining(0)
+{
+    mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
+}
+
+AudioFlinger::OffloadThread::~OffloadThread()
+{
+    mPreviousTrack.clear();
+}
+
+void AudioFlinger::OffloadThread::threadLoop_exit()
+{
+    if (mFlushPending || mHwPaused) {
+        // If a flush is pending or track was paused, just discard buffered data
+        flushHw_l();
+    } else {
+        mMixerStatus = MIXER_DRAIN_ALL;
+        threadLoop_drain();
+    }
+    mCallbackThread->exit();
+    PlaybackThread::threadLoop_exit();
+}
+
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTracks_l(
+    Vector< sp<Track> > *tracksToRemove
+)
+{
+    ALOGV("OffloadThread::prepareTracks_l");
+    size_t count = mActiveTracks.size();
+
+    mixer_state mixerStatus = MIXER_IDLE;
+    if (mFlushPending) {
+        flushHw_l();
+        mFlushPending = false;
+    }
+    // find out which tracks need to be processed
+    for (size_t i = 0; i < count; i++) {
+        sp<Track> t = mActiveTracks[i].promote();
+        // The track died recently
+        if (t == 0) {
+            continue;
+        }
+        Track* const track = t.get();
+        audio_track_cblk_t* cblk = track->cblk();
+        if (mPreviousTrack != NULL) {
+            if (t != mPreviousTrack) {
+                // Flush any data still being written from last track
+                mBytesRemaining = 0;
+                if (mPausedBytesRemaining) {
+                    // Last track was paused so we also need to flush saved
+                    // mixbuffer state and invalidate track so that it will
+                    // re-submit that unwritten data when it is next resumed
+                    mPausedBytesRemaining = 0;
+                    // Invalidate is a bit drastic - would be more efficient
+                    // to have a flag to tell client that some of the
+                    // previously written data was lost
+                    mPreviousTrack->invalidate();
+                }
+            }
+        }
+        mPreviousTrack = t;
+        bool last = (i == (count - 1));
+        if (track->isPausing()) {
+            track->setPaused();
+            if (last) {
+                if (!mHwPaused) {
+                    mOutput->stream->pause(mOutput->stream);
+                    mHwPaused = true;
+                }
+                // If we were part way through writing the mixbuffer to
+                // the HAL we must save this until we resume
+                // BUG - this will be wrong if a different track is made active,
+                // in that case we want to discard the pending data in the
+                // mixbuffer and tell the client to present it again when the
+                // track is resumed
+                mPausedWriteLength = mCurrentWriteLength;
+                mPausedBytesRemaining = mBytesRemaining;
+                mBytesRemaining = 0;    // stop writing
+            }
+            tracksToRemove->add(track);
+        } else if (track->framesReady() && track->isReady() &&
+                !track->isPaused() && !track->isTerminated()) {
+            ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->server);
+            if (track->mFillingUpStatus == Track::FS_FILLED) {
+                track->mFillingUpStatus = Track::FS_ACTIVE;
+                mLeftVolFloat = mRightVolFloat = 0;
+                if (track->mState == TrackBase::RESUMING) {
+                    if (mPausedBytesRemaining) {
+                        // Need to continue write that was interrupted
+                        mCurrentWriteLength = mPausedWriteLength;
+                        mBytesRemaining = mPausedBytesRemaining;
+                        mPausedBytesRemaining = 0;
+                    }
+                    track->mState = TrackBase::ACTIVE;
+                }
+            }
+
+            if (last) {
+                if (mHwPaused) {
+                    mOutput->stream->resume(mOutput->stream);
+                    mHwPaused = false;
+                    // threadLoop_mix() will handle the case that we need to
+                    // resume an interrupted write
+                }
+                // reset retry count
+                track->mRetryCount = kMaxTrackRetriesOffload;
+                mActiveTrack = t;
+                mixerStatus = MIXER_TRACKS_READY;
+            }
+        } else {
+            ALOGVV("OffloadThread: track %d s=%08x [NOT READY]", track->name(), cblk->server);
+            if (track->isStopping_1()) {
+                // Hardware buffer can hold a large amount of audio so we must
+                // wait for all current track's data to drain before we say
+                // that the track is stopped.
+                if (mBytesRemaining == 0) {
+                    // Only start draining when all data in mixbuffer
+                    // has been written
+                    ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
+                    track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
+                    sleepTime = 0;
+                    standbyTime = systemTime() + standbyDelay;
+                    if (last) {
+                        mixerStatus = MIXER_DRAIN_TRACK;
+                        if (mHwPaused) {
+                            // It is possible to move from PAUSED to STOPPING_1 without
+                            // a resume so we must ensure hardware is running
+                            mOutput->stream->resume(mOutput->stream);
+                            mHwPaused = false;
+                        }
+                    }
+                }
+            } else if (track->isStopping_2()) {
+                // Drain has completed, signal presentation complete
+                if (!mDraining || !last) {
+                    track->mState = TrackBase::STOPPED;
+                    size_t audioHALFrames =
+                            (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                    size_t framesWritten =
+                            mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+                    track->presentationComplete(framesWritten, audioHALFrames);
+                    track->reset();
+                    tracksToRemove->add(track);
+                }
+            } else {
+                // No buffers for this track. Give it a few chances to
+                // fill a buffer, then remove it from active list.
+                if (--(track->mRetryCount) <= 0) {
+                    ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
+                          track->name());
+                    tracksToRemove->add(track);
+                } else if (last){
+                    mixerStatus = MIXER_TRACKS_ENABLED;
+                }
+            }
+        }
+        // compute volume for this track
+        processVolume_l(track, last);
+    }
+    // remove all the tracks that need to be...
+    removeTracks_l(*tracksToRemove);
+
+    return mixerStatus;
+}
+
+void AudioFlinger::OffloadThread::flushOutput_l()
+{
+    mFlushPending = true;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::waitingAsyncCallback_l()
+{
+    ALOGV("waitingAsyncCallback_l mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+    if (mUseAsyncWrite && (mWriteBlocked || mDraining)) {
+        return true;
+    }
+    return false;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::shouldStandby_l()
+{
+    bool TrackPaused = false;
+
+    // do not put the HAL in standby when paused. AwesomePlayer clears the offloaded AudioTrack
+    // after a timeout and we will enter standby then.
+    if (mTracks.size() > 0) {
+        TrackPaused = mTracks[mTracks.size() - 1]->isPaused();
+    }
+
+    return !mStandby && !TrackPaused;
+}
+
+
+bool AudioFlinger::OffloadThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
+void AudioFlinger::OffloadThread::flushHw_l()
+{
+    mOutput->stream->flush(mOutput->stream);
+    // Flush anything still waiting in the mixbuffer
+    mCurrentWriteLength = 0;
+    mBytesRemaining = 0;
+    mPausedWriteLength = 0;
+    mPausedBytesRemaining = 0;
+    if (mUseAsyncWrite) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
         AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
     :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
@@ -3454,6 +4011,7 @@
     }
     sleepTime = 0;
     writeFrames = mNormalFrameCount;
+    mCurrentWriteLength = mixBufferSize;
     standbyTime = systemTime() + standbyDelay;
 }
 
@@ -3477,12 +4035,12 @@
     }
 }
 
-void AudioFlinger::DuplicatingThread::threadLoop_write()
+ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
         outputTracks[i]->write(mMixBuffer, writeFrames);
     }
-    mBytesWritten += mixBufferSize;
+    return (ssize_t)mixBufferSize;
 }
 
 void AudioFlinger::DuplicatingThread::threadLoop_standby()
@@ -3603,7 +4161,7 @@
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
     mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
-    // mRsmpInIndex and mInputBytes set by readInputParameters()
+    // mRsmpInIndex and mBufferSize set by readInputParameters()
     mReqChannelCount(popcount(channelMask)),
     mReqSampleRate(sampleRate)
     // mBytesRead is only meaningful while active, and so is cleared in start()
@@ -3676,7 +4234,10 @@
                 continue;
             }
             if (mActiveTrack != 0) {
-                if (mActiveTrack->mState == TrackBase::PAUSING) {
+                if (mActiveTrack->isTerminated()) {
+                    removeTrack_l(mActiveTrack);
+                    mActiveTrack.clear();
+                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
                     standby();
                     mActiveTrack.clear();
                     mStartStopCond.broadcast();
@@ -3695,9 +4256,6 @@
                         mStartStopCond.broadcast();
                     }
                     mStandby = false;
-                } else if (mActiveTrack->mState == TrackBase::TERMINATED) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
                 }
             }
             lockEffectChains_l(effectChains);
@@ -3716,7 +4274,7 @@
 
             buffer.frameCount = mFrameCount;
             status_t status = mActiveTrack->getNextBuffer(&buffer);
-            if (CC_LIKELY(status == NO_ERROR)) {
+            if (status == NO_ERROR) {
                 readOnce = true;
                 size_t framesOut = buffer.frameCount;
                 if (mResampler == NULL) {
@@ -3756,7 +4314,7 @@
                                 mRsmpInIndex = 0;
                             }
                             mBytesRead = mInput->stream->read(mInput->stream, readInto,
-                                    mInputBytes);
+                                    mBufferSize);
                             if (mBytesRead <= 0) {
                                 if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
                                 {
@@ -4025,8 +4583,9 @@
     }
 }
 
-bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) {
+bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
+    AutoMutex _l(mLock);
     if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
@@ -4077,7 +4636,8 @@
 // destroyTrack_l() must be called with ThreadBase::mLock held
 void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
+    track->mState = TrackBase::STOPPED;
     // active tracks are removed by threadLoop()
     if (mActiveTrack != track) {
         removeTrack_l(track);
@@ -4109,7 +4669,7 @@
     if (mActiveTrack != 0) {
         snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex);
         result.append(buffer);
-        snprintf(buffer, SIZE, "In size: %d\n", mInputBytes);
+        snprintf(buffer, SIZE, "Buffer size: %u bytes\n", mBufferSize);
         result.append(buffer);
         snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
         result.append(buffer);
@@ -4162,7 +4722,7 @@
     int channelCount;
 
     if (framesReady == 0) {
-        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
+        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize);
         if (mBytesRead <= 0) {
             if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
                 ALOGE("RecordThread::getNextBuffer() Error reading audio input");
@@ -4311,16 +4871,13 @@
 
 String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
 {
-    char *s;
-    String8 out_s8 = String8();
-
     Mutex::Autolock _l(mLock);
     if (initCheck() != NO_ERROR) {
-        return out_s8;
+        return String8();
     }
 
-    s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
-    out_s8 = String8(s);
+    char *s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
+    const String8 out_s8(s);
     free(s);
     return out_s8;
 }
@@ -4332,7 +4889,7 @@
     switch (event) {
     case AudioSystem::INPUT_OPENED:
     case AudioSystem::INPUT_CONFIG_CHANGED:
-        desc.channels = mChannelMask;
+        desc.channelMask = mChannelMask;
         desc.samplingRate = mSampleRate;
         desc.format = mFormat;
         desc.frameCount = mFrameCount;
@@ -4358,12 +4915,11 @@
 
     mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
     mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
-    mChannelCount = (uint16_t)popcount(mChannelMask);
+    mChannelCount = popcount(mChannelMask);
     mFormat = mInput->stream->common.get_format(&mInput->stream->common);
     mFrameSize = audio_stream_frame_size(&mInput->stream->common);
-    mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
-    mFrameCount = mInputBytes / mFrameSize;
-    mNormalFrameCount = mFrameCount; // not used by record, but used by input effects
+    mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
+    mFrameCount = mBufferSize / mFrameSize;
     mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
 
     if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7de6872..7be6043 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -28,7 +28,8 @@
         MIXER,              // Thread class is MixerThread
         DIRECT,             // Thread class is DirectOutputThread
         DUPLICATING,        // Thread class is DuplicatingThread
-        RECORD              // Thread class is RecordThread
+        RECORD,             // Thread class is RecordThread
+        OFFLOAD             // Thread class is OffloadThread
     };
 
     ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
@@ -125,10 +126,9 @@
                 audio_channel_mask_t channelMask() const { return mChannelMask; }
                 audio_format_t format() const { return mFormat; }
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
-                // and returns the normal mix buffer's frame count.
-                size_t      frameCount() const { return mNormalFrameCount; }
-                // Return's the HAL's frame count i.e. fast mixer buffer size.
-                size_t      frameCountHAL() const { return mFrameCount; }
+                // and returns the [normal mix] buffer's frame count.
+    virtual     size_t      frameCount() const = 0;
+                size_t      frameSize() const { return mFrameSize; }
 
     // Should be "virtual status_t requestExitAndWait()" and override same
     // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
@@ -184,6 +184,8 @@
                 void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
                 // unlock effect chains after process
                 void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
+                // get a copy of mEffectChains vector
+                Vector< sp<EffectChain> > getEffectChains_l() const { return mEffectChains; };
                 // set audio mode to all effect chains
                 void setMode(audio_mode_t mode);
                 // get effect module with corresponding ID on specified audio session
@@ -259,11 +261,13 @@
                 Condition               mWaitWorkCV;
 
                 const sp<AudioFlinger>  mAudioFlinger;
+
+                // updated by PlaybackThread::readOutputParameters() or
+                // RecordThread::readInputParameters()
                 uint32_t                mSampleRate;
                 size_t                  mFrameCount;       // output HAL, direct output, record
-                size_t                  mNormalFrameCount; // normal mixer and effects
                 audio_channel_mask_t    mChannelMask;
-                uint16_t                mChannelCount;
+                uint32_t                mChannelCount;
                 size_t                  mFrameSize;
                 audio_format_t          mFormat;
 
@@ -290,6 +294,7 @@
                 Vector<String8>         mNewParameters;
                 status_t                mParamStatus;
 
+                // vector owns each ConfigEvent *, so must delete after removing
                 Vector<ConfigEvent *>     mConfigEvents;
 
                 // These fields are written and read by thread itself without lock or barrier,
@@ -328,11 +333,19 @@
     enum mixer_state {
         MIXER_IDLE,             // no active tracks
         MIXER_TRACKS_ENABLED,   // at least one active track, but no track has any data ready
-        MIXER_TRACKS_READY      // at least one active track, and at least one track has data
+        MIXER_TRACKS_READY,      // at least one active track, and at least one track has data
+        MIXER_DRAIN_TRACK,      // drain currently playing track
+        MIXER_DRAIN_ALL,        // fully drain the hardware
         // standby mode does not have an enum value
         // suspend by audio policy manager is orthogonal to mixer state
     };
 
+    // retry count before removing active track in case of underrun on offloaded thread:
+    // we need to make sure that AudioTrack client has enough time to send large buffers
+//FIXME may be more appropriate if expressed in time units. Need to revise how underrun is handled
+    // for offloaded tracks
+    static const int8_t kMaxTrackRetriesOffload = 20;
+
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                    audio_io_handle_t id, audio_devices_t device, type_t type);
     virtual             ~PlaybackThread();
@@ -350,8 +363,10 @@
     // Code snippets that were lifted up out of threadLoop()
     virtual     void        threadLoop_mix() = 0;
     virtual     void        threadLoop_sleepTime() = 0;
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
+    virtual     void        threadLoop_drain();
     virtual     void        threadLoop_standby();
+    virtual     void        threadLoop_exit();
     virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
 
                 // prepareTracks_l reads and writes mActiveTracks, and returns
@@ -359,6 +374,19 @@
                 // is responsible for clearing or destroying this Vector later on, when it
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
+                void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+
+                void        writeCallback();
+                void        setWriteBlocked(bool value);
+                void        drainCallback();
+                void        setDraining(bool value);
+
+    static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -429,11 +457,21 @@
 
                 virtual status_t setSyncEvent(const sp<SyncEvent>& event);
                 virtual bool     isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+                // called with AudioFlinger lock held
                         void     invalidateTracks(audio_stream_type_t streamType);
 
+    virtual     size_t      frameCount() const { return mNormalFrameCount; }
+
+                // Returns the HAL's frame count, i.e. fast mixer buffer size.
+                size_t      frameCountHAL() const { return mFrameCount; }
 
 protected:
-    int16_t*                        mMixBuffer;
+    // updated by readOutputParameters()
+    size_t                          mNormalFrameCount;  // normal mixer and effects
+
+    int16_t*                        mMixBuffer;         // frame size aligned mix buffer
+    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -486,8 +524,9 @@
     PlaybackThread& operator = (const PlaybackThread&);
 
     status_t    addTrack_l(const sp<Track>& track);
-    void        destroyTrack_l(const sp<Track>& track);
+    bool        destroyTrack_l(const sp<Track>& track);
     void        removeTrack_l(const sp<Track>& track);
+    void        signal_l();
 
     void        readOutputParameters();
 
@@ -535,6 +574,14 @@
     // DUPLICATING only
     uint32_t                        writeFrames;
 
+    size_t                          mBytesRemaining;
+    size_t                          mCurrentWriteLength;
+    bool                            mUseAsyncWrite;
+    bool                            mWriteBlocked;
+    bool                            mDraining;
+    bool                            mSignalPending;
+    sp<AsyncCallbackThread>         mCallbackThread;
+
 private:
     // The HAL output sink is treated as non-blocking, but current implementation is blocking
     sp<NBAIO_Sink>          mOutputSink;
@@ -558,7 +605,7 @@
 protected:
                 // accessed by both binder threads and within threadLoop(), lock on mutex needed
                 unsigned    mFastTrackAvailMask;    // bit i set if fast track [i] is available
-
+    virtual     void        flushOutput_l();
 };
 
 class MixerThread : public PlaybackThread {
@@ -584,7 +631,7 @@
     virtual     void        cacheParameters_l();
 
     // threadLoop snippets
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
@@ -641,17 +688,73 @@
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
 
-private:
     // volumes last sent to audio HAL with stream->set_volume()
     float mLeftVolFloat;
     float mRightVolFloat;
 
+    DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device, ThreadBase::type_t type);
+    void processVolume_l(Track *track, bool lastTrack);
+
     // prepareTracks_l() tells threadLoop_mix() the name of the single active track
     sp<Track>               mActiveTrack;
 public:
     virtual     bool        hasFastMixer() const { return false; }
 };
 
+class OffloadThread : public DirectOutputThread {
+public:
+
+    OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device);
+    virtual                 ~OffloadThread();
+
+protected:
+    // threadLoop snippets
+    virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+    virtual     void        threadLoop_exit();
+    virtual     void        flushOutput_l();
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
+private:
+                void        flushHw_l();
+
+private:
+    bool        mHwPaused;
+    bool        mFlushPending;
+    size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
+    size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
+    sp<Track>   mPreviousTrack;         // used to detect track switch
+};
+
+class AsyncCallbackThread : public Thread {
+public:
+
+    AsyncCallbackThread(const sp<OffloadThread>& offloadThread);
+
+    virtual             ~AsyncCallbackThread();
+
+    // Thread virtuals
+    virtual bool        threadLoop();
+
+    // RefBase
+    virtual void        onFirstRef();
+
+            void        exit();
+            void        setWriteBlocked(bool value);
+            void        setDraining(bool value);
+
+private:
+    wp<OffloadThread>   mOffloadThread;
+    bool                mWriteBlocked;
+    bool                mDraining;
+    Condition           mWaitWorkCV;
+    Mutex               mLock;
+};
+
 class DuplicatingThread : public MixerThread {
 public:
     DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
@@ -671,7 +774,7 @@
     // threadLoop snippets
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        cacheParameters_l();
 
@@ -744,7 +847,7 @@
 
             // ask the thread to stop the specified track, and
             // return true if the caller should then do it's part of the stopping process
-            bool        stop_l(RecordTrack* recordTrack);
+            bool        stop(RecordTrack* recordTrack);
 
             void        dump(int fd, const Vector<String16>& args);
             AudioStreamIn* clearInput();
@@ -775,6 +878,8 @@
     static void syncStartEventCallback(const wp<SyncEvent>& event);
            void handleSyncStartEvent(const sp<SyncEvent>& event);
 
+    virtual size_t      frameCount() const { return mFrameCount; }
+
 private:
             void clearSyncStartEvent();
 
@@ -790,11 +895,13 @@
             // is used together with mStartStopCond to indicate start()/stop() progress
             sp<RecordTrack>                     mActiveTrack;
             Condition                           mStartStopCond;
+
+            // updated by RecordThread::readInputParameters()
             AudioResampler                      *mResampler;
             int32_t                             *mRsmpOutBuffer;
             int16_t                             *mRsmpInBuffer;
             size_t                              mRsmpInIndex;
-            size_t                              mInputBytes;
+            size_t                              mBufferSize;    // stream buffer size for read()
             const uint32_t                      mReqChannelCount;
             const uint32_t                      mReqSampleRate;
             ssize_t                             mBytesRead;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 55d96fa..523e4b2 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -25,10 +25,10 @@
 public:
     enum track_state {
         IDLE,
-        TERMINATED,
         FLUSHED,
         STOPPED,
-        // next 2 states are currently used for fast tracks only
+        // next 2 states are currently used for fast tracks
+        // and offloaded tracks only
         STOPPING_1,     // waiting for first underrun
         STOPPING_2,     // waiting for presentation complete
         RESUMING,
@@ -89,7 +89,7 @@
         return (mState == STOPPED || mState == FLUSHED);
     }
 
-    // for fast tracks only
+    // for fast tracks and offloaded tracks only
     bool isStopping() const {
         return mState == STOPPING_1 || mState == STOPPING_2;
     }
@@ -101,11 +101,12 @@
     }
 
     bool isTerminated() const {
-        return mState == TERMINATED;
+        return mTerminated;
     }
 
-    bool step();    // mStepCount is an implicit input
-    void reset();
+    void terminate() {
+        mTerminated = true;
+    }
 
     bool isOut() const { return mIsOut; }
                                     // true for Track and TimedTrack, false for RecordTrack,
@@ -117,24 +118,19 @@
     audio_track_cblk_t* mCblk;
     void*               mBuffer;    // start of track buffer, typically in shared memory
                                     // except for OutputTrack when it is in local memory
-    void*               mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize
-                                    //   is based on mChannelCount and 16-bit samples
-    uint32_t            mStepCount; // saves AudioBufferProvider::Buffer::frameCount as of
-                                    // time of releaseBuffer() for later use by step()
     // we don't really need a lock for these
     track_state         mState;
     const uint32_t      mSampleRate;    // initial sample rate only; for tracks which
                         // support dynamic rates, the current value is in control block
     const audio_format_t mFormat;
     const audio_channel_mask_t mChannelMask;
-    const uint8_t       mChannelCount;
+    const uint32_t      mChannelCount;
     const size_t        mFrameSize; // AudioFlinger's view of frame size in shared memory,
                                     // where for AudioTrack (but not AudioRecord),
                                     // 8-bit PCM samples are stored as 16-bit
     const size_t        mFrameCount;// size of track buffer given at createTrack() or
                                     // openRecord(), and then adjusted as needed
 
-    bool                mStepServerFailed;
     const int           mSessionId;
     Vector < sp<SyncEvent> >mSyncEvents;
     const bool          mIsOut;
@@ -142,4 +138,5 @@
     const int           mId;
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
+    bool                mTerminated;
 };
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 6aca95f..3e184b4 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -19,8 +19,8 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <math.h>
-#include <cutils/compiler.h>
 #include <utils/Log.h>
 
 #include <private/media/AudioTrackShared.h>
@@ -74,8 +74,6 @@
         mClient(client),
         mCblk(NULL),
         // mBuffer
-        // mBufferEnd
-        mStepCount(0),
         mState(IDLE),
         mSampleRate(sampleRate),
         mFormat(format),
@@ -84,11 +82,11 @@
         mFrameSize(audio_is_linear_pcm(format) ?
                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
         mFrameCount(frameCount),
-        mStepServerFailed(false),
         mSessionId(sessionId),
         mIsOut(isOut),
         mServerProxy(NULL),
-        mId(android_atomic_inc(&nextTrackId))
+        mId(android_atomic_inc(&nextTrackId)),
+        mTerminated(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -133,7 +131,6 @@
             mCblk->flags = CBLK_FORCEREADY;     // FIXME hack, need to fix the track ready logic
 #endif
         }
-        mBufferEnd = (uint8_t *)mBuffer + bufferSize;
 
 #ifdef TEE_SINK
         if (mTeeSinkTrackEnabled) {
@@ -201,11 +198,6 @@
     mServerProxy->releaseBuffer(&buf);
 }
 
-void AudioFlinger::ThreadBase::TrackBase::reset() {
-    ALOGV("TrackBase::reset");
-    // FIXME still needed?
-}
-
 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
 {
     mSyncEvents.add(event);
@@ -287,6 +279,10 @@
         xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
 }
 
+status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
+    return mTrack->setParameters(keyValuePairs);
+}
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -323,7 +319,8 @@
     mUnderrunCount(0),
     mCachedVolume(1.0),
     mIsInvalid(false),
-    mAudioTrackServerProxy(NULL)
+    mAudioTrackServerProxy(NULL),
+    mResumeToStopping(false)
 {
     if (mCblk != NULL) {
         if (sharedBuffer == 0) {
@@ -381,28 +378,20 @@
     { // scope for mLock
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            if (!isOutputTrack()) {
-                if (mState == ACTIVE || mState == RESUMING) {
-                    AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-
-#ifdef ADD_BATTERY_DATA
-                    // to track the speaker usage
-                    addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-                }
-                AudioSystem::releaseOutput(thread->id());
-            }
             Mutex::Autolock _l(thread->mLock);
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->destroyTrack_l(this);
+            bool wasActive = playbackThread->destroyTrack_l(this);
+            if (!isOutputTrack() && !wasActive) {
+                AudioSystem::releaseOutput(thread->id());
+            }
         }
     }
 }
 
 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.append("   Name Client Type Fmt Chn mask   Session StpCnt fCount S F SRate  "
-                  "L dB  R dB    Server    Main buf    Aux Buf  Flags Underruns\n");
+    result.append("   Name Client Type Fmt Chn mask Session fCount S F SRate  "
+                  "L dB  R dB    Server Main buf  Aux Buf Flags Underruns\n");
 }
 
 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
@@ -415,40 +404,41 @@
     }
     track_state state = mState;
     char stateChar;
-    switch (state) {
-    case IDLE:
-        stateChar = 'I';
-        break;
-    case TERMINATED:
+    if (isTerminated()) {
         stateChar = 'T';
-        break;
-    case STOPPING_1:
-        stateChar = 's';
-        break;
-    case STOPPING_2:
-        stateChar = '5';
-        break;
-    case STOPPED:
-        stateChar = 'S';
-        break;
-    case RESUMING:
-        stateChar = 'R';
-        break;
-    case ACTIVE:
-        stateChar = 'A';
-        break;
-    case PAUSING:
-        stateChar = 'p';
-        break;
-    case PAUSED:
-        stateChar = 'P';
-        break;
-    case FLUSHED:
-        stateChar = 'F';
-        break;
-    default:
-        stateChar = '?';
-        break;
+    } else {
+        switch (state) {
+        case IDLE:
+            stateChar = 'I';
+            break;
+        case STOPPING_1:
+            stateChar = 's';
+            break;
+        case STOPPING_2:
+            stateChar = '5';
+            break;
+        case STOPPED:
+            stateChar = 'S';
+            break;
+        case RESUMING:
+            stateChar = 'R';
+            break;
+        case ACTIVE:
+            stateChar = 'A';
+            break;
+        case PAUSING:
+            stateChar = 'p';
+            break;
+        case PAUSED:
+            stateChar = 'P';
+            break;
+        case FLUSHED:
+            stateChar = 'F';
+            break;
+        default:
+            stateChar = '?';
+            break;
+        }
     }
     char nowInUnderrun;
     switch (mObservedUnderruns.mBitFields.mMostRecent) {
@@ -465,14 +455,13 @@
         nowInUnderrun = '?';
         break;
     }
-    snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
-            "0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
+    snprintf(&buffer[7], size-7, " %6u %4u %3u %08X %7u %6u %1c %1d %5u %5.2g %5.2g  "
+                                 "%08X %08X %08X 0x%03X %9u%c\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
             mChannelMask,
             mSessionId,
-            mStepCount,
             mFrameCount,
             stateChar,
             mFillingUpStatus,
@@ -550,32 +539,33 @@
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
+
         if (state == PAUSED) {
-            mState = TrackBase::RESUMING;
-            ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            if (mResumeToStopping) {
+                // happened we need to resume to STOPPING_1
+                mState = TrackBase::STOPPING_1;
+                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
+            } else {
+                mState = TrackBase::RESUMING;
+                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            }
         } else {
             mState = TrackBase::ACTIVE;
             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
         }
 
-        if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
-            thread->mLock.unlock();
-            status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            if (status == NO_ERROR) {
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
-            }
-#endif
-        }
-        if (status == NO_ERROR) {
-            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->addTrack_l(this);
-        } else {
-            mState = state;
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        status = playbackThread->addTrack_l(this);
+        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+            //  restore previous state if start was rejected by policy manager
+            if (status == PERMISSION_DENIED) {
+                mState = state;
+            }
+        }
+        // track was already in the active list, not a problem
+        if (status == ALREADY_EXISTS) {
+            status = NO_ERROR;
         }
     } else {
         status = BAD_VALUE;
@@ -596,26 +586,18 @@
             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                 reset();
                 mState = STOPPED;
-            } else if (!isFastTrack()) {
+            } else if (!isFastTrack() && !isOffloaded()) {
                 mState = STOPPED;
             } else {
-                // prepareTracks_l() will set state to STOPPING_2 after next underrun,
-                // and then to STOPPED and reset() when presentation is complete
+                // For fast tracks prepareTracks_l() will set state to STOPPING_2
+                // presentation is complete
+                // For an offloaded track this starts a drain and state will
+                // move to STOPPING_2 when drain completes and then STOPPED
                 mState = STOPPING_1;
             }
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                     playbackThread);
         }
-        if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
-            thread->mLock.unlock();
-            AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-        }
     }
 }
 
@@ -625,19 +607,27 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState == ACTIVE || mState == RESUMING) {
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        switch (mState) {
+        case STOPPING_1:
+        case STOPPING_2:
+            if (!isOffloaded()) {
+                /* nothing to do if track is not offloaded */
+                break;
+            }
+
+            // Offloaded track was draining, we need to carry on draining when resumed
+            mResumeToStopping = true;
+            // fall through...
+        case ACTIVE:
+        case RESUMING:
             mState = PAUSING;
             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
-            if (!isOutputTrack()) {
-                thread->mLock.unlock();
-                AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-                thread->mLock.lock();
+            playbackThread->signal_l();
+            break;
 
-#ifdef ADD_BATTERY_DATA
-                // to track the speaker usage
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-            }
+        default:
+            break;
         }
     }
 }
@@ -648,21 +638,52 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
-                mState != PAUSING && mState != IDLE && mState != FLUSHED) {
-            return;
-        }
-        // No point remaining in PAUSED state after a flush => go to
-        // FLUSHED state
-        mState = FLUSHED;
-        // do not reset the track if it is still in the process of being stopped or paused.
-        // this will be done by prepareTracks_l() when the track is stopped.
-        // prepareTracks_l() will see mState == FLUSHED, then
-        // remove from active track list, reset(), and trigger presentation complete
         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-        if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+
+        if (isOffloaded()) {
+            // If offloaded we allow flush during any state except terminated
+            // and keep the track active to avoid problems if user is seeking
+            // rapidly and underlying hardware has a significant delay handling
+            // a pause
+            if (isTerminated()) {
+                return;
+            }
+
+            ALOGV("flush: offload flush");
             reset();
+
+            if (mState == STOPPING_1 || mState == STOPPING_2) {
+                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
+                mState = ACTIVE;
+            }
+
+            if (mState == ACTIVE) {
+                ALOGV("flush called in active state, resetting buffer time out retry count");
+                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
+            }
+
+            mResumeToStopping = false;
+        } else {
+            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
+                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
+                return;
+            }
+            // No point remaining in PAUSED state after a flush => go to
+            // FLUSHED state
+            mState = FLUSHED;
+            // do not reset the track if it is still in the process of being stopped or paused.
+            // this will be done by prepareTracks_l() when the track is stopped.
+            // prepareTracks_l() will see mState == FLUSHED, then
+            // remove from active track list, reset(), and trigger presentation complete
+            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+                reset();
+            }
         }
+        // Prevent flush being lost if the track is flushed and then resumed
+        // before mixer thread can run. This is important when offloading
+        // because the hardware buffer could hold a large amount of audio
+        playbackThread->flushOutput_l();
+        playbackThread->signal_l();
     }
 }
 
@@ -671,7 +692,6 @@
     // Do not reset twice to avoid discarding data written just after a flush and before
     // the audioflinger thread detects the track is stopped.
     if (!mResetDone) {
-        TrackBase::reset();
         // Force underrun condition to avoid false underrun callback until first data is
         // written to buffer
         android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
@@ -683,6 +703,20 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
+{
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == 0) {
+        ALOGE("thread is dead");
+        return FAILED_TRANSACTION;
+    } else if ((thread->type() == ThreadBase::DIRECT) ||
+                    (thread->type() == ThreadBase::OFFLOAD)) {
+        return thread->setParameters(keyValuePairs);
+    } else {
+        return PERMISSION_DENIED;
+    }
+}
+
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
 {
     status_t status = DEAD_OBJECT;
@@ -744,15 +778,23 @@
     // a track is considered presented when the total number of frames written to audio HAL
     // corresponds to the number of frames written when presentationComplete() is called for the
     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
+    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
+    // to detect when all frames have been played. In this case framesWritten isn't
+    // useful because it doesn't always reflect whether there is data in the h/w
+    // buffers, particularly if a track has been paused and resumed during draining
+    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
+                      mPresentationCompleteFrames, framesWritten);
     if (mPresentationCompleteFrames == 0) {
         mPresentationCompleteFrames = framesWritten + audioHalFrames;
         ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                   mPresentationCompleteFrames, audioHalFrames);
     }
-    if (framesWritten >= mPresentationCompleteFrames) {
+
+    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
         ALOGV("presentationComplete() session %d complete: framesWritten %d",
                   mSessionId, framesWritten);
         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+        mAudioTrackServerProxy->setStreamEndDone();
         return true;
     }
     return false;
@@ -798,7 +840,7 @@
 
 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
 {
-    if (mState == TERMINATED || mState == PAUSED ||
+    if (isTerminated() || mState == PAUSED ||
             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                       (mState == STOPPED)))) {
         ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
@@ -1356,9 +1398,9 @@
         mOutBuffer.frameCount = 0;
         playbackThread->mTracks.add(this);
         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
-                "mCblk->frameCount_ %u, mChannelMask 0x%08x mBufferEnd %p",
+                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
                 mCblk, mBuffer,
-                mCblk->frameCount_, mChannelMask, mBufferEnd);
+                mCblk->frameCount_, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
@@ -1613,7 +1655,7 @@
                   channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/),
         mOverflow(false)
 {
-    ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+    ALOGV("RecordTrack constructor");
     if (mCblk != NULL) {
         mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize);
@@ -1659,16 +1701,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        recordThread->mLock.lock();
-        bool doStop = recordThread->stop_l(this);
-        if (doStop) {
-            TrackBase::reset();
-            // Force overrun condition to avoid false overrun callback until first data is
-            // read from buffer
-            android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
-        }
-        recordThread->mLock.unlock();
-        if (doStop) {
+        if (recordThread->stop(this)) {
             AudioSystem::stopInput(recordThread->id());
         }
     }
@@ -1695,17 +1728,16 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("   Clien Fmt Chn mask   Session Step S Serv   FrameCount\n");
+    result.append("Client Fmt Chn mask Session S   Server fCount\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
 {
-    snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %08x %05d\n",
+    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6u\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
             mChannelMask,
             mSessionId,
-            mStepCount,
             mState,
             mCblk->server,
             mFrameCount);
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index a94c658..203d7c0 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -595,6 +595,22 @@
 
 void Camera2Client::setPreviewCallbackFlagL(Parameters &params, int flag) {
     status_t res = OK;
+
+    switch(params.state) {
+        case Parameters::STOPPED:
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+        case Parameters::PREVIEW:
+        case Parameters::STILL_CAPTURE:
+            // OK
+            break;
+        default:
+            if (flag & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+                ALOGE("%s: Camera %d: Can't use preview callbacks "
+                        "in state %d", __FUNCTION__, mCameraId, params.state);
+                return;
+            }
+    }
+
     if (flag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK) {
         ALOGV("%s: setting oneshot", __FUNCTION__);
         params.previewCallbackOneShot = true;
@@ -615,24 +631,15 @@
 
         params.previewCallbackFlags = flag;
 
-        switch(params.state) {
-            case Parameters::PREVIEW:
-                res = startPreviewL(params, true);
-                break;
-            case Parameters::RECORD:
-            case Parameters::VIDEO_SNAPSHOT:
-                res = startRecordingL(params, true);
-                break;
-            default:
-                break;
-        }
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to refresh request in state %s",
-                    __FUNCTION__, mCameraId,
-                    Parameters::getStateName(params.state));
+        if (params.state == Parameters::PREVIEW) {
+            res = startPreviewL(params, true);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+                        __FUNCTION__, mCameraId,
+                        Parameters::getStateName(params.state));
+            }
         }
     }
-
 }
 
 status_t Camera2Client::setPreviewCallbackTarget(
@@ -755,6 +762,26 @@
             params.previewCallbackSurface;
 
     if (callbacksEnabled) {
+        // Can't have recording stream hanging around when enabling callbacks,
+        // since it exceeds the max stream count on some devices.
+        if (mStreamingProcessor->getRecordingStreamId() != NO_STREAM) {
+            ALOGV("%s: Camera %d: Clearing out recording stream before "
+                    "creating callback stream", __FUNCTION__, mCameraId);
+            res = mStreamingProcessor->stopStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Can't stop streaming to delete "
+                        "recording stream", __FUNCTION__, mCameraId);
+                return res;
+            }
+            res = mStreamingProcessor->deleteRecordingStream();
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                        "enabling callbacks: %s (%d)", __FUNCTION__, mCameraId,
+                        strerror(-res), res);
+                return res;
+            }
+        }
+
         res = mCallbackProcessor->updateStream(params);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to update callback stream: %s (%d)",
@@ -951,6 +978,29 @@
         }
     }
 
+    // Not all devices can support a preview callback stream and a recording
+    // stream at the same time, so assume none of them can.
+    if (mCallbackProcessor->getStreamId() != NO_STREAM) {
+        ALOGV("%s: Camera %d: Clearing out callback stream before "
+                "creating recording stream", __FUNCTION__, mCameraId);
+        res = mStreamingProcessor->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mCallbackProcessor->deleteStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete callback stream before "
+                    "record: %s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+    // Disable callbacks if they're enabled; can't record and use callbacks,
+    // and we can't fail record start without stagefright asserting.
+    params.previewCallbackFlags = 0;
+
     res = updateProcessorStream<
             StreamingProcessor,
             &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
@@ -962,19 +1012,6 @@
     }
 
     Vector<uint8_t> outputStreams;
-    bool callbacksEnabled = (params.previewCallbackFlags &
-            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
-            params.previewCallbackSurface;
-
-    if (callbacksEnabled) {
-        res = mCallbackProcessor->updateStream(params);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to update callback stream: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-            return res;
-        }
-        outputStreams.push(getCallbackStreamId());
-    }
     outputStreams.push(getPreviewStreamId());
     outputStreams.push(getRecordingStreamId());
 
@@ -1706,6 +1743,8 @@
      * queue) and then try again. Resume streaming once we're done.
      */
     if (res == -EBUSY) {
+        ALOGV("%s: Camera %d: Pausing to update stream", __FUNCTION__,
+                mCameraId);
         res = mStreamingProcessor->togglePauseStream(/*pause*/true);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index 77df152..710d0e9 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -445,6 +445,10 @@
     return res;
 }
 
+bool Camera2Device::willNotify3A() {
+    return true;
+}
+
 void Camera2Device::notificationCallback(int32_t msg_type,
         int32_t ext1,
         int32_t ext2,
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h
index 3034a1d..372ce9f 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/Camera2Device.h
@@ -59,6 +59,7 @@
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
     virtual status_t waitUntilDrained();
     virtual status_t setNotifyCallback(NotificationListener *listener);
+    virtual bool     willNotify3A();
     virtual status_t waitForNextFrame(nsecs_t timeout);
     virtual status_t getNextFrame(CameraMetadata *frame);
     virtual status_t triggerAutofocus(uint32_t id);
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp
index 73bf30c..9d0f392 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/Camera3Device.cpp
@@ -844,6 +844,10 @@
     return OK;
 }
 
+bool Camera3Device::willNotify3A() {
+    return false;
+}
+
 status_t Camera3Device::waitForNextFrame(nsecs_t timeout) {
     ATRACE_CALL();
     status_t res;
@@ -1244,13 +1248,6 @@
 
     }
 
-    AlgState cur3aState;
-    AlgState new3aState;
-    int32_t aeTriggerId = 0;
-    int32_t afTriggerId = 0;
-
-    NotificationListener *listener = NULL;
-
     // Process the result metadata, if provided
     if (result->result != NULL) {
         Mutex::Autolock l(mOutputLock);
@@ -1288,59 +1285,6 @@
                     " metadata for frame %d (%lld vs %lld respectively)",
                     frameNumber, timestamp, entry.data.i64[0]);
         }
-
-        // Get 3A states from result metadata
-
-        entry = captureResult.find(ANDROID_CONTROL_AE_STATE);
-        if (entry.count == 0) {
-            CLOGE("No AE state provided by HAL for frame %d!",
-                    frameNumber);
-        } else {
-            new3aState.aeState =
-                    static_cast<camera_metadata_enum_android_control_ae_state>(
-                        entry.data.u8[0]);
-        }
-
-        entry = captureResult.find(ANDROID_CONTROL_AF_STATE);
-        if (entry.count == 0) {
-            CLOGE("No AF state provided by HAL for frame %d!",
-                    frameNumber);
-        } else {
-            new3aState.afState =
-                    static_cast<camera_metadata_enum_android_control_af_state>(
-                        entry.data.u8[0]);
-        }
-
-        entry = captureResult.find(ANDROID_CONTROL_AWB_STATE);
-        if (entry.count == 0) {
-            CLOGE("No AWB state provided by HAL for frame %d!",
-                    frameNumber);
-        } else {
-            new3aState.awbState =
-                    static_cast<camera_metadata_enum_android_control_awb_state>(
-                        entry.data.u8[0]);
-        }
-
-        entry = captureResult.find(ANDROID_CONTROL_AF_TRIGGER_ID);
-        if (entry.count == 0) {
-            CLOGE("No AF trigger ID provided by HAL for frame %d!",
-                    frameNumber);
-        } else {
-            afTriggerId = entry.data.i32[0];
-        }
-
-        entry = captureResult.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
-        if (entry.count == 0) {
-            CLOGE("No AE precapture trigger ID provided by HAL"
-                    " for frame %d!", frameNumber);
-        } else {
-            aeTriggerId = entry.data.i32[0];
-        }
-
-        listener = mListener;
-        cur3aState = m3AState;
-
-        m3AState = new3aState;
     } // scope for mOutputLock
 
     // Return completed buffers to their streams with the timestamp
@@ -1357,30 +1301,16 @@
         }
     }
 
-    // Finally, dispatch any 3A change events to listeners if we got metadata
+    // Finally, signal any waiters for new frames
 
     if (result->result != NULL) {
         mResultSignal.signal();
     }
 
-    if (result->result != NULL && listener != NULL) {
-        if (new3aState.aeState != cur3aState.aeState) {
-            ALOGVV("%s: AE state changed from 0x%x to 0x%x",
-                    __FUNCTION__, cur3aState.aeState, new3aState.aeState);
-            listener->notifyAutoExposure(new3aState.aeState, aeTriggerId);
-        }
-        if (new3aState.afState != cur3aState.afState) {
-            ALOGVV("%s: AF state changed from 0x%x to 0x%x",
-                    __FUNCTION__, cur3aState.afState, new3aState.afState);
-            listener->notifyAutoFocus(new3aState.afState, afTriggerId);
-        }
-        if (new3aState.awbState != cur3aState.awbState) {
-            listener->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
-        }
-    }
-
 }
 
+
+
 void Camera3Device::notify(const camera3_notify_msg *msg) {
     ATRACE_CALL();
     NotificationListener *listener;
diff --git a/services/camera/libcameraservice/Camera3Device.h b/services/camera/libcameraservice/Camera3Device.h
index faa42b9..2328f89 100644
--- a/services/camera/libcameraservice/Camera3Device.h
+++ b/services/camera/libcameraservice/Camera3Device.h
@@ -107,6 +107,7 @@
     virtual status_t waitUntilDrained();
 
     virtual status_t setNotifyCallback(NotificationListener *listener);
+    virtual bool     willNotify3A();
     virtual status_t waitForNextFrame(nsecs_t timeout);
     virtual status_t getNextFrame(CameraMetadata *frame);
 
@@ -389,18 +390,6 @@
     Condition              mResultSignal;
     NotificationListener  *mListener;
 
-    struct AlgState {
-        camera_metadata_enum_android_control_ae_state  aeState;
-        camera_metadata_enum_android_control_af_state  afState;
-        camera_metadata_enum_android_control_awb_state awbState;
-
-        AlgState() :
-                aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
-                afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
-                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
-        }
-    } m3AState;
-
     /**** End scope for mOutputLock ****/
 
     /**
diff --git a/services/camera/libcameraservice/CameraDeviceBase.h b/services/camera/libcameraservice/CameraDeviceBase.h
index 8c457d9..aa92bec 100644
--- a/services/camera/libcameraservice/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/CameraDeviceBase.h
@@ -156,6 +156,13 @@
     virtual status_t setNotifyCallback(NotificationListener *listener) = 0;
 
     /**
+     * Whether the device supports calling notifyAutofocus, notifyAutoExposure,
+     * and notifyAutoWhitebalance; if this returns false, the client must
+     * synthesize these notifications from received frame metadata.
+     */
+    virtual bool     willNotify3A() = 0;
+
+    /**
      * Wait for a new frame to be produced, with timeout in nanoseconds.
      * Returns TIMED_OUT when no frame produced within the specified duration
      */
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c284a0d..0eb3e32 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -489,6 +489,7 @@
             break;
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
             client = new ProCamera2Client(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index bc81409..d7bafda 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -109,7 +109,9 @@
 
     if (!mCallbackToApp && mCallbackConsumer == 0) {
         // Create CPU buffer queue endpoint, since app hasn't given us one
-        mCallbackConsumer = new CpuConsumer(kCallbackHeapCount);
+        // Make it async to avoid disconnect deadlocks
+        sp<BufferQueue> bq = new BufferQueue();
+        mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
         mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
         mCallbackWindow = new Surface(
@@ -167,7 +169,7 @@
 status_t CallbackProcessor::deleteStream() {
     ATRACE_CALL();
     sp<CameraDeviceBase> device;
-
+    status_t res;
     {
         Mutex::Autolock l(mInputMutex);
 
@@ -180,7 +182,19 @@
             return INVALID_OPERATION;
         }
     }
-    device->deleteStream(mCallbackStreamId);
+    res = device->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Error waiting for HAL to drain: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    res = device->deleteStream(mCallbackStreamId);
+    if (res != OK) {
+        ALOGE("%s: Unable to delete callback stream: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
 
     {
         Mutex::Autolock l(mInputMutex);
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index d13d398..114a7a8 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -33,6 +33,9 @@
     ProFrameProcessor(device),
     mClient(client),
     mLastFrameNumberOfFaces(0) {
+
+    sp<CameraDeviceBase> d = device.promote();
+    mSynthesize3ANotify = (d != 0) && !(d->willNotify3A());
 }
 
 FrameProcessor::~FrameProcessor() {
@@ -50,6 +53,11 @@
         return false;
     }
 
+    if (mSynthesize3ANotify) {
+        // Ignoring missing fields for now
+        process3aState(frame, client);
+    }
+
     if (!ProFrameProcessor::processSingleFrame(frame, device)) {
         return false;
     }
@@ -185,6 +193,99 @@
     return OK;
 }
 
+status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+        const sp<Camera2Client> &client) {
+
+    ATRACE_CALL();
+    camera_metadata_ro_entry_t entry;
+    int mId = client->getCameraId();
+
+    entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+    int32_t frameNumber = (entry.count > 0) ? entry.data.i32[0] : -1;
+
+    // Get 3A states from result metadata
+    bool gotAllStates = true;
+
+    AlgState new3aState;
+
+    entry = frame.find(ANDROID_CONTROL_AE_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AE state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.aeState =
+                static_cast<camera_metadata_enum_android_control_ae_state>(
+                    entry.data.u8[0]);
+    }
+
+    entry = frame.find(ANDROID_CONTROL_AF_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AF state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.afState =
+                static_cast<camera_metadata_enum_android_control_af_state>(
+                    entry.data.u8[0]);
+    }
+
+    entry = frame.find(ANDROID_CONTROL_AWB_STATE);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AWB state provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        new3aState.awbState =
+                static_cast<camera_metadata_enum_android_control_awb_state>(
+                    entry.data.u8[0]);
+    }
+
+    int32_t afTriggerId = 0;
+    entry = frame.find(ANDROID_CONTROL_AF_TRIGGER_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AF trigger ID provided by HAL for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        afTriggerId = entry.data.i32[0];
+    }
+
+    int32_t aeTriggerId = 0;
+    entry = frame.find(ANDROID_CONTROL_AE_PRECAPTURE_ID);
+    if (entry.count == 0) {
+        ALOGE("%s: Camera %d: No AE precapture trigger ID provided by HAL"
+                " for frame %d!",
+                __FUNCTION__, mId, frameNumber);
+        gotAllStates = false;
+    } else {
+        aeTriggerId = entry.data.i32[0];
+    }
+
+    if (!gotAllStates) return BAD_VALUE;
+
+    if (new3aState.aeState != m3aState.aeState) {
+        ALOGV("%s: AE state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.aeState, new3aState.aeState);
+        client->notifyAutoExposure(new3aState.aeState, aeTriggerId);
+    }
+    if (new3aState.afState != m3aState.afState) {
+        ALOGV("%s: AF state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.afState, new3aState.afState);
+        client->notifyAutoFocus(new3aState.afState, afTriggerId);
+    }
+    if (new3aState.awbState != m3aState.awbState) {
+        ALOGV("%s: AWB state changed from 0x%x to 0x%x",
+                __FUNCTION__, m3aState.awbState, new3aState.awbState);
+        client->notifyAutoWhitebalance(new3aState.awbState, aeTriggerId);
+    }
+
+    m3aState = new3aState;
+
+    return OK;
+}
+
+
 void FrameProcessor::callbackFaceDetection(sp<Camera2Client> client,
                                      const camera_frame_metadata &metadata) {
 
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h
index 27ed8f6..f480c55 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.h
@@ -44,6 +44,9 @@
 
   private:
     wp<Camera2Client> mClient;
+
+    bool mSynthesize3ANotify;
+
     int mLastFrameNumberOfFaces;
 
     void processNewFrames(const sp<Camera2Client> &client);
@@ -54,6 +57,22 @@
     status_t processFaceDetect(const CameraMetadata &frame,
             const sp<Camera2Client> &client);
 
+    // Send 3A state change notifications to client based on frame metadata
+    status_t process3aState(const CameraMetadata &frame,
+            const sp<Camera2Client> &client);
+
+    struct AlgState {
+        camera_metadata_enum_android_control_ae_state  aeState;
+        camera_metadata_enum_android_control_af_state  afState;
+        camera_metadata_enum_android_control_awb_state awbState;
+
+        AlgState() :
+                aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
+                afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
+                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE) {
+        }
+    } m3aState;
+
     // Emit FaceDetection event to java if faces changed
     void callbackFaceDetection(sp<Camera2Client> client,
                                const camera_frame_metadata &metadata);
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index f0a13ca..1d739cd 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -82,7 +82,8 @@
 
     if (mCaptureConsumer == 0) {
         // Create CPU buffer queue endpoint
-        mCaptureConsumer = new CpuConsumer(1);
+        sp<BufferQueue> bq = new BufferQueue();
+        mCaptureConsumer = new CpuConsumer(bq, 1);
         mCaptureConsumer->setFrameAvailableListener(this);
         mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
         mCaptureWindow = new Surface(
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index fed05a6..5981be7 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -17,6 +17,13 @@
 #define LOG_TAG "Camera2-StreamingProcessor"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0 // Per-frame verbose logging
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -42,7 +49,8 @@
         mRecordingRequestId(Camera2Client::kRecordingRequestIdStart),
         mRecordingStreamId(NO_STREAM),
         mRecordingFrameAvailable(false),
-        mRecordingHeapCount(kDefaultRecordingHeapCount)
+        mRecordingHeapCount(kDefaultRecordingHeapCount),
+        mRecordingHeapFree(kDefaultRecordingHeapCount)
 {
 }
 
@@ -215,22 +223,39 @@
 
 status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
     ATRACE_CALL();
-    // 32 is the current upper limit on the video buffer count for BufferQueue
-    if (count > 32) {
-        ALOGE("%s: Camera %d: Error setting %d as video buffer count value",
-                __FUNCTION__, mId, count);
+    // Make sure we can support this many buffer slots
+    if (count > BufferQueue::NUM_BUFFER_SLOTS) {
+        ALOGE("%s: Camera %d: Too many recording buffers requested: %zu, max %d",
+                __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS);
         return BAD_VALUE;
     }
 
     Mutex::Autolock m(mMutex);
 
-    // Need to reallocate memory for heap
+    ALOGV("%s: Camera %d: New recording buffer count from encoder: %zu",
+            __FUNCTION__, mId, count);
+
+    // Need to re-size consumer and heap
     if (mRecordingHeapCount != count) {
-        if  (mRecordingHeap != 0) {
+        ALOGV("%s: Camera %d: Resetting recording heap and consumer",
+            __FUNCTION__, mId);
+
+        if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
+            ALOGE("%s: Camera %d: Setting recording buffer count when "
+                    "recording stream is already active!", __FUNCTION__,
+                    mId);
+            return INVALID_OPERATION;
+        }
+
+        releaseAllRecordingFramesLocked();
+
+        if (mRecordingHeap != 0) {
             mRecordingHeap.clear();
-            mRecordingHeap = NULL;
         }
         mRecordingHeapCount = count;
+        mRecordingHeapFree = count;
+
+        mRecordingConsumer.clear();
     }
 
     return OK;
@@ -287,18 +312,22 @@
         return INVALID_OPERATION;
     }
 
+    bool newConsumer = false;
     if (mRecordingConsumer == 0) {
+        ALOGV("%s: Camera %d: Creating recording consumer with %zu + 1 "
+                "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount);
         // Create CPU buffer queue endpoint. We need one more buffer here so that we can
         // always acquire and free a buffer when the heap is full; otherwise the consumer
         // will have buffers in flight we'll never clear out.
-        mRecordingConsumer = new BufferItemConsumer(
+        sp<BufferQueue> bq = new BufferQueue();
+        mRecordingConsumer = new BufferItemConsumer(bq,
                 GRALLOC_USAGE_HW_VIDEO_ENCODER,
-                mRecordingHeapCount + 1,
-                true);
+                mRecordingHeapCount + 1);
         mRecordingConsumer->setFrameAvailableListener(this);
         mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
         mRecordingWindow = new Surface(
             mRecordingConsumer->getProducerInterface());
+        newConsumer = true;
         // Allocate memory later, since we don't know buffer size until receipt
     }
 
@@ -314,7 +343,7 @@
             return res;
         }
         if (currentWidth != (uint32_t)params.videoWidth ||
-                currentHeight != (uint32_t)params.videoHeight) {
+                currentHeight != (uint32_t)params.videoHeight || newConsumer) {
             // TODO: Should wait to be sure previous recording has finished
             res = device->deleteStream(mRecordingStreamId);
 
@@ -400,6 +429,17 @@
 
     Mutex::Autolock m(mMutex);
 
+    // If a recording stream is being started up, free up any
+    // outstanding buffers left from the previous recording session.
+    // There should never be any, so if there are, warn about it.
+    if (isStreamActive(outputStreams, mRecordingStreamId)) {
+        releaseAllRecordingFramesLocked();
+    }
+
+    ALOGV("%s: Camera %d: %s started, recording heap has %zu free of %zu",
+            __FUNCTION__, mId, (type == PREVIEW) ? "preview" : "recording",
+            mRecordingHeapFree, mRecordingHeapCount);
+
     CameraMetadata &request = (type == PREVIEW) ?
             mPreviewRequest : mRecordingRequest;
 
@@ -428,7 +468,7 @@
     }
     mActiveRequest = type;
     mPaused = false;
-
+    mActiveStreamIds = outputStreams;
     return OK;
 }
 
@@ -500,6 +540,7 @@
     }
 
     mActiveRequest = NONE;
+    mActiveStreamIds.clear();
     mPaused = false;
 
     return OK;
@@ -576,7 +617,7 @@
     if (client == 0) {
         // Discard frames during shutdown
         BufferItemConsumer::BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
         if (res != OK) {
             if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                 ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
@@ -594,7 +635,7 @@
         SharedParameters::Lock l(client->getParameters());
         Mutex::Autolock m(mMutex);
         BufferItemConsumer::BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
         if (res != OK) {
             if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                 ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
@@ -605,7 +646,7 @@
         timestamp = imgBuffer.mTimestamp;
 
         mRecordingFrameCount++;
-        ALOGV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
+        ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
 
         if (l.mParameters.state != Parameters::RECORD &&
                 l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
@@ -656,7 +697,7 @@
         mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
         mRecordingHeapFree--;
 
-        ALOGV("%s: Camera %d: Timestamp %lld",
+        ALOGVV("%s: Camera %d: Timestamp %lld",
                 __FUNCTION__, mId, timestamp);
 
         ssize_t offset;
@@ -669,7 +710,7 @@
         uint32_t type = kMetadataBufferTypeGrallocSource;
         *((uint32_t*)data) = type;
         *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
-        ALOGV("%s: Camera %d: Sending out buffer_handle_t %p",
+        ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p",
                 __FUNCTION__, mId,
                 imgBuffer.mGraphicBuffer->handle);
         mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
@@ -682,7 +723,10 @@
         l.mRemoteCallback->dataCallbackTimestamp(timestamp,
                 CAMERA_MSG_VIDEO_FRAME,
                 recordingHeap->mBuffers[heapIdx]);
+    } else {
+        ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId);
     }
+
     return OK;
 }
 
@@ -730,7 +774,7 @@
         return;
     }
 
-    ALOGV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
+    ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
             mId, imgHandle);
 
     res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
@@ -743,6 +787,58 @@
     mRecordingBuffers.replaceAt(itemIndex);
 
     mRecordingHeapFree++;
+    ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount,
+            "%s: Camera %d: All %d recording buffers returned",
+            __FUNCTION__, mId, mRecordingHeapCount);
+}
+
+void StreamingProcessor::releaseAllRecordingFramesLocked() {
+    ATRACE_CALL();
+    status_t res;
+
+    if (mRecordingConsumer == 0) {
+        return;
+    }
+
+    ALOGV("%s: Camera %d: Releasing all recording buffers", __FUNCTION__,
+            mId);
+
+    size_t releasedCount = 0;
+    for (size_t itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
+        const BufferItemConsumer::BufferItem item =
+                mRecordingBuffers[itemIndex];
+        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT) {
+            res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to free recording frame "
+                        "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
+                        mId, item.mGraphicBuffer->handle, strerror(-res), res);
+            }
+            mRecordingBuffers.replaceAt(itemIndex);
+            releasedCount++;
+        }
+    }
+
+    if (releasedCount > 0) {
+        ALOGW("%s: Camera %d: Force-freed %zu outstanding buffers "
+                "from previous recording session", __FUNCTION__, mId, releasedCount);
+        ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree,
+            "%s: Camera %d: Force-freed %zu buffers, but expected %zu",
+            __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree);
+    }
+
+    mRecordingHeapHead = 0;
+    mRecordingHeapFree = mRecordingHeapCount;
+}
+
+bool StreamingProcessor::isStreamActive(const Vector<uint8_t> &streams,
+        uint8_t recordingStreamId) {
+    for (size_t i = 0; i < streams.size(); i++) {
+        if (streams[i] == recordingStreamId) {
+            return true;
+        }
+    }
+    return false;
 }
 
 
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/camera2/StreamingProcessor.h
index 9f71fa0..3ec2df7 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.h
@@ -97,6 +97,8 @@
     StreamType mActiveRequest;
     bool mPaused;
 
+    Vector<uint8_t> mActiveStreamIds;
+
     // Preview-related members
     int32_t mPreviewRequestId;
     int mPreviewStreamId;
@@ -125,6 +127,13 @@
     virtual bool threadLoop();
 
     status_t processRecordingFrame();
+
+    // Unilaterally free any buffers still outstanding to stagefright
+    void releaseAllRecordingFramesLocked();
+
+    // Determine if the specified stream is currently in use
+    static bool isStreamActive(const Vector<uint8_t> &streams,
+            uint8_t recordingStreamId);
 };
 
 
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
index 94059cd..0094992 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -128,10 +128,10 @@
 
     if (mZslConsumer == 0) {
         // Create CPU buffer queue endpoint
-        mZslConsumer = new BufferItemConsumer(
+        sp<BufferQueue> bq = new BufferQueue();
+        mZslConsumer = new BufferItemConsumer(bq,
             GRALLOC_USAGE_HW_CAMERA_ZSL,
-            kZslBufferDepth,
-            true);
+            kZslBufferDepth);
         mZslConsumer->setFrameAvailableListener(this);
         mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
         mZslWindow = new Surface(
@@ -426,7 +426,7 @@
     }
     ALOGVV("Trying to get next buffer");
     BufferItemConsumer::BufferItem item;
-    res = zslConsumer->acquireBuffer(&item);
+    res = zslConsumer->acquireBuffer(&item, 0);
     if (res != OK) {
         if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
             ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
index 13e9c83..e9a9c2b 100644
--- a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
@@ -211,9 +211,9 @@
     mFrameCount = 0;
 
     if (mConsumer.get() == 0) {
-        mConsumer = new BufferItemConsumer(camera3_stream::usage,
-                                           mTotalBufferCount,
-                                           /*synchronousMode*/true);
+        sp<BufferQueue> bq = new BufferQueue();
+        mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
+                                           mTotalBufferCount);
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
     }
 
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
index f085443..0ec2b05 100644
--- a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
@@ -304,7 +304,7 @@
     ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
             maxConsumerBuffers, camera3_stream::max_buffers);
     if (camera3_stream::max_buffers == 0) {
-        ALOGE("%s: Camera HAL requested no max_buffers, requires at least 1",
+        ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
                 __FUNCTION__, camera3_stream::max_buffers);
         return INVALID_OPERATION;
     }
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index dfa1066..8141f4e 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -36,11 +36,10 @@
 
 RingBufferConsumer::RingBufferConsumer(uint32_t consumerUsage,
         int bufferCount) :
-    ConsumerBase(new BufferQueue(true)),
+    ConsumerBase(new BufferQueue()),
     mBufferCount(bufferCount)
 {
     mBufferQueue->setConsumerUsageBits(consumerUsage);
-    mBufferQueue->setSynchronousMode(true);
     mBufferQueue->setMaxAcquiredBufferCount(bufferCount);
 
     assert(bufferCount > 0);
@@ -284,7 +283,7 @@
         /**
          * Acquire new frame
          */
-        err = acquireBufferLocked(&item);
+        err = acquireBufferLocked(&item, 0);
         if (err != OK) {
             if (err != NO_BUFFER_AVAILABLE) {
                 BI_LOGE("Error acquiring buffer: %s (%d)", strerror(err), err);
diff --git a/services/camera/libcameraservice/photography/CameraDeviceClient.cpp b/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
index 3209a56..b7239e2 100644
--- a/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/photography/CameraDeviceClient.cpp
@@ -176,7 +176,7 @@
             ALOGE("%s: Camera %d:  Got error %d after trying to set streaming "
                   "request", __FUNCTION__, mCameraId, res);
         } else {
-            mStreamingRequestList.push_back(mRequestIdCounter);
+            mStreamingRequestList.push_back(requestId);
         }
     } else {
         res = mDevice->capture(metadata);
@@ -325,8 +325,8 @@
 
     // FIXME: remove this override since the default format should be
     //       IMPLEMENTATION_DEFINED. b/9487482
-    if (format != HAL_PIXEL_FORMAT_BLOB &&
-        format != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
         ALOGW("%s: Camera %d: Overriding format 0x%x to IMPLEMENTATION_DEFINED",
               __FUNCTION__, mCameraId, format);
         format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
@@ -337,8 +337,23 @@
     // after each call, but only once we are done with all.
 
     int streamId = -1;
-    res = mDevice->createStream(anw, width, height, format, /*size*/1,
-                                &streamId);
+    if (format == HAL_PIXEL_FORMAT_BLOB) {
+        // JPEG buffers need to be sized for maximum possible compressed size
+        CameraMetadata staticInfo = mDevice->info();
+        camera_metadata_entry_t entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+        if (entry.count == 0) {
+            ALOGE("%s: Camera %d: Can't find maximum JPEG size in "
+                    "static metadata!", __FUNCTION__, mCameraId);
+            return INVALID_OPERATION;
+        }
+        int32_t maxJpegSize = entry.data.i32[0];
+        res = mDevice->createStream(anw, width, height, format, maxJpegSize,
+                &streamId);
+    } else {
+        // All other streams are a known size
+        res = mDevice->createStream(anw, width, height, format, /*size*/0,
+                &streamId);
+    }
 
     if (res == OK) {
         mStreamMap.add(bufferProducer->asBinder(), streamId);
@@ -376,28 +391,47 @@
     return res;
 }
 
-status_t CameraDeviceClient::getCameraInfo(int cameraId,
-                                         /*out*/
-                                         camera_metadata** info)
+status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
 {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
     status_t res = OK;
 
-    // TODO: remove cameraId. this should be device-specific info, not static.
-    if (cameraId != mCameraId) {
-        return INVALID_OPERATION;
-    }
-
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    CameraMetadata deviceInfo = mDevice->info();
-    *info = deviceInfo.release();
+    if (info != NULL) {
+        *info = mDevice->info(); // static camera metadata
+        // TODO: merge with device-specific camera metadata
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::waitUntilIdle()
+{
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    // FIXME: Also need check repeating burst.
+    if (!mStreamingRequestList.isEmpty()) {
+        ALOGE("%s: Camera %d: Try to waitUntilIdle when there are active streaming requests",
+              __FUNCTION__, mCameraId);
+        return INVALID_OPERATION;
+    }
+    res = mDevice->waitUntilDrained();
+    ALOGV("%s Done", __FUNCTION__);
 
     return res;
 }
diff --git a/services/camera/libcameraservice/photography/CameraDeviceClient.h b/services/camera/libcameraservice/photography/CameraDeviceClient.h
index 806aa15..bb2949c 100644
--- a/services/camera/libcameraservice/photography/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/photography/CameraDeviceClient.h
@@ -85,10 +85,10 @@
 
     // Get the static metadata for the camera
     // -- Caller owns the newly allocated metadata
-    virtual status_t      getCameraInfo(int cameraId,
-                                        /*out*/
-                                        camera_metadata** info);
+    virtual status_t      getCameraInfo(/*out*/CameraMetadata* info);
 
+    // Wait until all the submitted requests have finished processing
+    virtual status_t      waitUntilIdle();
     /**
      * Interface used by CameraService
      */