am 29d06015: am 6e3ee310: am 31d49615: am 27b876d4: am 9bc40a2c: am 98603acb: am 36337f8f: am 7d72965f: am 60476336: Remove obsolete drm manager code

* commit '29d060155e2c3e838fcadf303fcf2d1ecfb78710':
  Remove obsolete drm manager code
diff --git a/camera/Android.mk b/camera/Android.mk
index fa518ff..e633450 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -16,6 +16,9 @@
 	ICameraRecordingProxyListener.cpp \
 	IProCameraUser.cpp \
 	IProCameraCallbacks.cpp \
+	camera2/ICameraDeviceUser.cpp \
+	camera2/ICameraDeviceCallbacks.cpp \
+	camera2/CaptureRequest.cpp \
 	ProCamera.cpp \
 	CameraBase.cpp \
 
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 1b136de..22016a9 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -39,6 +39,9 @@
 {
 }
 
+CameraTraits<Camera>::TCamConnectService CameraTraits<Camera>::fnConnectService =
+        &ICameraService::connect;
+
 // construct a camera client from an existing camera remote
 sp<Camera> Camera::create(const sp<ICamera>& camera)
 {
@@ -255,6 +258,14 @@
     mCamera->setPreviewCallbackFlag(flag);
 }
 
+status_t Camera::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer)
+{
+    sp <ICamera> c = mCamera;
+    if (c == 0) return NO_INIT;
+    return c->setPreviewCallbackTarget(callbackProducer);
+}
+
 // callback from camera service
 void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
 {
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index c25c5fd..55376b0 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -92,20 +92,25 @@
 
 template <typename TCam, typename TCamTraits>
 sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId,
-                                         const String16& clientPackageName,
+                                               const String16& clientPackageName,
                                                int clientUid)
 {
     ALOGV("%s: connect", __FUNCTION__);
     sp<TCam> c = new TCam(cameraId);
     sp<TCamCallbacks> cl = c;
+    status_t status = NO_ERROR;
     const sp<ICameraService>& cs = getCameraService();
+
     if (cs != 0) {
-        c->mCamera = cs->connect(cl, cameraId, clientPackageName, clientUid);
+        TCamConnectService fnConnectService = TCamTraits::fnConnectService;
+        status = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
+                                             /*out*/ c->mCamera);
     }
-    if (c->mCamera != 0) {
+    if (status == OK && c->mCamera != 0) {
         c->mCamera->asBinder()->linkToDeath(c);
         c->mStatus = NO_ERROR;
     } else {
+        ALOGW("An error occurred while connecting to camera: %d", cameraId);
         c.clear();
     }
     return c;
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index a8f9eff..f447c5b 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -21,9 +21,13 @@
 #include <utils/Errors.h>
 
 #include <camera/CameraMetadata.h>
+#include <binder/Parcel.h>
 
 namespace android {
 
+typedef Parcel::WritableBlob WritableBlob;
+typedef Parcel::ReadableBlob ReadableBlob;
+
 CameraMetadata::CameraMetadata() :
         mBuffer(NULL), mLocked(false) {
 }
@@ -408,4 +412,175 @@
     return OK;
 }
 
+status_t CameraMetadata::readFromParcel(const Parcel& data,
+                                        camera_metadata_t** out) {
+
+    status_t err = OK;
+
+    camera_metadata_t* metadata = NULL;
+
+    if (out) {
+        *out = NULL;
+    }
+
+    // arg0 = metadataSize (int32)
+    int32_t metadataSizeTmp = -1;
+    if ((err = data.readInt32(&metadataSizeTmp)) != OK) {
+        ALOGE("%s: Failed to read metadata size (error %d %s)",
+              __FUNCTION__, err, strerror(-err));
+        return err;
+    }
+    const size_t metadataSize = static_cast<size_t>(metadataSizeTmp);
+
+    if (metadataSize == 0) {
+        ALOGV("%s: Read 0-sized metadata", __FUNCTION__);
+        return OK;
+    }
+
+    // NOTE: this doesn't make sense to me. Shouldn't the blob
+    // know how big it is? Why do we have to specify the size
+    // to Parcel::readBlob?
+
+    ReadableBlob blob;
+    // arg1 = metadata (blob)
+    do {
+        if ((err = data.readBlob(metadataSize, &blob)) != OK) {
+            ALOGE("%s: Failed to read metadata blob (sized %d). Possible "
+                  " serialization bug. Error %d %s",
+                  __FUNCTION__, metadataSize, err, strerror(-err));
+            break;
+        }
+        const camera_metadata_t* tmp =
+                       reinterpret_cast<const camera_metadata_t*>(blob.data());
+
+        metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize);
+        if (metadata == NULL) {
+            // We consider that allocation only fails if the validation
+            // also failed, therefore the readFromParcel was a failure.
+            err = BAD_VALUE;
+        }
+    } while(0);
+    blob.release();
+
+    if (out) {
+        ALOGV("%s: Set out metadata to %p", __FUNCTION__, metadata);
+        *out = metadata;
+    } else if (metadata != NULL) {
+        ALOGV("%s: Freed camera metadata at %p", __FUNCTION__, metadata);
+        free_camera_metadata(metadata);
+    }
+
+    return err;
+}
+
+status_t CameraMetadata::writeToParcel(Parcel& data,
+                                       const camera_metadata_t* metadata) {
+    status_t res = OK;
+
+    // arg0 = metadataSize (int32)
+
+    if (metadata == NULL) {
+        return data.writeInt32(0);
+    }
+
+    const size_t metadataSize = get_camera_metadata_compact_size(metadata);
+    res = data.writeInt32(static_cast<int32_t>(metadataSize));
+    if (res != OK) {
+        return res;
+    }
+
+    // arg1 = metadata (blob)
+    WritableBlob blob;
+    do {
+        res = data.writeBlob(metadataSize, &blob);
+        if (res != OK) {
+            break;
+        }
+        copy_camera_metadata(blob.data(), metadataSize, metadata);
+
+        IF_ALOGV() {
+            if (validate_camera_metadata_structure(
+                        (const camera_metadata_t*)blob.data(),
+                        &metadataSize) != OK) {
+                ALOGV("%s: Failed to validate metadata %p after writing blob",
+                       __FUNCTION__, blob.data());
+            } else {
+                ALOGV("%s: Metadata written to blob. Validation success",
+                        __FUNCTION__);
+            }
+        }
+
+        // Not too big of a problem since receiving side does hard validation
+        // Don't check the size since the compact size could be larger
+        if (validate_camera_metadata_structure(metadata, /*size*/NULL) != OK) {
+            ALOGW("%s: Failed to validate metadata %p before writing blob",
+                   __FUNCTION__, metadata);
+        }
+
+    } while(false);
+    blob.release();
+
+    return res;
+}
+
+status_t CameraMetadata::readFromParcel(Parcel *parcel) {
+
+    ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
+
+    status_t res = OK;
+
+    if (parcel == NULL) {
+        ALOGE("%s: parcel is null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if (mLocked) {
+        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    camera_metadata *buffer = NULL;
+    // TODO: reading should return a status code, in case validation fails
+    res = CameraMetadata::readFromParcel(*parcel, &buffer);
+
+    if (res != NO_ERROR) {
+        ALOGE("%s: Failed to read from parcel. Metadata is unchanged.",
+              __FUNCTION__);
+        return res;
+    }
+
+    clear();
+    mBuffer = buffer;
+
+    return OK;
+}
+
+status_t CameraMetadata::writeToParcel(Parcel *parcel) const {
+
+    ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
+
+    if (parcel == NULL) {
+        ALOGE("%s: parcel is null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    return CameraMetadata::writeToParcel(*parcel, mBuffer);
+}
+
+void CameraMetadata::swap(CameraMetadata& other) {
+    if (mLocked) {
+        ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
+        return;
+    } else if (other.mLocked) {
+        ALOGE("%s: Other CameraMetadata is locked", __FUNCTION__);
+        return;
+    }
+
+    camera_metadata* thisBuf = mBuffer;
+    camera_metadata* otherBuf = other.mBuffer;
+
+    other.mBuffer = thisBuf;
+    mBuffer = otherBuf;
+}
+
 }; // namespace android
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 8900867..12356f0 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -31,6 +31,7 @@
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
     SET_PREVIEW_TEXTURE,
     SET_PREVIEW_CALLBACK_FLAG,
+    SET_PREVIEW_CALLBACK_TARGET,
     START_PREVIEW,
     STOP_PREVIEW,
     AUTO_FOCUS,
@@ -65,6 +66,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
         remote()->transact(DISCONNECT, data, &reply);
+        reply.readExceptionCode();
     }
 
     // pass the buffered IGraphicBufferProducer to the camera service
@@ -90,6 +92,18 @@
         remote()->transact(SET_PREVIEW_CALLBACK_FLAG, data, &reply);
     }
 
+    status_t setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer)
+    {
+        ALOGV("setPreviewCallbackTarget");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        sp<IBinder> b(callbackProducer->asBinder());
+        data.writeStrongBinder(b);
+        remote()->transact(SET_PREVIEW_CALLBACK_TARGET, data, &reply);
+        return reply.readInt32();
+    }
+
     // start preview mode, must call setPreviewDisplay first
     status_t startPreview()
     {
@@ -268,6 +282,7 @@
             ALOGV("DISCONNECT");
             CHECK_INTERFACE(ICamera, data, reply);
             disconnect();
+            reply->writeNoException();
             return NO_ERROR;
         } break;
         case SET_PREVIEW_TEXTURE: {
@@ -285,6 +300,14 @@
             setPreviewCallbackFlag(callback_flag);
             return NO_ERROR;
         } break;
+        case SET_PREVIEW_CALLBACK_TARGET: {
+            ALOGV("SET_PREVIEW_CALLBACK_TARGET");
+            CHECK_INTERFACE(ICamera, data, reply);
+            sp<IGraphicBufferProducer> cp =
+                interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
+            reply->writeInt32(setPreviewCallbackTarget(cp));
+            return NO_ERROR;
+        }
         case START_PREVIEW: {
             ALOGV("START_PREVIEW");
             CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index 134f7f0..3debe22 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -15,6 +15,9 @@
 ** limitations under the License.
 */
 
+#define LOG_TAG "BpCameraService"
+#include <utils/Log.h>
+
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -28,9 +31,58 @@
 #include <camera/IProCameraCallbacks.h>
 #include <camera/ICamera.h>
 #include <camera/ICameraClient.h>
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
 
 namespace android {
 
+namespace {
+
+enum {
+    EX_SECURITY = -1,
+    EX_BAD_PARCELABLE = -2,
+    EX_ILLEGAL_ARGUMENT = -3,
+    EX_NULL_POINTER = -4,
+    EX_ILLEGAL_STATE = -5,
+    EX_HAS_REPLY_HEADER = -128,  // special; see below
+};
+
+static bool readExceptionCode(Parcel& reply) {
+    int32_t exceptionCode = reply.readExceptionCode();
+
+    if (exceptionCode != 0) {
+        const char* errorMsg;
+        switch(exceptionCode) {
+            case EX_SECURITY:
+                errorMsg = "Security";
+                break;
+            case EX_BAD_PARCELABLE:
+                errorMsg = "BadParcelable";
+                break;
+            case EX_NULL_POINTER:
+                errorMsg = "NullPointer";
+                break;
+            case EX_ILLEGAL_STATE:
+                errorMsg = "IllegalState";
+                break;
+            // Binder should be handling this code inside Parcel::readException
+            // but let's have a to-string here anyway just in case.
+            case EX_HAS_REPLY_HEADER:
+                errorMsg = "HasReplyHeader";
+                break;
+            default:
+                errorMsg = "Unknown";
+        }
+
+        ALOGE("Binder transmission error %s (%d)", errorMsg, exceptionCode);
+        return true;
+    }
+
+    return false;
+}
+
+};
+
 class BpCameraService: public BpInterface<ICameraService>
 {
 public:
@@ -45,6 +97,8 @@
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
+
+        if (readExceptionCode(reply)) return 0;
         return reply.readInt32();
     }
 
@@ -55,14 +109,21 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeInt32(cameraId);
         remote()->transact(BnCameraService::GET_CAMERA_INFO, data, &reply);
-        cameraInfo->facing = reply.readInt32();
-        cameraInfo->orientation = reply.readInt32();
-        return reply.readInt32();
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t result = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            cameraInfo->facing = reply.readInt32();
+            cameraInfo->orientation = reply.readInt32();
+        }
+        return result;
     }
 
-    // connect to camera service
-    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId,
-                                const String16 &clientPackageName, int clientUid)
+    // connect to camera service (android.hardware.Camera)
+    virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
+                             const String16 &clientPackageName, int clientUid,
+                             /*out*/
+                             sp<ICamera>& device)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
@@ -71,12 +132,20 @@
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT, data, &reply);
-        return interface_cast<ICamera>(reply.readStrongBinder());
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t status = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            device = interface_cast<ICamera>(reply.readStrongBinder());
+        }
+        return status;
     }
 
     // connect to camera service (pro client)
-    virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb, int cameraId,
-                                       const String16 &clientPackageName, int clientUid)
+    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId,
+                                const String16 &clientPackageName, int clientUid,
+                                /*out*/
+                                sp<IProCameraUser>& device)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
@@ -85,7 +154,38 @@
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT_PRO, data, &reply);
-        return interface_cast<IProCameraUser>(reply.readStrongBinder());
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t status = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            device = interface_cast<IProCameraUser>(reply.readStrongBinder());
+        }
+        return status;
+    }
+
+    // connect to camera service (android.hardware.camera2.CameraDevice)
+    virtual status_t connectDevice(
+            const sp<ICameraDeviceCallbacks>& cameraCb,
+            int cameraId,
+            const String16& clientPackageName,
+            int clientUid,
+            /*out*/
+            sp<ICameraDeviceUser>& device)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeStrongBinder(cameraCb->asBinder());
+        data.writeInt32(cameraId);
+        data.writeString16(clientPackageName);
+        data.writeInt32(clientUid);
+        remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t status = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            device = interface_cast<ICameraDeviceUser>(reply.readStrongBinder());
+        }
+        return status;
     }
 
     virtual status_t addListener(const sp<ICameraServiceListener>& listener)
@@ -94,6 +194,8 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(listener->asBinder());
         remote()->transact(BnCameraService::ADD_LISTENER, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
         return reply.readInt32();
     }
 
@@ -103,6 +205,8 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(listener->asBinder());
         remote()->transact(BnCameraService::REMOVE_LISTENER, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
         return reply.readInt32();
     }
 };
@@ -117,17 +221,22 @@
     switch(code) {
         case GET_NUMBER_OF_CAMERAS: {
             CHECK_INTERFACE(ICameraService, data, reply);
+            reply->writeNoException();
             reply->writeInt32(getNumberOfCameras());
             return NO_ERROR;
         } break;
         case GET_CAMERA_INFO: {
             CHECK_INTERFACE(ICameraService, data, reply);
-            CameraInfo cameraInfo;
+            CameraInfo cameraInfo = CameraInfo();
             memset(&cameraInfo, 0, sizeof(cameraInfo));
             status_t result = getCameraInfo(data.readInt32(), &cameraInfo);
+            reply->writeNoException();
+            reply->writeInt32(result);
+
+            // Fake a parcelable object here
+            reply->writeInt32(1); // means the parcelable is included
             reply->writeInt32(cameraInfo.facing);
             reply->writeInt32(cameraInfo.orientation);
-            reply->writeInt32(result);
             return NO_ERROR;
         } break;
         case CONNECT: {
@@ -137,26 +246,64 @@
             int32_t cameraId = data.readInt32();
             const String16 clientName = data.readString16();
             int32_t clientUid = data.readInt32();
-            sp<ICamera> camera = connect(cameraClient, cameraId,
-                    clientName, clientUid);
-            reply->writeStrongBinder(camera->asBinder());
+            sp<ICamera> camera;
+            status_t status = connect(cameraClient, cameraId,
+                    clientName, clientUid, /*out*/ camera);
+            reply->writeNoException();
+            reply->writeInt32(status);
+            if (camera != NULL) {
+                reply->writeInt32(1);
+                reply->writeStrongBinder(camera->asBinder());
+            } else {
+                reply->writeInt32(0);
+            }
             return NO_ERROR;
         } break;
         case CONNECT_PRO: {
             CHECK_INTERFACE(ICameraService, data, reply);
-            sp<IProCameraCallbacks> cameraClient = interface_cast<IProCameraCallbacks>(data.readStrongBinder());
+            sp<IProCameraCallbacks> cameraClient =
+                interface_cast<IProCameraCallbacks>(data.readStrongBinder());
             int32_t cameraId = data.readInt32();
             const String16 clientName = data.readString16();
             int32_t clientUid = data.readInt32();
-            sp<IProCameraUser> camera = connect(cameraClient, cameraId,
-                                                clientName, clientUid);
-            reply->writeStrongBinder(camera->asBinder());
+            sp<IProCameraUser> camera;
+            status_t status = connectPro(cameraClient, cameraId,
+                    clientName, clientUid, /*out*/ camera);
+            reply->writeNoException();
+            reply->writeInt32(status);
+            if (camera != NULL) {
+                reply->writeInt32(1);
+                reply->writeStrongBinder(camera->asBinder());
+            } else {
+                reply->writeInt32(0);
+            }
+            return NO_ERROR;
+        } break;
+        case CONNECT_DEVICE: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            sp<ICameraDeviceCallbacks> cameraClient =
+                interface_cast<ICameraDeviceCallbacks>(data.readStrongBinder());
+            int32_t cameraId = data.readInt32();
+            const String16 clientName = data.readString16();
+            int32_t clientUid = data.readInt32();
+            sp<ICameraDeviceUser> camera;
+            status_t status = connectDevice(cameraClient, cameraId,
+                    clientName, clientUid, /*out*/ camera);
+            reply->writeNoException();
+            reply->writeInt32(status);
+            if (camera != NULL) {
+                reply->writeInt32(1);
+                reply->writeStrongBinder(camera->asBinder());
+            } else {
+                reply->writeInt32(0);
+            }
             return NO_ERROR;
         } break;
         case ADD_LISTENER: {
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraServiceListener> listener =
                 interface_cast<ICameraServiceListener>(data.readStrongBinder());
+            reply->writeNoException();
             reply->writeInt32(addListener(listener));
             return NO_ERROR;
         } break;
@@ -164,6 +311,7 @@
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraServiceListener> listener =
                 interface_cast<ICameraServiceListener>(data.readStrongBinder());
+            reply->writeNoException();
             reply->writeInt32(removeListener(listener));
             return NO_ERROR;
         } break;
diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp
index 640ee35..b2f1729 100644
--- a/camera/ICameraServiceListener.cpp
+++ b/camera/ICameraServiceListener.cpp
@@ -54,6 +54,8 @@
                            data,
                            &reply,
                            IBinder::FLAG_ONEWAY);
+
+        reply.readExceptionCode();
     }
 };
 
@@ -73,6 +75,7 @@
             int32_t cameraId = data.readInt32();
 
             onStatusChanged(status, cameraId);
+            reply->writeNoException();
 
             return NO_ERROR;
         } break;
diff --git a/camera/IProCameraCallbacks.cpp b/camera/IProCameraCallbacks.cpp
index b9cd14d..0fdb85a 100644
--- a/camera/IProCameraCallbacks.cpp
+++ b/camera/IProCameraCallbacks.cpp
@@ -28,7 +28,7 @@
 
 #include <camera/IProCameraCallbacks.h>
 
-#include <system/camera_metadata.h>
+#include "camera/CameraMetadata.h"
 
 namespace android {
 
@@ -38,9 +38,6 @@
     RESULT_RECEIVED,
 };
 
-void readMetadata(const Parcel& data, camera_metadata_t** out);
-void writeMetadata(Parcel& data, camera_metadata_t* metadata);
-
 class BpProCameraCallbacks: public BpInterface<IProCameraCallbacks>
 {
 public:
@@ -75,7 +72,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
         data.writeInt32(frameId);
-        writeMetadata(data, result);
+        CameraMetadata::writeToParcel(data, result);
         remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
@@ -112,7 +109,7 @@
             CHECK_INTERFACE(IProCameraCallbacks, data, reply);
             int32_t frameId = data.readInt32();
             camera_metadata_t *result = NULL;
-            readMetadata(data, &result);
+            CameraMetadata::readFromParcel(data, &result);
             onResultReceived(frameId, result);
             return NO_ERROR;
             break;
diff --git a/camera/IProCameraUser.cpp b/camera/IProCameraUser.cpp
index 4c4dec3..8f22124 100644
--- a/camera/IProCameraUser.cpp
+++ b/camera/IProCameraUser.cpp
@@ -15,7 +15,7 @@
 ** limitations under the License.
 */
 
-//#define LOG_NDEBUG 0
+// #define LOG_NDEBUG 0
 #define LOG_TAG "IProCameraUser"
 #include <utils/Log.h>
 #include <stdint.h>
@@ -24,13 +24,10 @@
 #include <camera/IProCameraUser.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
-#include <system/camera_metadata.h>
+#include "camera/CameraMetadata.h"
 
 namespace android {
 
-typedef Parcel::WritableBlob WritableBlob;
-typedef Parcel::ReadableBlob ReadableBlob;
-
 enum {
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
     CONNECT,
@@ -46,107 +43,6 @@
     GET_CAMERA_INFO,
 };
 
-/**
-  * Caller becomes the owner of the new metadata
-  * 'const Parcel' doesnt prevent us from calling the read functions.
-  *  which is interesting since it changes the internal state
-  *
-  * NULL can be returned when no metadata was sent, OR if there was an issue
-  * unpacking the serialized data (i.e. bad parcel or invalid structure).
-  */
-void readMetadata(const Parcel& data, camera_metadata_t** out) {
-
-    status_t err = OK;
-
-    camera_metadata_t* metadata = NULL;
-
-    if (out) {
-        *out = NULL;
-    }
-
-    // arg0 = metadataSize (int32)
-    int32_t metadataSizeTmp = -1;
-    if ((err = data.readInt32(&metadataSizeTmp)) != OK) {
-        ALOGE("%s: Failed to read metadata size (error %d %s)",
-              __FUNCTION__, err, strerror(-err));
-        return;
-    }
-    const size_t metadataSize = static_cast<size_t>(metadataSizeTmp);
-
-    if (metadataSize == 0) {
-        return;
-    }
-
-    // NOTE: this doesn't make sense to me. shouldnt the blob
-    // know how big it is? why do we have to specify the size
-    // to Parcel::readBlob ?
-
-    ReadableBlob blob;
-    // arg1 = metadata (blob)
-    do {
-        if ((err = data.readBlob(metadataSize, &blob)) != OK) {
-            ALOGE("%s: Failed to read metadata blob (sized %d). Possible "
-                  " serialization bug. Error %d %s",
-                  __FUNCTION__, metadataSize, err, strerror(-err));
-            break;
-        }
-        const camera_metadata_t* tmp =
-                       reinterpret_cast<const camera_metadata_t*>(blob.data());
-
-        metadata = allocate_copy_camera_metadata_checked(tmp, metadataSize);
-    } while(0);
-    blob.release();
-
-    if (out) {
-        *out = metadata;
-    } else if (metadata != NULL) {
-        free_camera_metadata(metadata);
-    }
-}
-
-/**
-  * Caller retains ownership of metadata
-  * - Write 2 (int32 + blob) args in the current position
-  */
-void writeMetadata(Parcel& data, camera_metadata_t* metadata) {
-    // arg0 = metadataSize (int32)
-
-    if (metadata == NULL) {
-        data.writeInt32(0);
-        return;
-    }
-
-    const size_t metadataSize = get_camera_metadata_compact_size(metadata);
-    data.writeInt32(static_cast<int32_t>(metadataSize));
-
-    // arg1 = metadata (blob)
-    WritableBlob blob;
-    {
-        data.writeBlob(metadataSize, &blob);
-        copy_camera_metadata(blob.data(), metadataSize, metadata);
-
-        IF_ALOGV() {
-            if (validate_camera_metadata_structure(
-                        (const camera_metadata_t*)blob.data(),
-                        &metadataSize) != OK) {
-                ALOGV("%s: Failed to validate metadata %p after writing blob",
-                       __FUNCTION__, blob.data());
-            } else {
-                ALOGV("%s: Metadata written to blob. Validation success",
-                        __FUNCTION__);
-            }
-        }
-
-        // Not too big of a problem since receiving side does hard validation
-        if (validate_camera_metadata_structure(metadata, &metadataSize) != OK) {
-            ALOGW("%s: Failed to validate metadata %p before writing blob",
-                   __FUNCTION__, metadata);
-        }
-
-    }
-    blob.release();
-}
-
 class BpProCameraUser: public BpInterface<IProCameraUser>
 {
 public:
@@ -162,6 +58,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
         remote()->transact(DISCONNECT, data, &reply);
+        reply.readExceptionCode();
     }
 
     virtual status_t connect(const sp<IProCameraCallbacks>& cameraClient)
@@ -213,7 +110,7 @@
         data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
 
         // arg0+arg1
-        writeMetadata(data, metadata);
+        CameraMetadata::writeToParcel(data, metadata);
 
         // arg2 = streaming (bool)
         data.writeInt32(streaming);
@@ -274,7 +171,7 @@
         data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
         data.writeInt32(templateId);
         remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
-        readMetadata(reply, /*out*/request);
+        CameraMetadata::readFromParcel(reply, /*out*/request);
         return reply.readInt32();
     }
 
@@ -285,7 +182,7 @@
         data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
         data.writeInt32(cameraId);
         remote()->transact(GET_CAMERA_INFO, data, &reply);
-        readMetadata(reply, /*out*/info);
+        CameraMetadata::readFromParcel(reply, /*out*/info);
         return reply.readInt32();
     }
 
@@ -307,6 +204,7 @@
             ALOGV("DISCONNECT");
             CHECK_INTERFACE(IProCameraUser, data, reply);
             disconnect();
+            reply->writeNoException();
             return NO_ERROR;
         } break;
         case CONNECT: {
@@ -341,7 +239,7 @@
         case SUBMIT_REQUEST: {
             CHECK_INTERFACE(IProCameraUser, data, reply);
             camera_metadata_t* metadata;
-            readMetadata(data, /*out*/&metadata);
+            CameraMetadata::readFromParcel(data, /*out*/&metadata);
 
             // arg2 = streaming (bool)
             bool streaming = data.readInt32();
@@ -393,7 +291,7 @@
             status_t ret;
             ret = createDefaultRequest(templateId, &request);
 
-            writeMetadata(*reply, request);
+            CameraMetadata::writeToParcel(*reply, request);
             reply->writeInt32(ret);
 
             free_camera_metadata(request);
@@ -409,7 +307,7 @@
             status_t ret;
             ret = getCameraInfo(cameraId, &info);
 
-            writeMetadata(*reply, info);
+            CameraMetadata::writeToParcel(*reply, info);
             reply->writeInt32(ret);
 
             free_camera_metadata(info);
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
index fec5461..577c760 100644
--- a/camera/ProCamera.cpp
+++ b/camera/ProCamera.cpp
@@ -26,7 +26,6 @@
 #include <binder/IMemory.h>
 
 #include <camera/ProCamera.h>
-#include <camera/ICameraService.h>
 #include <camera/IProCameraUser.h>
 #include <camera/IProCameraCallbacks.h>
 
@@ -47,6 +46,9 @@
 {
 }
 
+CameraTraits<ProCamera>::TCamConnectService CameraTraits<ProCamera>::fnConnectService =
+        &ICameraService::connectPro;
+
 ProCamera::~ProCamera()
 {
 
@@ -247,11 +249,11 @@
     sp <IProCameraUser> c = mCamera;
     if (c == 0) return NO_INIT;
 
-    sp<CpuConsumer> cc = new CpuConsumer(heapCount, synchronousMode);
+    sp<BufferQueue> bq = new BufferQueue();
+    sp<CpuConsumer> cc = new CpuConsumer(bq, heapCount/*, synchronousMode*/);
     cc->setName(String8("ProCamera::mCpuConsumer"));
 
-    sp<Surface> stc = new Surface(
-        cc->getProducerInterface());
+    sp<Surface> stc = new Surface(bq);
 
     status_t s = createStream(width, height, format,
                               stc->getIGraphicBufferProducer(),
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
new file mode 100644
index 0000000..57e5319
--- /dev/null
+++ b/camera/camera2/CaptureRequest.cpp
@@ -0,0 +1,124 @@
+/*
+**
+** Copyright 2013, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "CameraRequest"
+#include <utils/Log.h>
+
+#include <camera/camera2/CaptureRequest.h>
+
+#include <binder/Parcel.h>
+#include <gui/Surface.h>
+
+namespace android {
+
+status_t CaptureRequest::readFromParcel(Parcel* parcel) {
+    if (parcel == NULL) {
+        ALOGE("%s: Null parcel", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    mMetadata.clear();
+    mSurfaceList.clear();
+
+    status_t err;
+
+    if ((err = mMetadata.readFromParcel(parcel)) != OK) {
+        ALOGE("%s: Failed to read metadata from parcel", __FUNCTION__);
+        return err;
+    }
+    ALOGV("%s: Read metadata from parcel", __FUNCTION__);
+
+    int32_t size;
+    if ((err = parcel->readInt32(&size)) != OK) {
+        ALOGE("%s: Failed to read surface list size from parcel", __FUNCTION__);
+        return err;
+    }
+    ALOGV("%s: Read surface list size = %d", __FUNCTION__, size);
+
+    // Do not distinguish null arrays from 0-sized arrays.
+    for (int i = 0; i < size; ++i) {
+        // Parcel.writeParcelableArray
+        size_t len;
+        const char16_t* className = parcel->readString16Inplace(&len);
+        ALOGV("%s: Read surface class = %s", __FUNCTION__,
+              className != NULL ? String8(className).string() : "<null>");
+
+        if (className == NULL) {
+            continue;
+        }
+
+        // Surface.writeToParcel
+        String16 name = parcel->readString16();
+        ALOGV("%s: Read surface name = %s",
+              __FUNCTION__, String8(name).string());
+        sp<IBinder> binder(parcel->readStrongBinder());
+        ALOGV("%s: Read surface binder = %p",
+              __FUNCTION__, binder.get());
+
+        sp<Surface> surface;
+
+        if (binder != NULL) {
+            sp<IGraphicBufferProducer> gbp =
+                    interface_cast<IGraphicBufferProducer>(binder);
+            surface = new Surface(gbp);
+        }
+
+        mSurfaceList.push_back(surface);
+    }
+
+    return OK;
+}
+
+status_t CaptureRequest::writeToParcel(Parcel* parcel) const {
+    if (parcel == NULL) {
+        ALOGE("%s: Null parcel", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    status_t err;
+
+    if ((err = mMetadata.writeToParcel(parcel)) != OK) {
+        return err;
+    }
+
+    int32_t size = static_cast<int32_t>(mSurfaceList.size());
+
+    // Send 0-sized arrays when it's empty. Do not send null arrays.
+    parcel->writeInt32(size);
+
+    for (int32_t i = 0; i < size; ++i) {
+        sp<Surface> surface = mSurfaceList[i];
+
+        sp<IBinder> binder;
+        if (surface != 0) {
+            binder = surface->getIGraphicBufferProducer()->asBinder();
+        }
+
+        // not sure if readParcelableArray does this, hard to tell from source
+        parcel->writeString16(String16("android.view.Surface"));
+
+        // Surface.writeToParcel
+        parcel->writeString16(String16("unknown_name"));
+        // Surface.nativeWriteToParcel
+        parcel->writeStrongBinder(binder);
+    }
+
+    return OK;
+}
+
+}; // namespace android
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
new file mode 100644
index 0000000..3cec1f4
--- /dev/null
+++ b/camera/camera2/ICameraDeviceCallbacks.cpp
@@ -0,0 +1,114 @@
+/*
+**
+** Copyright 2013, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ICameraDeviceCallbacks"
+#include <utils/Log.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <utils/Mutex.h>
+
+#include <camera/camera2/ICameraDeviceCallbacks.h>
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+enum {
+    NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
+    RESULT_RECEIVED,
+};
+
+class BpCameraDeviceCallbacks: public BpInterface<ICameraDeviceCallbacks>
+{
+public:
+    BpCameraDeviceCallbacks(const sp<IBinder>& impl)
+        : BpInterface<ICameraDeviceCallbacks>(impl)
+    {
+    }
+
+    // generic callback from camera service to app
+    void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
+    {
+        ALOGV("notifyCallback");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
+        data.writeInt32(msgType);
+        data.writeInt32(ext1);
+        data.writeInt32(ext2);
+        remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
+        data.writeNoException();
+    }
+
+    void onResultReceived(int32_t requestId, const CameraMetadata& result) {
+        ALOGV("onResultReceived");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
+        data.writeInt32(requestId);
+        data.writeInt32(1); // to mark presence of metadata object
+        result.writeToParcel(&data);
+        remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
+        data.writeNoException();
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CameraDeviceCallbacks,
+                         "android.hardware.camera2.ICameraDeviceCallbacks");
+
+// ----------------------------------------------------------------------
+
+status_t BnCameraDeviceCallbacks::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    ALOGV("onTransact - code = %d", code);
+    switch(code) {
+        case NOTIFY_CALLBACK: {
+            ALOGV("NOTIFY_CALLBACK");
+            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
+            int32_t msgType = data.readInt32();
+            int32_t ext1 = data.readInt32();
+            int32_t ext2 = data.readInt32();
+            notifyCallback(msgType, ext1, ext2);
+            data.readExceptionCode();
+            return NO_ERROR;
+        } break;
+        case RESULT_RECEIVED: {
+            ALOGV("RESULT_RECEIVED");
+            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
+            int32_t requestId = data.readInt32();
+            CameraMetadata result;
+            if (data.readInt32() != 0) {
+                result.readFromParcel(const_cast<Parcel*>(&data));
+            } else {
+                ALOGW("No metadata object is present in result");
+            }
+            onResultReceived(requestId, result);
+            data.readExceptionCode();
+            return NO_ERROR;
+            break;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
new file mode 100644
index 0000000..923f487
--- /dev/null
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -0,0 +1,322 @@
+/*
+**
+** Copyright 2013, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "ICameraDeviceUser"
+#include <utils/Log.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/Parcel.h>
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <camera/CameraMetadata.h>
+#include <camera/camera2/CaptureRequest.h>
+
+namespace android {
+
+typedef Parcel::WritableBlob WritableBlob;
+typedef Parcel::ReadableBlob ReadableBlob;
+
+enum {
+    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
+    SUBMIT_REQUEST,
+    CANCEL_REQUEST,
+    DELETE_STREAM,
+    CREATE_STREAM,
+    CREATE_DEFAULT_REQUEST,
+    GET_CAMERA_INFO,
+    WAIT_UNTIL_IDLE,
+};
+
+class BpCameraDeviceUser : public BpInterface<ICameraDeviceUser>
+{
+public:
+    BpCameraDeviceUser(const sp<IBinder>& impl)
+        : BpInterface<ICameraDeviceUser>(impl)
+    {
+    }
+
+    // disconnect from camera service
+    void disconnect()
+    {
+        ALOGV("disconnect");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        remote()->transact(DISCONNECT, data, &reply);
+        reply.readExceptionCode();
+    }
+
+    virtual int submitRequest(sp<CaptureRequest> request, bool streaming)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+
+        // arg0 = CaptureRequest
+        if (request != 0) {
+            data.writeInt32(1);
+            request->writeToParcel(&data);
+        } else {
+            data.writeInt32(0);
+        }
+
+        // arg1 = streaming (bool)
+        data.writeInt32(streaming);
+
+        remote()->transact(SUBMIT_REQUEST, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    virtual status_t cancelRequest(int requestId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(requestId);
+
+        remote()->transact(CANCEL_REQUEST, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    virtual status_t deleteStream(int streamId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(streamId);
+
+        remote()->transact(DELETE_STREAM, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    virtual status_t createStream(int width, int height, int format,
+                          const sp<IGraphicBufferProducer>& bufferProducer)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(width);
+        data.writeInt32(height);
+        data.writeInt32(format);
+
+        data.writeInt32(1); // marker that bufferProducer is not null
+        data.writeString16(String16("unknown_name")); // name of surface
+        sp<IBinder> b(bufferProducer->asBinder());
+        data.writeStrongBinder(b);
+
+        remote()->transact(CREATE_STREAM, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    // Create a request object from a template.
+    virtual status_t createDefaultRequest(int templateId,
+                                          /*out*/
+                                          CameraMetadata* request)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(templateId);
+        remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
+
+        reply.readExceptionCode();
+        status_t result = reply.readInt32();
+
+        CameraMetadata out;
+        if (reply.readInt32() != 0) {
+            out.readFromParcel(&reply);
+        }
+
+        if (request != NULL) {
+            request->swap(out);
+        }
+        return result;
+    }
+
+
+    virtual status_t getCameraInfo(CameraMetadata* info)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        remote()->transact(GET_CAMERA_INFO, data, &reply);
+
+        reply.readExceptionCode();
+        status_t result = reply.readInt32();
+
+        CameraMetadata out;
+        if (reply.readInt32() != 0) {
+            out.readFromParcel(&reply);
+        }
+
+        if (info != NULL) {
+            info->swap(out);
+        }
+
+        return result;
+    }
+
+    virtual status_t waitUntilIdle()
+    {
+        ALOGV("waitUntilIdle");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        remote()->transact(WAIT_UNTIL_IDLE, data, &reply);
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+private:
+
+
+};
+
+IMPLEMENT_META_INTERFACE(CameraDeviceUser,
+                         "android.hardware.camera2.ICameraDeviceUser");
+
+// ----------------------------------------------------------------------
+
+status_t BnCameraDeviceUser::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch(code) {
+        case DISCONNECT: {
+            ALOGV("DISCONNECT");
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            disconnect();
+            reply->writeNoException();
+            return NO_ERROR;
+        } break;
+        case SUBMIT_REQUEST: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            // arg0 = request
+            sp<CaptureRequest> request;
+            if (data.readInt32() != 0) {
+                request = new CaptureRequest();
+                request->readFromParcel(const_cast<Parcel*>(&data));
+            }
+
+            // arg1 = streaming (bool)
+            bool streaming = data.readInt32();
+
+            // return code: requestId (int32)
+            reply->writeNoException();
+            reply->writeInt32(submitRequest(request, streaming));
+
+            return NO_ERROR;
+        } break;
+        case CANCEL_REQUEST: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int requestId = data.readInt32();
+            reply->writeNoException();
+            reply->writeInt32(cancelRequest(requestId));
+            return NO_ERROR;
+        } break;
+        case DELETE_STREAM: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int streamId = data.readInt32();
+            reply->writeNoException();
+            reply->writeInt32(deleteStream(streamId));
+            return NO_ERROR;
+        } break;
+        case CREATE_STREAM: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int width, height, format;
+
+            width = data.readInt32();
+            ALOGV("%s: CREATE_STREAM: width = %d", __FUNCTION__, width);
+            height = data.readInt32();
+            ALOGV("%s: CREATE_STREAM: height = %d", __FUNCTION__, height);
+            format = data.readInt32();
+            ALOGV("%s: CREATE_STREAM: format = %d", __FUNCTION__, format);
+
+            sp<IGraphicBufferProducer> bp;
+            if (data.readInt32() != 0) {
+                String16 name = data.readString16();
+                bp = interface_cast<IGraphicBufferProducer>(
+                        data.readStrongBinder());
+
+                ALOGV("%s: CREATE_STREAM: bp = %p, name = %s", __FUNCTION__,
+                      bp.get(), String8(name).string());
+            } else {
+                ALOGV("%s: CREATE_STREAM: bp = unset, name = unset",
+                      __FUNCTION__);
+            }
+
+            status_t ret;
+            ret = createStream(width, height, format, bp);
+
+            reply->writeNoException();
+            ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
+            reply->writeInt32(ret);
+            ALOGV("%s: CREATE_STREAM: write ret = %d", __FUNCTION__, ret);
+
+            return NO_ERROR;
+        } break;
+
+        case CREATE_DEFAULT_REQUEST: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            int templateId = data.readInt32();
+
+            CameraMetadata request;
+            status_t ret;
+            ret = createDefaultRequest(templateId, &request);
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+
+            // out-variables are after exception and return value
+            reply->writeInt32(1); // to mark presence of metadata object
+            request.writeToParcel(const_cast<Parcel*>(reply));
+
+            return NO_ERROR;
+        } break;
+        case GET_CAMERA_INFO: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            CameraMetadata info;
+            status_t ret;
+            ret = getCameraInfo(&info);
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+
+            // out-variables are after exception and return value
+            reply->writeInt32(1); // to mark presence of metadata object
+            info.writeToParcel(reply);
+
+            return NO_ERROR;
+        } break;
+        case WAIT_UNTIL_IDLE: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            reply->writeNoException();
+            reply->writeInt32(waitUntilIdle());
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
new file mode 100644
index 0000000..b4a5947
--- /dev/null
+++ b/cmds/screenrecord/Android.mk
@@ -0,0 +1,38 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+	screenrecord.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright libmedia libutils libbinder libstagefright_foundation \
+	libjpeg libgui libcutils liblog
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright \
+	frameworks/av/media/libstagefright/include \
+	$(TOP)/frameworks/native/include/media/openmax \
+	external/jpeg
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_MODULE:= screenrecord
+
+include $(BUILD_EXECUTABLE)
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
new file mode 100644
index 0000000..3f8567c
--- /dev/null
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -0,0 +1,614 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ScreenRecord"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <utils/Errors.h>
+#include <utils/Thread.h>
+
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <gui/ISurfaceComposer.h>
+#include <ui/DisplayInfo.h>
+#include <media/openmax/OMX_IVCommon.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaMuxer.h>
+#include <media/ICrypto.h>
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <getopt.h>
+
+using namespace android;
+
+// Command-line parameters.
+static bool gVerbose = false;               // chatty on stdout
+static bool gRotate = false;                // rotate 90 degrees
+static bool gSizeSpecified = false;         // was size explicitly requested?
+static uint32_t gVideoWidth = 0;            // default width+height
+static uint32_t gVideoHeight = 0;
+static uint32_t gBitRate = 4000000;         // 4Mbps
+
+// Set by signal handler to stop recording.
+static bool gStopRequested;
+
+// Previous signal handler state, restored after first hit.
+static struct sigaction gOrigSigactionINT;
+static struct sigaction gOrigSigactionHUP;
+
+static const uint32_t kMinBitRate = 100000;         // 0.1Mbps
+static const uint32_t kMaxBitRate = 100 * 1000000;  // 100Mbps
+
+/*
+ * Catch keyboard interrupt signals.  On receipt, the "stop requested"
+ * flag is raised, and the original handler is restored (so that, if
+ * we get stuck finishing, a second Ctrl-C will kill the process).
+ */
+static void signalCatcher(int signum)
+{
+    gStopRequested = true;
+    switch (signum) {
+    case SIGINT:
+        sigaction(SIGINT, &gOrigSigactionINT, NULL);
+        break;
+    case SIGHUP:
+        sigaction(SIGHUP, &gOrigSigactionHUP, NULL);
+        break;
+    default:
+        abort();
+        break;
+    }
+}
+
+/*
+ * Configures signal handlers.  The previous handlers are saved.
+ *
+ * If the command is run from an interactive adb shell, we get SIGINT
+ * when Ctrl-C is hit.  If we're run from the host, the local adb process
+ * gets the signal, and we get a SIGHUP when the terminal disconnects.
+ */
+static status_t configureSignals()
+{
+    struct sigaction act;
+    memset(&act, 0, sizeof(act));
+    act.sa_handler = signalCatcher;
+    if (sigaction(SIGINT, &act, &gOrigSigactionINT) != 0) {
+        status_t err = -errno;
+        fprintf(stderr, "Unable to configure SIGINT handler: %s\n",
+                strerror(errno));
+        return err;
+    }
+    if (sigaction(SIGHUP, &act, &gOrigSigactionHUP) != 0) {
+        status_t err = -errno;
+        fprintf(stderr, "Unable to configure SIGHUP handler: %s\n",
+                strerror(errno));
+        return err;
+    }
+    return NO_ERROR;
+}
+
+/*
+ * Returns "true" if the device is rotated 90 degrees.
+ */
+static bool isDeviceRotated(int orientation) {
+    return orientation != DISPLAY_ORIENTATION_0 &&
+            orientation != DISPLAY_ORIENTATION_180;
+}
+
+/*
+ * Configures and starts the MediaCodec encoder.  Obtains an input surface
+ * from the codec.
+ */
+static status_t prepareEncoder(float displayFps, sp<MediaCodec>* pCodec,
+        sp<IGraphicBufferProducer>* pBufferProducer) {
+    status_t err;
+
+    if (gVerbose) {
+        printf("Configuring recorder for %dx%d video at %.2fMbps\n",
+                gVideoWidth, gVideoHeight, gBitRate / 1000000.0);
+    }
+
+    sp<AMessage> format = new AMessage;
+    format->setInt32("width", gVideoWidth);
+    format->setInt32("height", gVideoHeight);
+    format->setString("mime", "video/avc");
+    format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+    format->setInt32("bitrate", gBitRate);
+    format->setFloat("frame-rate", displayFps);
+    format->setInt32("i-frame-interval", 10);
+
+    /// MediaCodec
+    sp<ALooper> looper = new ALooper;
+    looper->setName("screenrecord_looper");
+    looper->start();
+    ALOGV("Creating codec");
+    sp<MediaCodec> codec = MediaCodec::CreateByType(looper, "video/avc", true);
+    if (codec == NULL) {
+        fprintf(stderr, "ERROR: unable to create video/avc codec instance\n");
+        return UNKNOWN_ERROR;
+    }
+    err = codec->configure(format, NULL, NULL,
+            MediaCodec::CONFIGURE_FLAG_ENCODE);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to configure codec (err=%d)\n", err);
+        return err;
+    }
+
+    ALOGV("Creating buffer producer");
+    sp<IGraphicBufferProducer> bufferProducer;
+    err = codec->createInputSurface(&bufferProducer);
+    if (err != NO_ERROR) {
+        fprintf(stderr,
+            "ERROR: unable to create encoder input surface (err=%d)\n", err);
+        return err;
+    }
+
+    ALOGV("Starting codec");
+    err = codec->start();
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to start codec (err=%d)\n", err);
+        return err;
+    }
+
+    ALOGV("Codec prepared");
+    *pCodec = codec;
+    *pBufferProducer = bufferProducer;
+    return 0;
+}
+
+/*
+ * Configures the virtual display.  When this completes, virtual display
+ * frames will start being sent to the encoder's surface.
+ */
+static status_t prepareVirtualDisplay(const DisplayInfo& mainDpyInfo,
+        const sp<IGraphicBufferProducer>& bufferProducer,
+        sp<IBinder>* pDisplayHandle) {
+    status_t err;
+
+    // Set the region of the layer stack we're interested in, which in our
+    // case is "all of it".  If the app is rotated (so that the width of the
+    // app is based on the height of the display), reverse width/height.
+    bool deviceRotated = isDeviceRotated(mainDpyInfo.orientation);
+    uint32_t sourceWidth, sourceHeight;
+    if (!deviceRotated) {
+        sourceWidth = mainDpyInfo.w;
+        sourceHeight = mainDpyInfo.h;
+    } else {
+        ALOGV("using rotated width/height");
+        sourceHeight = mainDpyInfo.w;
+        sourceWidth = mainDpyInfo.h;
+    }
+    Rect layerStackRect(sourceWidth, sourceHeight);
+
+    // We need to preserve the aspect ratio of the display.
+    float displayAspect = (float) sourceHeight / (float) sourceWidth;
+
+
+    // Set the way we map the output onto the display surface (which will
+    // be e.g. 1280x720 for a 720p video).  The rect is interpreted
+    // post-rotation, so if the display is rotated 90 degrees we need to
+    // "pre-rotate" it by flipping width/height, so that the orientation
+    // adjustment changes it back.
+    //
+    // We might want to encode a portrait display as landscape to use more
+    // of the screen real estate.  (If players respect a 90-degree rotation
+    // hint, we can essentially get a 720x1280 video instead of 1280x720.)
+    // In that case, we swap the configured video width/height and then
+    // supply a rotation value to the display projection.
+    uint32_t videoWidth, videoHeight;
+    uint32_t outWidth, outHeight;
+    if (!gRotate) {
+        videoWidth = gVideoWidth;
+        videoHeight = gVideoHeight;
+    } else {
+        videoWidth = gVideoHeight;
+        videoHeight = gVideoWidth;
+    }
+    if (videoHeight > (uint32_t)(videoWidth * displayAspect)) {
+        // limited by narrow width; reduce height
+        outWidth = videoWidth;
+        outHeight = (uint32_t)(videoWidth * displayAspect);
+    } else {
+        // limited by short height; restrict width
+        outHeight = videoHeight;
+        outWidth = (uint32_t)(videoHeight / displayAspect);
+    }
+    uint32_t offX, offY;
+    offX = (videoWidth - outWidth) / 2;
+    offY = (videoHeight - outHeight) / 2;
+    Rect displayRect(offX, offY, offX + outWidth, offY + outHeight);
+
+    if (gVerbose) {
+        if (gRotate) {
+            printf("Rotated content area is %ux%u at offset x=%d y=%d\n",
+                    outHeight, outWidth, offY, offX);
+        } else {
+            printf("Content area is %ux%u at offset x=%d y=%d\n",
+                    outWidth, outHeight, offX, offY);
+        }
+    }
+
+
+    sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
+            String8("ScreenRecorder"), false /* secure */);
+
+    SurfaceComposerClient::openGlobalTransaction();
+    SurfaceComposerClient::setDisplaySurface(dpy, bufferProducer);
+    SurfaceComposerClient::setDisplayProjection(dpy,
+            gRotate ? DISPLAY_ORIENTATION_90 : DISPLAY_ORIENTATION_0,
+            layerStackRect, displayRect);
+    SurfaceComposerClient::setDisplayLayerStack(dpy, 0);    // default stack
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    *pDisplayHandle = dpy;
+
+    return NO_ERROR;
+}
+
+/*
+ * Runs the MediaCodec encoder, sending the output to the MediaMuxer.  The
+ * input frames are coming from the virtual display as fast as SurfaceFlinger
+ * wants to send them.
+ *
+ * The muxer must *not* have been started before calling.
+ */
+static status_t runEncoder(const sp<MediaCodec>& encoder,
+        const sp<MediaMuxer>& muxer) {
+    static const int kTimeout = 250000;   // be responsive on signal
+    status_t err;
+    ssize_t trackIdx = -1;
+    uint32_t debugNumFrames = 0;
+    time_t debugStartWhen = time(NULL);
+
+    Vector<sp<ABuffer> > buffers;
+    err = encoder->getOutputBuffers(&buffers);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "Unable to get output buffers (err=%d)\n", err);
+        return err;
+    }
+
+    // This is set by the signal handler.
+    gStopRequested = false;
+
+    // Run until we're signaled.
+    while (!gStopRequested) {
+        size_t bufIndex, offset, size;
+        int64_t ptsUsec;
+        uint32_t flags;
+        ALOGV("Calling dequeueOutputBuffer");
+        err = encoder->dequeueOutputBuffer(&bufIndex, &offset, &size, &ptsUsec,
+                &flags, kTimeout);
+        ALOGV("dequeueOutputBuffer returned %d", err);
+        switch (err) {
+        case NO_ERROR:
+            // got a buffer
+            if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) {
+                // ignore this -- we passed the CSD into MediaMuxer when
+                // we got the format change notification
+                ALOGV("Got codec config buffer (%zu bytes); ignoring", size);
+                size = 0;
+            }
+            if (size != 0) {
+                ALOGV("Got data in buffer %zu, size=%zu, pts=%lld",
+                        bufIndex, size, ptsUsec);
+                CHECK(trackIdx != -1);
+
+                // If the virtual display isn't providing us with timestamps,
+                // use the current time.
+                if (ptsUsec == 0) {
+                    ptsUsec = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;
+                }
+
+                // The MediaMuxer docs are unclear, but it appears that we
+                // need to pass either the full set of BufferInfo flags, or
+                // (flags & BUFFER_FLAG_SYNCFRAME).
+                err = muxer->writeSampleData(buffers[bufIndex], trackIdx,
+                        ptsUsec, flags);
+                if (err != NO_ERROR) {
+                    fprintf(stderr, "Failed writing data to muxer (err=%d)\n",
+                            err);
+                    return err;
+                }
+                debugNumFrames++;
+            }
+            err = encoder->releaseOutputBuffer(bufIndex);
+            if (err != NO_ERROR) {
+                fprintf(stderr, "Unable to release output buffer (err=%d)\n",
+                        err);
+                return err;
+            }
+            if ((flags & MediaCodec::BUFFER_FLAG_EOS) != 0) {
+                // Not expecting EOS from SurfaceFlinger.  Go with it.
+                ALOGD("Received end-of-stream");
+                gStopRequested = true;
+            }
+            break;
+        case -EAGAIN:                       // INFO_TRY_AGAIN_LATER
+            // not expected with infinite timeout
+            ALOGV("Got -EAGAIN, looping");
+            break;
+        case INFO_FORMAT_CHANGED:           // INFO_OUTPUT_FORMAT_CHANGED
+            {
+                // format includes CSD, which we must provide to muxer
+                ALOGV("Encoder format changed");
+                sp<AMessage> newFormat;
+                encoder->getOutputFormat(&newFormat);
+                trackIdx = muxer->addTrack(newFormat);
+                ALOGV("Starting muxer");
+                err = muxer->start();
+                if (err != NO_ERROR) {
+                    fprintf(stderr, "Unable to start muxer (err=%d)\n", err);
+                    return err;
+                }
+            }
+            break;
+        case INFO_OUTPUT_BUFFERS_CHANGED:   // INFO_OUTPUT_BUFFERS_CHANGED
+            // not expected for an encoder; handle it anyway
+            ALOGV("Encoder buffers changed");
+            err = encoder->getOutputBuffers(&buffers);
+            if (err != NO_ERROR) {
+                fprintf(stderr,
+                        "Unable to get new output buffers (err=%d)\n", err);
+            }
+            break;
+        default:
+            ALOGW("Got weird result %d from dequeueOutputBuffer", err);
+            return err;
+        }
+    }
+
+    ALOGV("Encoder stopping (req=%d)", gStopRequested);
+    if (gVerbose) {
+        printf("Encoder stopping; recorded %u frames in %ld seconds\n",
+                debugNumFrames, time(NULL) - debugStartWhen);
+    }
+    return NO_ERROR;
+}
+
+/*
+ * Main "do work" method.
+ *
+ * Configures codec, muxer, and virtual display, then starts moving bits
+ * around.
+ */
+static status_t recordScreen(const char* fileName) {
+    status_t err;
+
+    // Configure signal handler.
+    err = configureSignals();
+    if (err != NO_ERROR) return err;
+
+    // Start Binder thread pool.  MediaCodec needs to be able to receive
+    // messages from mediaserver.
+    sp<ProcessState> self = ProcessState::self();
+    self->startThreadPool();
+
+    // Get main display parameters.
+    sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
+            ISurfaceComposer::eDisplayIdMain);
+    DisplayInfo mainDpyInfo;
+    err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to get display characteristics\n");
+        return err;
+    }
+    if (gVerbose) {
+        printf("Main display is %dx%d @%.2ffps (orientation=%u)\n",
+                mainDpyInfo.w, mainDpyInfo.h, mainDpyInfo.fps,
+                mainDpyInfo.orientation);
+    }
+
+    bool rotated = isDeviceRotated(mainDpyInfo.orientation);
+    if (gVideoWidth == 0) {
+        gVideoWidth = rotated ? mainDpyInfo.h : mainDpyInfo.w;
+    }
+    if (gVideoHeight == 0) {
+        gVideoHeight = rotated ? mainDpyInfo.w : mainDpyInfo.h;
+    }
+
+    // Configure and start the encoder.
+    sp<MediaCodec> encoder;
+    sp<IGraphicBufferProducer> bufferProducer;
+    err = prepareEncoder(mainDpyInfo.fps, &encoder, &bufferProducer);
+    if (err != NO_ERROR && !gSizeSpecified) {
+        ALOGV("Retrying with 720p");
+        if (gVideoWidth != 1280 && gVideoHeight != 720) {
+            fprintf(stderr, "WARNING: failed at %dx%d, retrying at 720p\n",
+                    gVideoWidth, gVideoHeight);
+            gVideoWidth = 1280;
+            gVideoHeight = 720;
+            err = prepareEncoder(mainDpyInfo.fps, &encoder, &bufferProducer);
+        }
+    }
+    if (err != NO_ERROR) {
+        return err;
+    }
+
+    // Configure virtual display.
+    sp<IBinder> dpy;
+    err = prepareVirtualDisplay(mainDpyInfo, bufferProducer, &dpy);
+    if (err != NO_ERROR) return err;
+
+    // Configure, but do not start, muxer.
+    sp<MediaMuxer> muxer = new MediaMuxer(fileName,
+            MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+    if (gRotate) {
+        muxer->setOrientationHint(90);
+    }
+
+    // Main encoder loop.
+    err = runEncoder(encoder, muxer);
+    if (err != NO_ERROR) return err;
+
+    if (gVerbose) {
+        printf("Stopping encoder and muxer\n");
+    }
+
+    // Shut everything down, starting with the producer side.
+    bufferProducer = NULL;
+    SurfaceComposerClient::destroyDisplay(dpy);
+
+    encoder->stop();
+    muxer->stop();
+    encoder->release();
+
+    return 0;
+}
+
+/*
+ * Parses a string of the form "1280x720".
+ *
+ * Returns true on success.
+ */
+static bool parseWidthHeight(const char* widthHeight, uint32_t* pWidth,
+        uint32_t* pHeight) {
+    long width, height;
+    char* end;
+
+    // Must specify base 10, or "0x0" gets parsed differently.
+    width = strtol(widthHeight, &end, 10);
+    if (end == widthHeight || *end != 'x' || *(end+1) == '\0') {
+        // invalid chars in width, or missing 'x', or missing height
+        return false;
+    }
+    height = strtol(end + 1, &end, 10);
+    if (*end != '\0') {
+        // invalid chars in height
+        return false;
+    }
+
+    *pWidth = width;
+    *pHeight = height;
+    return true;
+}
+
+/*
+ * Dumps usage on stderr.
+ */
+static void usage() {
+    fprintf(stderr,
+        "Usage: screenrecord [options] <filename>\n"
+        "\n"
+        "Records the device's display to a .mp4 file.\n"
+        "\n"
+        "Options:\n"
+        "--size WIDTHxHEIGHT\n"
+        "    Set the video size, e.g. \"1280x720\".  For best results, use\n"
+        "    a size supported by the AVC encoder.\n"
+        "--bit-rate RATE\n"
+        "    Set the video bit rate, in megabits per second.  Default 4Mbps.\n"
+        "--rotate\n"
+        "    Rotate the output 90 degrees.\n"
+        "--verbose\n"
+        "    Display interesting information on stdout.\n"
+        "--help\n"
+        "    Show this message.\n"
+        "\n"
+        "Recording continues until Ctrl-C is hit.\n"
+        "\n"
+        );
+}
+
+/*
+ * Parses args and kicks things off.
+ */
+int main(int argc, char* const argv[]) {
+    static const struct option longOptions[] = {
+        { "help",       no_argument,        NULL, 'h' },
+        { "verbose",    no_argument,        NULL, 'v' },
+        { "size",       required_argument,  NULL, 's' },
+        { "bit-rate",   required_argument,  NULL, 'b' },
+        { "rotate",     no_argument,        NULL, 'r' },
+        { NULL,         0,                  NULL, 0 }
+    };
+
+    while (true) {
+        int optionIndex = 0;
+        int ic = getopt_long(argc, argv, "", longOptions, &optionIndex);
+        if (ic == -1) {
+            break;
+        }
+
+        switch (ic) {
+        case 'h':
+            usage();
+            return 0;
+        case 'v':
+            gVerbose = true;
+            break;
+        case 's':
+            if (!parseWidthHeight(optarg, &gVideoWidth, &gVideoHeight)) {
+                fprintf(stderr, "Invalid size '%s', must be width x height\n",
+                        optarg);
+                return 2;
+            }
+            if (gVideoWidth == 0 || gVideoHeight == 0) {
+                fprintf(stderr,
+                    "Invalid size %ux%u, width and height may not be zero\n",
+                    gVideoWidth, gVideoHeight);
+                return 2;
+            }
+            gSizeSpecified = true;
+            break;
+        case 'b':
+            gBitRate = atoi(optarg);
+            if (gBitRate < kMinBitRate || gBitRate > kMaxBitRate) {
+                fprintf(stderr,
+                        "Bit rate %dbps outside acceptable range [%d,%d]\n",
+                        gBitRate, kMinBitRate, kMaxBitRate);
+                return 2;
+            }
+            break;
+        case 'r':
+            gRotate = true;
+            break;
+        default:
+            if (ic != '?') {
+                fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
+            }
+            return 2;
+        }
+    }
+
+    if (optind != argc - 1) {
+        fprintf(stderr, "Must specify output file (see --help).\n");
+        return 2;
+    }
+
+    // MediaMuxer tries to create the file in the constructor, but we don't
+    // learn about the failure until muxer.start(), which returns a generic
+    // error code without logging anything.  We attempt to create the file
+    // now for better diagnostics.
+    const char* fileName = argv[optind];
+    int fd = open(fileName, O_CREAT | O_RDWR, 0644);
+    if (fd < 0) {
+        fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno));
+        return 1;
+    }
+    close(fd);
+
+    status_t err = recordScreen(fileName);
+    ALOGD(err == NO_ERROR ? "success" : "failed");
+    return (int) err;
+}
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 3844487..561ce02 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -19,7 +19,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= stagefright
 
@@ -42,7 +42,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= record
 
@@ -65,7 +65,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= recordvideo
 
@@ -89,7 +89,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= audioloop
 
@@ -112,7 +112,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= stream
 
@@ -135,7 +135,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= sf2
 
@@ -159,7 +159,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= codec
 
@@ -182,7 +182,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= muxer
 
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 115b07c..030bf1b 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -30,8 +30,6 @@
 #include <binder/ProcessState.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include "include/LiveSession.h"
 #include "include/NuCachedSource2.h"
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/DataSource.h>
@@ -53,6 +51,7 @@
 
 #include <fcntl.h>
 
+#include <gui/GLConsumer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 
@@ -618,7 +617,7 @@
         MEDIA_MIMETYPE_AUDIO_AMR_NB, MEDIA_MIMETYPE_AUDIO_AMR_WB,
         MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
         MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
-        MEDIA_MIMETYPE_VIDEO_VPX
+        MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9
     };
 
     const char *codecType = queryDecoders? "decoder" : "encoder";
@@ -678,7 +677,6 @@
     gDisplayHistogram = false;
 
     sp<ALooper> looper;
-    sp<LiveSession> liveSession;
 
     int res;
     while ((res = getopt(argc, argv, "han:lm:b:ptsrow:kxSTd:D:")) >= 0) {
@@ -940,8 +938,9 @@
         } else {
             CHECK(useSurfaceTexAlloc);
 
-            sp<GLConsumer> texture = new GLConsumer(0 /* tex */);
-            gSurface = new Surface(texture->getBufferQueue());
+            sp<BufferQueue> bq = new BufferQueue();
+            sp<GLConsumer> texture = new GLConsumer(bq, 0 /* tex */);
+            gSurface = new Surface(bq);
         }
 
         CHECK_EQ((status_t)OK,
@@ -961,9 +960,7 @@
 
         sp<DataSource> dataSource = DataSource::CreateFromURI(filename);
 
-        if (strncasecmp(filename, "sine:", 5)
-                && strncasecmp(filename, "httplive://", 11)
-                && dataSource == NULL) {
+        if (strncasecmp(filename, "sine:", 5) && dataSource == NULL) {
             fprintf(stderr, "Unable to create data source.\n");
             return 1;
         }
@@ -995,44 +992,21 @@
                 mediaSources.push(mediaSource);
             }
         } else {
-            sp<MediaExtractor> extractor;
+            sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
 
-            if (!strncasecmp("httplive://", filename, 11)) {
-                String8 uri("http://");
-                uri.append(filename + 11);
+            if (extractor == NULL) {
+                fprintf(stderr, "could not create extractor.\n");
+                return -1;
+            }
 
-                if (looper == NULL) {
-                    looper = new ALooper;
-                    looper->start();
-                }
-                liveSession = new LiveSession(NULL /* notify */);
-                looper->registerHandler(liveSession);
+            sp<MetaData> meta = extractor->getMetaData();
 
-                liveSession->connect(uri.string());
-                dataSource = liveSession->getDataSource();
+            if (meta != NULL) {
+                const char *mime;
+                CHECK(meta->findCString(kKeyMIMEType, &mime));
 
-                extractor =
-                    MediaExtractor::Create(
-                            dataSource, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
-                syncInfoPresent = false;
-            } else {
-                extractor = MediaExtractor::Create(dataSource);
-
-                if (extractor == NULL) {
-                    fprintf(stderr, "could not create extractor.\n");
-                    return -1;
-                }
-
-                sp<MetaData> meta = extractor->getMetaData();
-
-                if (meta != NULL) {
-                    const char *mime;
-                    CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-                    if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
-                        syncInfoPresent = false;
-                    }
+                if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
+                    syncInfoPresent = false;
                 }
             }
 
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 37626a4..81848b3 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -51,8 +51,14 @@
     typedef CameraListener        TCamListener;
     typedef ICamera               TCamUser;
     typedef ICameraClient         TCamCallbacks;
+    typedef status_t (ICameraService::*TCamConnectService)(const sp<ICameraClient>&,
+                                                           int, const String16&, int,
+                                                           /*out*/
+                                                           sp<ICamera>&);
+    static TCamConnectService     fnConnectService;
 };
 
+
 class Camera :
     public CameraBase<Camera>,
     public BnCameraClient
@@ -121,7 +127,15 @@
 
             void        setListener(const sp<CameraListener>& listener);
             void        setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
+
+            // Configure preview callbacks to app. Only one of the older
+            // callbacks or the callback surface can be active at the same time;
+            // enabling one will disable the other if active. Flags can be
+            // disabled by calling it with CAMERA_FRAME_CALLBACK_FLAG_NOOP, and
+            // Target by calling it with a NULL interface.
             void        setPreviewCallbackFlags(int preview_callback_flag);
+            status_t    setPreviewCallbackTarget(
+                    const sp<IGraphicBufferProducer>& callbackProducer);
 
             sp<ICameraRecordingProxy> getRecordingProxy();
 
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 9b08c0f..1b93157 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -54,9 +54,10 @@
 class CameraBase : public IBinder::DeathRecipient
 {
 public:
-    typedef typename TCamTraits::TCamListener    TCamListener;
-    typedef typename TCamTraits::TCamUser        TCamUser;
-    typedef typename TCamTraits::TCamCallbacks   TCamCallbacks;
+    typedef typename TCamTraits::TCamListener       TCamListener;
+    typedef typename TCamTraits::TCamUser           TCamUser;
+    typedef typename TCamTraits::TCamCallbacks      TCamCallbacks;
+    typedef typename TCamTraits::TCamConnectService TCamConnectService;
 
     static sp<TCam>      connect(int cameraId,
                                  const String16& clientPackageName,
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 8eeb2e7..fe2bd19 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -22,6 +22,7 @@
 #include <utils/Vector.h>
 
 namespace android {
+class Parcel;
 
 /**
  * A convenience wrapper around the C-based camera_metadata_t library.
@@ -159,6 +160,12 @@
     status_t erase(uint32_t tag);
 
     /**
+     * Swap the underlying camera metadata between this and the other
+     * metadata object.
+     */
+    void swap(CameraMetadata &other);
+
+    /**
      * Dump contents into FD for debugging. The verbosity levels are
      * 0: Tag entry information only, no data values
      * 1: Level 0 plus at most 16 data values per entry
@@ -169,6 +176,31 @@
      */
     void dump(int fd, int verbosity = 1, int indentation = 0) const;
 
+    /**
+     * Serialization over Binder
+     */
+
+    // Metadata object is unchanged when reading from parcel fails.
+    status_t readFromParcel(Parcel *parcel);
+    status_t writeToParcel(Parcel *parcel) const;
+
+    /**
+      * Caller becomes the owner of the new metadata
+      * 'const Parcel' doesn't prevent us from calling the read functions,
+      *  which is interesting since it changes the internal state
+      *
+      * NULL can be returned when no metadata was sent, OR if there was an issue
+      * unpacking the serialized data (i.e. bad parcel or invalid structure).
+      */
+    static status_t readFromParcel(const Parcel &parcel,
+                                   camera_metadata_t** out);
+    /**
+      * Caller retains ownership of metadata
+      * - Write 2 (int32 + blob) args in the current position
+      */
+    static status_t writeToParcel(Parcel &parcel,
+                                  const camera_metadata_t* metadata);
+
   private:
     camera_metadata_t *mBuffer;
     bool               mLocked;
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 2236c1f..f3a186e 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -32,6 +32,9 @@
 
 class ICamera: public IInterface
 {
+    /**
+     * Keep up-to-date with ICamera.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(Camera);
 
@@ -51,8 +54,15 @@
             const sp<IGraphicBufferProducer>& bufferProducer) = 0;
 
     // set the preview callback flag to affect how the received frames from
-    // preview are handled.
+    // preview are handled. Enabling preview callback flags disables any active
+    // preview callback surface set by setPreviewCallbackTarget().
     virtual void            setPreviewCallbackFlag(int flag) = 0;
+    // set a buffer interface to use for client-received preview frames instead
+    // of preview callback buffers. Passing a valid interface here disables any
+    // active preview callbacks set by setPreviewCallbackFlag(). Passing NULL
+    // disables the use of the callback target.
+    virtual status_t        setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer) = 0;
 
     // start preview mode, must call setPreviewDisplay first
     virtual status_t        startPreview() = 0;
diff --git a/include/camera/ICameraClient.h b/include/camera/ICameraClient.h
index b30aa7a..1584dba 100644
--- a/include/camera/ICameraClient.h
+++ b/include/camera/ICameraClient.h
@@ -28,6 +28,9 @@
 
 class ICameraClient: public IInterface
 {
+    /**
+     * Keep up-to-date with ICameraClient.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(CameraClient);
 
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index aaf6eb3..0e10699 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -28,15 +28,21 @@
 class IProCameraUser;
 class IProCameraCallbacks;
 class ICameraServiceListener;
+class ICameraDeviceUser;
+class ICameraDeviceCallbacks;
 
 class ICameraService : public IInterface
 {
 public:
+    /**
+     * Keep up-to-date with ICameraService.aidl in frameworks/base
+     */
     enum {
         GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
         GET_CAMERA_INFO,
         CONNECT,
         CONNECT_PRO,
+        CONNECT_DEVICE,
         ADD_LISTENER,
         REMOVE_LISTENER,
     };
@@ -65,15 +71,27 @@
      * clientUid == USE_CALLING_UID, then the calling UID is used instead. Only
      * trusted callers can set a clientUid other than USE_CALLING_UID.
      */
-    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient,
+    virtual status_t connect(const sp<ICameraClient>& cameraClient,
             int cameraId,
             const String16& clientPackageName,
-            int clientUid) = 0;
+            int clientUid,
+            /*out*/
+            sp<ICamera>& device) = 0;
 
-    virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb,
+    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
             int cameraId,
             const String16& clientPackageName,
-            int clientUid) = 0;
+            int clientUid,
+            /*out*/
+            sp<IProCameraUser>& device) = 0;
+
+    virtual status_t connectDevice(
+            const sp<ICameraDeviceCallbacks>& cameraCb,
+            int cameraId,
+            const String16& clientPackageName,
+            int clientUid,
+            /*out*/
+            sp<ICameraDeviceUser>& device) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
index f2a11c2..0a0e43a 100644
--- a/include/camera/ICameraServiceListener.h
+++ b/include/camera/ICameraServiceListener.h
@@ -26,6 +26,9 @@
 
 class ICameraServiceListener : public IInterface
 {
+    /**
+     * Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
+     */
 public:
 
     /**
diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h
index 563ec17..c774698 100644
--- a/include/camera/IProCameraCallbacks.h
+++ b/include/camera/IProCameraCallbacks.h
@@ -30,6 +30,9 @@
 
 class IProCameraCallbacks : public IInterface
 {
+    /**
+     * Keep up-to-date with IProCameraCallbacks.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(ProCameraCallbacks);
 
diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h
index 45b818c..2ccc4d2 100644
--- a/include/camera/IProCameraUser.h
+++ b/include/camera/IProCameraUser.h
@@ -34,6 +34,9 @@
 
 class IProCameraUser: public IInterface
 {
+    /**
+     * Keep up-to-date with IProCameraUser.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(ProCameraUser);
 
diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h
index 3d1652f..d9ee662 100644
--- a/include/camera/ProCamera.h
+++ b/include/camera/ProCamera.h
@@ -25,6 +25,7 @@
 #include <camera/IProCameraUser.h>
 #include <camera/Camera.h>
 #include <camera/CameraMetadata.h>
+#include <camera/ICameraService.h>
 #include <gui/CpuConsumer.h>
 
 #include <gui/Surface.h>
@@ -87,8 +88,14 @@
     typedef ProCameraListener     TCamListener;
     typedef IProCameraUser        TCamUser;
     typedef IProCameraCallbacks   TCamCallbacks;
+    typedef status_t (ICameraService::*TCamConnectService)(const sp<IProCameraCallbacks>&,
+                                                           int, const String16&, int,
+                                                           /*out*/
+                                                           sp<IProCameraUser>&);
+    static TCamConnectService     fnConnectService;
 };
 
+
 class ProCamera :
     public CameraBase<ProCamera>,
     public BnProCameraCallbacks
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
new file mode 100644
index 0000000..e56d61f
--- /dev/null
+++ b/include/camera/camera2/CaptureRequest.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_CAPTUREREQUEST_H
+
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+#include <camera/CameraMetadata.h>
+
+namespace android {
+
+class Surface;
+
+struct CaptureRequest : public virtual RefBase {
+public:
+
+    CameraMetadata          mMetadata;
+    Vector<sp<Surface> >    mSurfaceList;
+
+    /**
+     * Keep impl up-to-date with CaptureRequest.java in frameworks/base
+     */
+    status_t                readFromParcel(Parcel* parcel);
+    status_t                writeToParcel(Parcel* parcel) const;
+};
+}; // namespace android
+
+#endif
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
new file mode 100644
index 0000000..041fa65
--- /dev/null
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <utils/Timers.h>
+#include <system/camera.h>
+
+namespace android {
+class CameraMetadata;
+
+class ICameraDeviceCallbacks : public IInterface
+{
+    /**
+     * Keep up-to-date with ICameraDeviceCallbacks.aidl in frameworks/base
+     */
+public:
+    DECLARE_META_INTERFACE(CameraDeviceCallbacks);
+
+    // One way
+    virtual void            notifyCallback(int32_t msgType,
+                                           int32_t ext1,
+                                           int32_t ext2) = 0;
+
+    // One way
+    virtual void            onResultReceived(int32_t frameId,
+                                             const CameraMetadata& result) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnCameraDeviceCallbacks : public BnInterface<ICameraDeviceCallbacks>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
new file mode 100644
index 0000000..45988d0
--- /dev/null
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
+#define ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
+
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+struct camera_metadata;
+
+namespace android {
+
+class ICameraDeviceUserClient;
+class IGraphicBufferProducer;
+class Surface;
+class CaptureRequest;
+class CameraMetadata;
+
+class ICameraDeviceUser : public IInterface
+{
+    /**
+     * Keep up-to-date with ICameraDeviceUser.aidl in frameworks/base
+     */
+public:
+    DECLARE_META_INTERFACE(CameraDeviceUser);
+
+    virtual void            disconnect() = 0;
+
+    /**
+     * Request Handling
+     **/
+
+    virtual int             submitRequest(sp<CaptureRequest> request,
+                                          bool streaming = false) = 0;
+    virtual status_t        cancelRequest(int requestId) = 0;
+
+    virtual status_t        deleteStream(int streamId) = 0;
+    virtual status_t        createStream(
+            int width, int height, int format,
+            const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+    // Create a request object from a template.
+    virtual status_t        createDefaultRequest(int templateId,
+                                                 /*out*/
+                                                 CameraMetadata* request) = 0;
+    // Get static camera metadata
+    virtual status_t        getCameraInfo(/*out*/
+                                          CameraMetadata* info) = 0;
+
+    // Wait until all the submitted requests have finished processing
+    virtual status_t        waitUntilIdle() =  0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnCameraDeviceUser: public BnInterface<ICameraDeviceUser>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif
diff --git a/include/cpustats/CentralTendencyStatistics.h b/include/cpustats/CentralTendencyStatistics.h
new file mode 100644
index 0000000..21b6981
--- /dev/null
+++ b/include/cpustats/CentralTendencyStatistics.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _CENTRAL_TENDENCY_STATISTICS_H
+#define _CENTRAL_TENDENCY_STATISTICS_H
+
+#include <math.h>
+
+// Not multithread safe
+class CentralTendencyStatistics {
+
+public:
+
+    CentralTendencyStatistics() :
+            mMean(NAN), mMedian(NAN), mMinimum(INFINITY), mMaximum(-INFINITY), mN(0), mM2(0),
+            mVariance(NAN), mVarianceKnownForN(0), mStddev(NAN), mStddevKnownForN(0) { }
+
+    ~CentralTendencyStatistics() { }
+
+    // add x to the set of samples
+    void sample(double x);
+
+    // return the arithmetic mean of all samples so far
+    double mean() const { return mMean; }
+
+    // return the minimum of all samples so far
+    double minimum() const { return mMinimum; }
+
+    // return the maximum of all samples so far
+    double maximum() const { return mMaximum; }
+
+    // return the variance of all samples so far
+    double variance() const;
+
+    // return the standard deviation of all samples so far
+    double stddev() const;
+
+    // return the number of samples added so far
+    unsigned n() const { return mN; }
+
+    // reset the set of samples to be empty
+    void reset();
+
+private:
+    double mMean;
+    double mMedian;
+    double mMinimum;
+    double mMaximum;
+    unsigned mN;    // number of samples so far
+    double mM2;
+
+    // cached variance, and n at time of caching
+    mutable double mVariance;
+    mutable unsigned mVarianceKnownForN;
+
+    // cached standard deviation, and n at time of caching
+    mutable double mStddev;
+    mutable unsigned mStddevKnownForN;
+
+};
+
+#endif // _CENTRAL_TENDENCY_STATISTICS_H
diff --git a/include/cpustats/README.txt b/include/cpustats/README.txt
new file mode 100644
index 0000000..14439f0
--- /dev/null
+++ b/include/cpustats/README.txt
@@ -0,0 +1,6 @@
+This is a static library of CPU usage statistics, originally written
+for audio but most are not actually specific to audio.
+
+Requirements to be here:
+ * should be related to CPU usage statistics
+ * should be portable to host; avoid Android OS dependencies without a conditional
diff --git a/include/cpustats/ThreadCpuUsage.h b/include/cpustats/ThreadCpuUsage.h
new file mode 100644
index 0000000..9756844
--- /dev/null
+++ b/include/cpustats/ThreadCpuUsage.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _THREAD_CPU_USAGE_H
+#define _THREAD_CPU_USAGE_H
+
+#include <fcntl.h>
+#include <pthread.h>
+
+namespace android {
+
+// Track CPU usage for the current thread.
+// Units are in per-thread CPU ns, as reported by
+// clock_gettime(CLOCK_THREAD_CPUTIME_ID).  Simple usage: for cyclic
+// threads where you want to measure the execution time of the whole
+// cycle, just call sampleAndEnable() at the start of each cycle.
+// For acyclic threads, or for cyclic threads where you want to measure/track
+// only part of each cycle, call enable(), disable(), and/or setEnabled()
+// to demarcate the region(s) of interest, and then call sample() periodically.
+// This class is not thread-safe for concurrent calls from multiple threads;
+// the methods of this class may only be called by the current thread
+// which constructed the object.
+
+class ThreadCpuUsage
+{
+
+public:
+    ThreadCpuUsage() :
+        mIsEnabled(false),
+        mWasEverEnabled(false),
+        mAccumulator(0),
+        // mPreviousTs
+        // mMonotonicTs
+        mMonotonicKnown(false)
+        {
+            (void) pthread_once(&sOnceControl, &init);
+            for (int i = 0; i < sKernelMax; ++i) {
+                mCurrentkHz[i] = (uint32_t) ~0;   // unknown
+            }
+        }
+
+    ~ThreadCpuUsage() { }
+
+    // Return whether currently tracking CPU usage by current thread
+    bool isEnabled() const  { return mIsEnabled; }
+
+    // Enable tracking of CPU usage by current thread;
+    // any CPU used from this point forward will be tracked.
+    // Returns the previous enabled status.
+    bool enable()       { return setEnabled(true); }
+
+    // Disable tracking of CPU usage by current thread;
+    // any CPU used from this point forward will be ignored.
+    // Returns the previous enabled status.
+    bool disable()      { return setEnabled(false); }
+
+    // Set the enabled status and return the previous enabled status.
+    // This method is intended to be used for safe nested enable/disabling.
+    bool setEnabled(bool isEnabled);
+
+    // Add a sample point, and also enable tracking if needed.
+    // If tracking has never been enabled, then this call enables tracking but
+    // does _not_ add a sample -- it is not possible to add a sample the
+    // first time because there is no previous point to subtract from.
+    // Otherwise, if tracking is enabled,
+    // then adds a sample for tracked CPU ns since the previous
+    // sample, or since the first call to sampleAndEnable(), enable(), or
+    // setEnabled(true).  If there was a previous sample but tracking is
+    // now disabled, then adds a sample for the tracked CPU ns accumulated
+    // up until the most recent disable(), resets this accumulator, and then
+    // enables tracking.  Calling this method rather than enable() followed
+    // by sample() avoids a race condition for the first sample.
+    // Returns true if the sample 'ns' is valid, or false if invalid.
+    // Note that 'ns' is an output parameter passed by reference.
+    // The caller does not need to initialize this variable.
+    // The units are CPU nanoseconds consumed by current thread.
+    bool sampleAndEnable(double& ns);
+
+    // Add a sample point, but do not
+    // change the tracking enabled status.  If tracking has either never been
+    // enabled, or has never been enabled since the last sample, then log a warning
+    // and don't add a sample.  Otherwise, adds a sample for tracked CPU ns since
+    // the previous sample or since the first call to sampleAndEnable(),
+    // enable(), or setEnabled(true) if no previous sample.
+    // Returns true if the sample is valid, or false if invalid.
+    // Note that 'ns' is an output parameter passed by reference.
+    // The caller does not need to initialize this variable.
+    // The units are CPU nanoseconds consumed by current thread.
+    bool sample(double& ns);
+
+    // Return the elapsed delta wall clock ns since initial enable or reset,
+    // as reported by clock_gettime(CLOCK_MONOTONIC).
+    long long elapsed() const;
+
+    // Reset elapsed wall clock.  Has no effect on tracking or accumulator.
+    void resetElapsed();
+
+    // Return current clock frequency for specified CPU, in kHz.
+    // You can get your CPU number using sched_getcpu(2).  Note that, unless CPU affinity
+    // has been configured appropriately, the CPU number can change.
+    // Also note that, unless the CPU governor has been configured appropriately,
+    // the CPU frequency can change.  And even if the CPU frequency is locked down
+    // to a particular value, that the frequency might still be adjusted
+    // to prevent thermal overload.  Therefore you should poll for your thread's
+    // current CPU number and clock frequency periodically.
+    uint32_t getCpukHz(int cpuNum);
+
+private:
+    bool mIsEnabled;                // whether tracking is currently enabled
+    bool mWasEverEnabled;           // whether tracking was ever enabled
+    long long mAccumulator;         // accumulated thread CPU time since last sample, in ns
+    struct timespec mPreviousTs;    // most recent thread CPU time, valid only if mIsEnabled is true
+    struct timespec mMonotonicTs;   // most recent monotonic time
+    bool mMonotonicKnown;           // whether mMonotonicTs has been set
+
+    static const int MAX_CPU = 8;
+    static int sScalingFds[MAX_CPU];// file descriptor per CPU for reading scaling_cur_freq
+    uint32_t mCurrentkHz[MAX_CPU];  // current CPU frequency in kHz, not static to avoid a race
+    static pthread_once_t sOnceControl;
+    static int sKernelMax;          // like MAX_CPU, but determined at runtime == cpu/kernel_max + 1
+    static void init();             // called once at first ThreadCpuUsage construction
+    static pthread_mutex_t sMutex;  // protects sScalingFds[] after initialization
+};
+
+}   // namespace android
+
+#endif //  _THREAD_CPU_USAGE_H
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index 43e4de7..ef392f0 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -26,6 +26,8 @@
 {
 public:
 
+    // FIXME merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer
+    //       and rename getNextBuffer() to obtainBuffer()
     struct Buffer {
         Buffer() : raw(NULL), frameCount(0) { }
         union {
@@ -44,6 +46,19 @@
     // pts is the local time when the next sample yielded by getNextBuffer
     // will be rendered.
     // Pass kInvalidPTS if the PTS is unknown or not applicable.
+    // On entry:
+    //  buffer              != NULL
+    //  buffer->raw         unused
+    //  buffer->frameCount  maximum number of desired frames
+    // On successful return:
+    //  status              NO_ERROR
+    //  buffer->raw         non-NULL pointer to buffer->frameCount contiguous available frames
+    //  buffer->frameCount  number of contiguous available frames at buffer->raw,
+    //                      0 < buffer->frameCount <= entry value
+    // On error return:
+    //  status              != NO_ERROR
+    //  buffer->raw         NULL
+    //  buffer->frameCount  0
     virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
 
     virtual void releaseBuffer(Buffer* buffer) = 0;
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 38c6548..62f0c64 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -14,31 +14,27 @@
  * limitations under the License.
  */
 
-#ifndef AUDIORECORD_H_
-#define AUDIORECORD_H_
+#ifndef ANDROID_AUDIORECORD_H
+#define ANDROID_AUDIORECORD_H
 
-#include <binder/IMemory.h>
 #include <cutils/sched_policy.h>
 #include <media/AudioSystem.h>
 #include <media/IAudioRecord.h>
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
 #include <utils/threads.h>
 
 namespace android {
 
+// ----------------------------------------------------------------------------
+
 class audio_track_cblk_t;
 class AudioRecordClientProxy;
 
 // ----------------------------------------------------------------------------
 
-class AudioRecord : virtual public RefBase
+class AudioRecord : public RefBase
 {
 public:
 
-    static const int DEFAULT_SAMPLE_RATE = 8000;
-
     /* Events used by AudioRecord callback function (callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
      */
@@ -49,6 +45,8 @@
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 3,          // Record head is at a new position
                                     // (See setPositionUpdatePeriod()).
+        EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
+                                    // voluntary invalidation by mediaserver, or mediaserver crash.
     };
 
     /* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -58,11 +56,17 @@
     class Buffer
     {
     public:
+        // FIXME use m prefix
         size_t      frameCount;     // number of sample frames corresponding to size;
                                     // on input it is the number of frames available,
                                     // on output is the number of frames actually drained
+                                    // (currently ignored, but will make the primary field in future)
 
-        size_t      size;           // total size in bytes == frameCount * frameSize
+        size_t      size;           // input/output in bytes == frameCount * frameSize
+                                    // FIXME this is redundant with respect to frameCount,
+                                    // and TRANSFER_OBTAIN mode is broken for 8-bit data
+                                    // since we don't define the frame format
+
         union {
             void*       raw;
             short*      i16;        // signed 16-bit
@@ -84,6 +88,7 @@
      *          - EVENT_OVERRUN: unused.
      *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
      *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
+     *          - EVENT_NEW_IAUDIORECORD: unused.
      */
 
     typedef void (*callback_t)(int event, void* user, void *info);
@@ -101,94 +106,112 @@
                                       audio_format_t format,
                                       audio_channel_mask_t channelMask);
 
+    /* How data is transferred from AudioRecord
+     */
+    enum transfer_type {
+        TRANSFER_DEFAULT,   // not specified explicitly; determine from other parameters
+        TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
+        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_SYNC,      // synchronous read()
+    };
+
     /* Constructs an uninitialized AudioRecord. No connection with
-     * AudioFlinger takes place.
+     * AudioFlinger takes place.  Use set() after this.
      */
                         AudioRecord();
 
     /* Creates an AudioRecord object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
-     * Unspecified values are set to the audio hardware's current
-     * values.
+     * Unspecified values are set to appropriate default values.
      *
      * Parameters:
      *
-     * inputSource:        Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
-     * sampleRate:         Track sampling rate in Hz.
+     * inputSource:        Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
+     * sampleRate:         Data sink sampling rate in Hz.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
-     * channelMask:        Channel mask.
+     * channelMask:        Channel mask, such that audio_is_input_channel(channelMask) is true.
      * frameCount:         Minimum size of track PCM buffer in frames. This defines the
      *                     application's contribution to the
      *                     latency of the track.  The actual size selected by the AudioRecord could
      *                     be larger if the requested size is not compatible with current audio HAL
      *                     latency.  Zero means to use a default value.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to consume new PCM data.
+     *                     to consume new PCM data and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames are ready in record track output buffer.
      * sessionId:          Not yet supported.
+     * transferType:       How data is transferred from AudioRecord.
+     * flags:              See comments on audio_input_flags_t in <system/audio.h>
+     * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioRecord(audio_source_t inputSource,
-                                    uint32_t sampleRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
                                     int frameCount      = 0,
                                     callback_t cbf = NULL,
                                     void* user = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId = 0);
-
+                                    int sessionId = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
 
     /* Terminates the AudioRecord and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioRecord.
      */
-                        ~AudioRecord();
+protected:
+                        virtual ~AudioRecord();
+public:
 
-
-    /* Initialize an uninitialized AudioRecord.
+    /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
+     * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful intialization
-     *  - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use
+     *  - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
      *  - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - PERMISSION_DENIED: recording is not allowed for the requesting process
+     *
+     * Parameters not listed in the AudioRecord constructors above:
+     *
+     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
-            status_t    set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
-                            uint32_t sampleRate = 0,
-                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                            audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_MONO,
+            status_t    set(audio_source_t inputSource,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
                             int frameCount      = 0,
                             callback_t cbf = NULL,
                             void* user = NULL,
                             int notificationFrames = 0,
                             bool threadCanCallJava = false,
-                            int sessionId = 0);
-
+                            int sessionId = 0,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE);
 
     /* Result of constructing the AudioRecord. This must be checked
      * before using any AudioRecord API (except for set()), because using
      * an uninitialized AudioRecord produces undefined results.
      * See set() method above for possible return codes.
      */
-            status_t    initCheck() const;
+            status_t    initCheck() const   { return mStatus; }
 
     /* Returns this track's estimated latency in milliseconds.
      * This includes the latency due to AudioRecord buffer size,
      * and audio hardware driver.
      */
-            uint32_t     latency() const;
+            uint32_t    latency() const     { return mLatency; }
 
    /* getters, see constructor and set() */
 
-            audio_format_t format() const;
-            uint32_t    channelCount() const;
-            size_t      frameCount() const;
-            size_t      frameSize() const { return mFrameSize; }
-            audio_source_t inputSource() const;
-
+            audio_format_t format() const   { return mFormat; }
+            uint32_t    channelCount() const    { return mChannelCount; }
+            size_t      frameCount() const  { return mFrameCount; }
+            size_t      frameSize() const   { return mFrameSize; }
+            audio_source_t inputSource() const  { return mInputSource; }
 
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
@@ -198,26 +221,29 @@
             status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
                               int triggerSession = 0);
 
-    /* Stop a track. If set, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
-     * and will drain buffers until the pool is exhausted.
+    /* Stop a track. If set, the callback will cease being called.  Note that obtainBuffer() still
+     * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
      */
             void        stop();
             bool        stopped() const;
 
-    /* Get sample rate for this record track in Hz.
+    /* Return the sink sample rate for this record track in Hz.
+     * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
      */
-            uint32_t    getSampleRate() const;
+            uint32_t    getSampleRate() const   { return mSampleRate; }
 
     /* Sets marker position. When record reaches the number of frames specified,
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
      * with marker == 0 cancels marker notification callback.
+     * To set a marker at a position which would compute as 0,
+     * a workaround is to set the marker at a nearby position such as ~0 or 1.
      * If the AudioRecord has been opened with no callback function associated,
      * the operation will fail.
      *
      * Parameters:
      *
-     * marker:   marker position expressed in frames.
+     * marker:   marker position expressed in wrapping (overflow) frame units,
+     *           like the return value of getPosition().
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -226,13 +252,13 @@
             status_t    setMarkerPosition(uint32_t marker);
             status_t    getMarkerPosition(uint32_t *marker) const;
 
-
     /* Sets position update period. Every time the number of frames specified has been recorded,
      * a callback with event type EVENT_NEW_POS is called.
      * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
      * callback.
      * If the AudioRecord has been opened with no callback function associated,
      * the operation will fail.
+     * Extremely small values may be rounded up to a value the implementation can support.
      *
      * Parameters:
      *
@@ -245,13 +271,13 @@
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
             status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
 
-
-    /* Gets record head position. The position is the total number of frames
-     * recorded since record start.
+    /* Return the total number of frames recorded since recording started.
+     * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+     * It is reset to zero by stop().
      *
      * Parameters:
      *
-     *  position:  Address where to return record head position within AudioRecord buffer.
+     *  position:  Address where to return record head position.
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -276,38 +302,74 @@
      *
      * Returned value:
      *  AudioRecord session ID.
+     *
+     * No lock needed because session ID doesn't change after first set().
      */
-            int    getSessionId() const;
+            int    getSessionId() const { return mSessionId; }
 
-    /* Obtains a buffer of "frameCount" frames. The buffer must be
-     * drained entirely, and then released with releaseBuffer().
-     * If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers available,
-     * at which point NO_MORE_BUFFERS is returned.
+    /* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+     * After draining these frames of data, the caller should release them with releaseBuffer().
+     * If the track buffer is not empty, obtainBuffer() returns as many contiguous
+     * full frames as are available immediately.
+     * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
+     * regardless of the value of waitCount.
+     * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
+     * maximum timeout based on waitCount; see chart below.
      * Buffers will be returned until the pool
      * is exhausted, at which point obtainBuffer() will either block
-     * or return WOULD_BLOCK depending on the value of the "blocking"
+     * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
      *
+     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
+     * which should use read() or callback EVENT_MORE_DATA instead.
+     *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
      *  -1  causes an (almost) infinite wait time,
      *   0  non-blocking.
+     *
+     * Buffer fields
+     * On entry:
+     *  frameCount  number of frames requested
+     * After error return:
+     *  frameCount  0
+     *  size        0
+     *  raw         undefined
+     * After successful return:
+     *  frameCount  actual number of frames available, <= number requested
+     *  size        actual number of bytes available
+     *  raw         pointer to the buffer
      */
 
-        enum {
-            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-            STOPPED = 1
-        };
+    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+                                __attribute__((__deprecated__));
 
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+    /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are available immediately.
+     * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+     * in case the requested amount of frames is in two or more non-contiguous regions.
+     * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
+     */
+            status_t    obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+                                     struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
 
-    /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */
+    /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */
+    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
 
-
     /* As a convenience we provide a read() interface to the audio buffer.
-     * This is implemented on top of obtainBuffer/releaseBuffer.
+     * Input parameter 'size' is in byte units.
+     * This is implemented on top of obtainBuffer/releaseBuffer. For best
+     * performance use callbacks. Returns actual number of bytes read >= 0,
+     * or one of the following negative status codes:
+     *      INVALID_OPERATION   AudioRecord is configured for streaming mode
+     *      BAD_VALUE           size is invalid
+     *      WOULD_BLOCK         when obtainBuffer() returns same, or
+     *                          AudioRecord was stopped during the read
+     *      or any other error code returned by IAudioRecord::start() or restoreRecord_l().
      */
             ssize_t     read(void* buffer, size_t size);
 
@@ -336,68 +398,113 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        pauseConditional();
+                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
         friend class AudioRecord;
         virtual bool        threadLoop();
-        AudioRecord& mReceiver;
+        AudioRecord&        mReceiver;
         virtual ~AudioRecordThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
         bool                mPaused;    // whether thread is currently paused
+        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
     };
 
             // body of AudioRecordThread::threadLoop()
-            bool processAudioBuffer(const sp<AudioRecordThread>& thread);
+            // returns the maximum amount of time before we would like to run again, where:
+            //      0           immediately
+            //      > 0         no later than this many nanoseconds from now
+            //      NS_WHENEVER still active but no particular deadline
+            //      NS_INACTIVE inactive so don't run again until re-started
+            //      NS_NEVER    never again
+            static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+            nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread);
 
-            status_t openRecord_l(uint32_t sampleRate,
-                                audio_format_t format,
-                                size_t frameCount,
-                                audio_io_handle_t input);
-            audio_io_handle_t getInput_l();
-            status_t restoreRecord_l(audio_track_cblk_t*& cblk);
+            // caller must hold lock on mLock for all _l methods
+            status_t openRecord_l(size_t epoch);
+
+            // FIXME enum is faster than strcmp() for parameter 'from'
+            status_t restoreRecord_l(const char *from);
 
     sp<AudioRecordThread>   mAudioRecordThread;
     mutable Mutex           mLock;
 
-    bool                    mActive;            // protected by mLock
+    // Current client state:  false = stopped, true = active.  Protected by mLock.  If more states
+    // are added, consider changing this to enum State { ... } mState as in AudioTrack.
+    bool                    mActive;
 
     // for client callback handler
     callback_t              mCbf;               // callback handler for events, or NULL
     void*                   mUserData;
 
     // for notification APIs
-    uint32_t                mNotificationFrames;
-    uint32_t                mRemainingFrames;
-    uint32_t                mMarkerPosition;    // in frames
+    uint32_t                mNotificationFramesReq; // requested number of frames between each
+                                                    // notification callback
+    uint32_t                mNotificationFramesAct; // actual number of frames between each
+                                                    // notification callback
+    bool                    mRefreshRemaining;  // processAudioBuffer() should refresh next 2
+
+    // These are private to processAudioBuffer(), and are not protected by a lock
+    uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
+    bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
+    int                     mObservedSequence;      // last observed value of mSequence
+
+    uint32_t                mMarkerPosition;    // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;       // in frames
-    uint32_t                mUpdatePeriod;      // in ms
+    uint32_t                mUpdatePeriod;      // in frames, zero means no EVENT_NEW_POS
+
+    status_t                mStatus;
 
     // constant after constructor or set()
     uint32_t                mSampleRate;
     size_t                  mFrameCount;
     audio_format_t          mFormat;
-    uint8_t                 mChannelCount;
+    uint32_t                mChannelCount;
     size_t                  mFrameSize;         // app-level frame size == AudioFlinger frame size
     audio_source_t          mInputSource;
-    status_t                mStatus;
-    uint32_t                mLatency;
+    uint32_t                mLatency;           // in ms
     audio_channel_mask_t    mChannelMask;
-    audio_io_handle_t       mInput;                     // returned by AudioSystem::getInput()
+    audio_input_flags_t     mFlags;
     int                     mSessionId;
+    transfer_type           mTransfer;
+
+    audio_io_handle_t       mInput;             // returned by AudioSystem::getInput()
 
     // may be changed if IAudioRecord object is re-created
     sp<IAudioRecord>        mAudioRecord;
     sp<IMemory>             mCblkMemory;
-    audio_track_cblk_t*     mCblk;
-    void*                   mBuffers;           // starting address of buffers in shared memory
+    audio_track_cblk_t*     mCblk;              // re-load after mLock.unlock()
 
-    int                     mPreviousPriority;          // before start()
+    int                     mPreviousPriority;  // before start()
     SchedPolicy             mPreviousSchedulingGroup;
-    AudioRecordClientProxy* mProxy;
+    bool                    mAwaitBoost;    // thread should wait for priority boost before running
+
+    // The proxy should only be referenced while a lock is held because the proxy isn't
+    // multi-thread safe.
+    // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+    // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+    // them around in case they are replaced during the obtainBuffer().
+    sp<AudioRecordClientProxy> mProxy;
+
+    bool                    mInOverrun;         // whether recorder is currently in overrun state
+
+private:
+    class DeathNotifier : public IBinder::DeathRecipient {
+    public:
+        DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { }
+    protected:
+        virtual void        binderDied(const wp<IBinder>& who);
+    private:
+        const wp<AudioRecord> mAudioRecord;
+    };
+
+    sp<DeathNotifier>       mDeathNotifier;
+    uint32_t                mSequence;              // incremented for each new IAudioRecord attempt
 };
 
 }; // namespace android
 
-#endif /*AUDIORECORD_H_*/
+#endif // ANDROID_AUDIORECORD_H
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index b11c812..006af08 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -17,20 +17,18 @@
 #ifndef ANDROID_AUDIOSYSTEM_H_
 #define ANDROID_AUDIOSYSTEM_H_
 
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <media/IAudioFlinger.h>
-
+#include <hardware/audio_effect.h>
+#include <media/IAudioFlingerClient.h>
 #include <system/audio.h>
 #include <system/audio_policy.h>
-
-/* XXX: Should be include by all the users instead */
-#include <media/AudioParameter.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
 
 namespace android {
 
 typedef void (*audio_error_callback)(status_t err);
 
+class IAudioFlinger;
 class IAudioPolicyService;
 class String8;
 
@@ -128,8 +126,10 @@
     // - BAD_VALUE: invalid parameter
     // NOTE: this feature is not supported on all hardware platforms and it is
     // necessary to check returned status before using the returned values.
-    static status_t getRenderPosition(size_t *halFrames, size_t *dspFrames,
-            audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+    static status_t getRenderPosition(audio_io_handle_t output,
+                                      size_t *halFrames,
+                                      size_t *dspFrames,
+                                      audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
 
     // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
     static size_t getInputFramesLost(audio_io_handle_t ioHandle);
@@ -155,11 +155,11 @@
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)  {}
 
         uint32_t samplingRate;
-        int32_t format;
-        int32_t channels;
+        audio_format_t format;
+        audio_channel_mask_t channelMask;
         size_t frameCount;
         uint32_t latency;
     };
@@ -197,7 +197,8 @@
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 int session = 0);
@@ -245,6 +246,12 @@
     static uint32_t getPrimaryOutputSamplingRate();
     static size_t getPrimaryOutputFrameCount();
 
+    static status_t setLowRamDevice(bool isLowRamDevice);
+
+    // Check if hw offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming or offload property is enabled
+    static bool isOffloadSupported(const audio_offload_info_t& info);
+
     // ----------------------------------------------------------------------------
 
 private:
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 64f82bb..ae92cdd 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -17,18 +17,9 @@
 #ifndef ANDROID_AUDIOTRACK_H
 #define ANDROID_AUDIOTRACK_H
 
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <media/IAudioFlinger.h>
-#include <media/IAudioTrack.h>
-#include <media/AudioSystem.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
 #include <cutils/sched_policy.h>
+#include <media/AudioSystem.h>
+#include <media/IAudioTrack.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -37,10 +28,11 @@
 
 class audio_track_cblk_t;
 class AudioTrackClientProxy;
+class StaticAudioTrackClientProxy;
 
 // ----------------------------------------------------------------------------
 
-class AudioTrack : virtual public RefBase
+class AudioTrack : public RefBase
 {
 public:
     enum channel_index {
@@ -49,7 +41,7 @@
         RIGHT  = 1
     };
 
-    /* Events used by AudioTrack callback function (audio_track_cblk_t).
+    /* Events used by AudioTrack callback function (callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
      */
     enum event_type {
@@ -64,7 +56,12 @@
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 4,          // Playback head is at a new position
                                     // (See setPositionUpdatePeriod()).
-        EVENT_BUFFER_END = 5        // Playback head is at the end of the buffer.
+        EVENT_BUFFER_END = 5,       // Playback head is at the end of the buffer.
+                                    // Not currently used by android.media.AudioTrack.
+        EVENT_NEW_IAUDIOTRACK = 6,  // IAudioTrack was re-created, either due to re-routing and
+                                    // voluntary invalidation by mediaserver, or mediaserver crash.
+        EVENT_STREAM_END = 7,       // Sent after all the buffers queued in AF and HW are played
+                                    // back (after stop is called)
     };
 
     /* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -74,19 +71,25 @@
     class Buffer
     {
     public:
+        // FIXME use m prefix
         size_t      frameCount;   // number of sample frames corresponding to size;
                                   // on input it is the number of frames desired,
                                   // on output is the number of frames actually filled
+                                  // (currently ignored, but will make the primary field in future)
 
-        size_t      size;         // input/output in byte units
+        size_t      size;         // input/output in bytes == frameCount * frameSize
+                                  // on output is the number of bytes actually filled
+                                  // FIXME this is redundant with respect to frameCount,
+                                  // and TRANSFER_OBTAIN mode is broken for 8-bit data
+                                  // since we don't define the frame format
+
         union {
             void*       raw;
-            short*      i16;    // signed 16-bit
-            int8_t*     i8;     // unsigned 8-bit, offset by 0x80
+            short*      i16;      // signed 16-bit
+            int8_t*     i8;       // unsigned 8-bit, offset by 0x80
         };
     };
 
-
     /* As a convenience, if a callback is supplied, a handler thread
      * is automatically created with the appropriate priority. This thread
      * invokes the callback when a new buffer becomes available or various conditions occur.
@@ -100,9 +103,10 @@
      *            written.
      *          - EVENT_UNDERRUN: unused.
      *          - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
-     *          - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames.
-     *          - EVENT_NEW_POS: pointer to an uint32_t containing the new position in frames.
+     *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
+     *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
      *          - EVENT_BUFFER_END: unused.
+     *          - EVENT_NEW_IAUDIOTRACK: unused.
      */
 
     typedef void (*callback_t)(int event, void* user, void *info);
@@ -112,11 +116,22 @@
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
      *  - NO_INIT: audio server or audio hardware not initialized
+     *  - BAD_VALUE: unsupported configuration
      */
 
-     static status_t getMinFrameCount(size_t* frameCount,
-                                      audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
-                                      uint32_t sampleRate = 0);
+    static status_t getMinFrameCount(size_t* frameCount,
+                                     audio_stream_type_t streamType,
+                                     uint32_t sampleRate);
+
+    /* How data is transferred to AudioTrack
+     */
+    enum transfer_type {
+        TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
+        TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
+        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_SYNC,      // synchronous write()
+        TRANSFER_SHARED,    // shared memory
+    };
 
     /* Constructs an uninitialized AudioTrack. No connection with
      * AudioFlinger takes place.  Use set() after this.
@@ -128,13 +143,13 @@
      * Unspecified values are set to appropriate default values.
      * With this constructor, the track is configured for streaming mode.
      * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA.
-     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is deprecated.
+     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed.
      *
      * Parameters:
      *
      * streamType:         Select the type of audio stream this track is attached to
      *                     (e.g. AUDIO_STREAM_MUSIC).
-     * sampleRate:         Track sampling rate in Hz.
+     * sampleRate:         Data source sampling rate in Hz.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask.
@@ -149,21 +164,24 @@
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames have been consumed from track input buffer.
+     *                     This is expressed in units of frames at the initial source sample rate.
      * sessionId:          Specific session ID, or zero to use default.
-     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
-     *                     If not present in parameter list, then fixed at false.
+     * transferType:       How data is transferred to AudioTrack.
+     * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
-                                    uint32_t sampleRate  = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t,
                                     int frameCount       = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = NULL,
                                     void* user           = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId        = 0);
+                                    int sessionId        = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = NULL);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -174,26 +192,30 @@
      * The write() method is not supported in this case.
      * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
-     * FIXME EVENT_MORE_DATA still occurs; it must be ignored.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
-                                    uint32_t sampleRate = 0,
-                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                    audio_channel_mask_t channelMask = 0,
-                                    const sp<IMemory>& sharedBuffer = 0,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const sp<IMemory>& sharedBuffer,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf      = NULL,
                                     void* user          = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId       = 0);
+                                    int sessionId       = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    const audio_offload_info_t *offloadInfo = NULL);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
      */
-                        ~AudioTrack();
+protected:
+                        virtual ~AudioTrack();
+public:
 
-    /* Initialize an uninitialized AudioTrack.
+    /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
+     * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioTrack is already initialized
@@ -201,11 +223,15 @@
      *  - NO_INIT: audio server or audio hardware not initialized
      * If sharedBuffer is non-0, the frameCount parameter is ignored and
      * replaced by the shared buffer's total allocated size in frame units.
+     *
+     * Parameters not listed in the AudioTrack constructors above:
+     *
+     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
-            status_t    set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
-                            uint32_t sampleRate = 0,
-                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                            audio_channel_mask_t channelMask = 0,
+            status_t    set(audio_stream_type_t streamType,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
                             int frameCount      = 0,
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                             callback_t cbf      = NULL,
@@ -213,7 +239,9 @@
                             int notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
-                            int sessionId       = 0);
+                            int sessionId       = 0,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            const audio_offload_info_t *offloadInfo = NULL);
 
     /* Result of constructing the AudioTrack. This must be checked
      * before using any AudioTrack API (except for set()), because using
@@ -233,15 +261,16 @@
             audio_stream_type_t streamType() const { return mStreamType; }
             audio_format_t format() const   { return mFormat; }
 
-    /* Return frame size in bytes, which for linear PCM is channelCount * (bit depth per channel / 8).
+    /* Return frame size in bytes, which for linear PCM is
+     * channelCount * (bit depth per channel / 8).
      * channelCount is determined from channelMask, and bit depth comes from format.
      * For non-linear formats, the frame size is typically 1 byte.
      */
-            uint32_t    channelCount() const { return mChannelCount; }
-
-            uint32_t    frameCount() const  { return mFrameCount; }
             size_t      frameSize() const   { return mFrameSize; }
 
+            uint32_t    channelCount() const { return mChannelCount; }
+            uint32_t    frameCount() const  { return mFrameCount; }
+
     /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
             sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
@@ -249,14 +278,13 @@
      * make it active. If set, the callback will start being called.
      * If the track was previously paused, volume is ramped up over the first mix buffer.
      */
-            void        start();
+            status_t        start();
 
     /* Stop a track.
      * In static buffer mode, the track is stopped immediately.
-     * In streaming mode, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
-     * and will fill up buffers until the pool is exhausted.
-     * The stop does not occur immediately: any data remaining in the buffer
+     * In streaming mode, the callback will cease being called.  Note that obtainBuffer() still
+     * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+     * In streaming mode the stop does not occur immediately: any data remaining in the buffer
      * is first drained, mixed, and output, and only then is the track marked as stopped.
      */
             void        stop();
@@ -270,7 +298,7 @@
             void        flush();
 
     /* Pause a track. After pause, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
+     * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
      * and will fill up buffers until the pool is exhausted.
      * Volume is ramped down over the next mix buffer following the pause request,
      * and then the track is marked as paused.  It can be resumed with ramp up by start().
@@ -294,11 +322,11 @@
             status_t    setAuxEffectSendLevel(float level);
             void        getAuxEffectSendLevel(float* level) const;
 
-    /* Set sample rate for this track in Hz, mostly used for games' sound effects
+    /* Set source sample rate for this track in Hz, mostly used for games' sound effects
      */
             status_t    setSampleRate(uint32_t sampleRate);
 
-    /* Return current sample rate in Hz, or 0 if unknown */
+    /* Return current source sample rate in Hz, or 0 if unknown */
             uint32_t    getSampleRate() const;
 
     /* Enables looping and sets the start and end points of looping.
@@ -306,20 +334,24 @@
      *
      * Parameters:
      *
-     * loopStart:   loop start expressed as the number of PCM frames played since AudioTrack start.
-     * loopEnd:     loop end expressed as the number of PCM frames played since AudioTrack start.
+     * loopStart:   loop start in frames relative to start of buffer.
+     * loopEnd:     loop end in frames relative to start of buffer.
      * loopCount:   number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
-     *              pending or active loop. loopCount = -1 means infinite looping.
+     *              pending or active loop. loopCount == -1 means infinite looping.
      *
      * For proper operation the following condition must be respected:
-     *          (loopEnd-loopStart) <= framecount()
+     *      loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
+     *
+     * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
+     * setLoop() will return BAD_VALUE.  loopCount must be >= -1.
+     *
      */
             status_t    setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
 
     /* Sets marker position. When playback reaches the number of frames specified, a callback with
      * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
      * notification callback.  To set a marker at a position which would compute as 0,
-     * a workaround is to the set the marker at a nearby position such as -1 or 1.
+     * a workaround is to the set the marker at a nearby position such as ~0 or 1.
      * If the AudioTrack has been opened with no callback function associated, the operation will
      * fail.
      *
@@ -354,18 +386,14 @@
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
             status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
 
-    /* Sets playback head position within AudioTrack buffer. The new position is specified
-     * in number of frames.
-     * This method must be called with the AudioTrack in paused or stopped state.
-     * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames.
-     * Therefore using this method makes sense only when playing a "static" audio buffer
-     * as opposed to streaming.
-     * The getPosition() method on the other hand returns the total number of frames played since
-     * playback start.
+    /* Sets playback head position.
+     * Only supported for static buffer mode.
      *
      * Parameters:
      *
-     * position:  New playback head position within AudioTrack buffer.
+     * position:  New playback head position in frames relative to start of buffer.
+     *            0 <= position <= frameCount().  Note that end of buffer is permitted,
+     *            but will result in an immediate underrun if started.
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -378,8 +406,22 @@
     /* Return the total number of frames played since playback start.
      * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
      * It is reset to zero by flush(), reload(), and stop().
+     *
+     * Parameters:
+     *
+     *  position:  Address where to return play head position.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *  - NO_ERROR: successful operation
+     *  - BAD_VALUE:  position is NULL
      */
-            status_t    getPosition(uint32_t *position);
+            status_t    getPosition(uint32_t *position) const;
+
+    /* For static buffer mode only, this returns the current playback position in frames
+     * relative to start of buffer.  It is analogous to the position units used by
+     * setLoop() and setPosition().  After underrun, the position will be at end of buffer.
+     */
+            status_t    getBufferPosition(uint32_t *position);
 
     /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
      * rewriting the buffer before restarting playback after a stop.
@@ -426,15 +468,19 @@
      */
             status_t    attachAuxEffect(int effectId);
 
-    /* Obtains a buffer of "frameCount" frames. The buffer must be
-     * filled entirely, and then released with releaseBuffer().
-     * If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers available,
-     * at which point NO_MORE_BUFFERS is returned.
+    /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
+     * After filling these slots with data, the caller should release them with releaseBuffer().
+     * If the track buffer is not full, obtainBuffer() returns as many contiguous
+     * [empty slots for] frames as are available immediately.
+     * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
+     * regardless of the value of waitCount.
+     * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
+     * maximum timeout based on waitCount; see chart below.
      * Buffers will be returned until the pool
      * is exhausted, at which point obtainBuffer() will either block
-     * or return WOULD_BLOCK depending on the value of the "blocking"
+     * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
+     * Each sample is 16-bit signed PCM.
      *
      * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
      * which should use write() or callback EVENT_MORE_DATA instead.
@@ -457,33 +503,66 @@
      *  raw         pointer to the buffer
      */
 
-        enum {
-            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-            STOPPED = 1
-        };
+    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+                                __attribute__((__deprecated__));
 
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+    /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are available immediately.
+     * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+     * in case the requested amount of frames is in two or more non-contiguous regions.
+     * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
+     */
+            status_t    obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+                                     struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
 
-    /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */
+// EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy
+//            enum {
+//            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
+//            TEAR_DOWN       = 0x80000002,
+//            STOPPED = 1,
+//            STREAM_END_WAIT,
+//            STREAM_END
+//        };
+
+    /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
+    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
 
     /* As a convenience we provide a write() interface to the audio buffer.
+     * Input parameter 'size' is in byte units.
      * This is implemented on top of obtainBuffer/releaseBuffer. For best
      * performance use callbacks. Returns actual number of bytes written >= 0,
      * or one of the following negative status codes:
-     *      INVALID_OPERATION   AudioTrack is configured for shared buffer mode
+     *      INVALID_OPERATION   AudioTrack is configured for static buffer or streaming mode
      *      BAD_VALUE           size is invalid
-     *      STOPPED             AudioTrack was stopped during the write
-     *      NO_MORE_BUFFERS     when obtainBuffer() returns same
+     *      WOULD_BLOCK         when obtainBuffer() returns same, or
+     *                          AudioTrack was stopped during the write
      *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
-     * Not supported for static buffer mode.
      */
             ssize_t     write(const void* buffer, size_t size);
 
     /*
      * Dumps the state of an audio track.
      */
-            status_t dump(int fd, const Vector<String16>& args) const;
+            status_t    dump(int fd, const Vector<String16>& args) const;
+
+    /*
+     * Return the total number of frames which AudioFlinger desired but were unavailable,
+     * and thus which resulted in an underrun.  Reset to zero by stop().
+     */
+            uint32_t    getUnderrunFrames() const;
+
+    /* Get the flags */
+            audio_output_flags_t getFlags() const { return mFlags; }
+
+    /* Set parameters - only possible when using direct output */
+            status_t    setParameters(const String8& keyValuePairs);
+
+    /* Get parameters */
+            String8     getParameters(const String8& keys);
 
 protected:
     /* copying audio tracks is not allowed */
@@ -502,41 +581,61 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        pauseConditional();
+                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
         friend class AudioTrack;
         virtual bool        threadLoop();
-        AudioTrack& mReceiver;
-        ~AudioTrackThread();
+        AudioTrack&         mReceiver;
+        virtual ~AudioTrackThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
         bool                mPaused;    // whether thread is currently paused
+        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
     };
 
             // body of AudioTrackThread::threadLoop()
-            bool processAudioBuffer(const sp<AudioTrackThread>& thread);
+            // returns the maximum amount of time before we would like to run again, where:
+            //      0           immediately
+            //      > 0         no later than this many nanoseconds from now
+            //      NS_WHENEVER still active but no particular deadline
+            //      NS_INACTIVE inactive so don't run again until re-started
+            //      NS_NEVER    never again
+            static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+            nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread);
+            status_t processStreamEnd(int32_t waitCount);
+
 
             // caller must hold lock on mLock for all _l methods
+
             status_t createTrack_l(audio_stream_type_t streamType,
                                  uint32_t sampleRate,
                                  audio_format_t format,
                                  size_t frameCount,
                                  audio_output_flags_t flags,
                                  const sp<IMemory>& sharedBuffer,
-                                 audio_io_handle_t output);
+                                 audio_io_handle_t output,
+                                 size_t epoch);
 
-            // can only be called when !mActive
+            // can only be called when mState != STATE_ACTIVE
             void flush_l();
 
-            status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
+            void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
             audio_io_handle_t getOutput_l();
-            status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
-            bool stopped_l() const { return !mActive; }
 
+            // FIXME enum is faster than strcmp() for parameter 'from'
+            status_t restoreTrack_l(const char *from);
+
+            bool     isOffloaded() const
+                { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+
+    // may be changed if IAudioTrack is re-created
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
-    sp<AudioTrackThread>    mAudioTrackThread;
+    audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
 
+    sp<AudioTrackThread>    mAudioTrackThread;
     float                   mVolume[2];
     float                   mSendLevel;
     uint32_t                mSampleRate;
@@ -544,62 +643,94 @@
     size_t                  mReqFrameCount;         // frame count to request the next time a new
                                                     // IAudioTrack is needed
 
-    audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
 
-            // Starting address of buffers in shared memory.  If there is a shared buffer, mBuffers
-            // is the value of pointer() for the shared buffer, otherwise mBuffers points
-            // immediately after the control block.  This address is for the mapping within client
-            // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
-    void*                   mBuffers;
-
+    // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;
     uint32_t                mChannelCount;
     audio_channel_mask_t    mChannelMask;
+    transfer_type           mTransfer;
 
-                // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.
-                // For 8-bit PCM data, mFrameSizeAF is
-                // twice as large because data is expanded to 16-bit before being stored in buffer.
+    // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.  For 8-bit PCM data, it's
+    // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
     size_t                  mFrameSize;             // app-level frame size
     size_t                  mFrameSizeAF;           // AudioFlinger frame size
 
     status_t                mStatus;
-    uint32_t                mLatency;
 
-    bool                    mActive;                // protected by mLock
+    // can change dynamically when IAudioTrack invalidated
+    uint32_t                mLatency;               // in ms
 
+    // Indicates the current track state.  Protected by mLock.
+    enum State {
+        STATE_ACTIVE,
+        STATE_STOPPED,
+        STATE_PAUSED,
+        STATE_PAUSED_STOPPING,
+        STATE_FLUSHED,
+        STATE_STOPPING,
+    }                       mState;
+
+    // for client callback handler
     callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;              // for client callback handler
+    void*                   mUserData;
 
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
-                                                    // notification callback
+                                                    // notification callback,
+                                                    // at initial source sample rate
     uint32_t                mNotificationFramesAct; // actual number of frames between each
-                                                    // notification callback
+                                                    // notification callback,
+                                                    // at initial source sample rate
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh next 2
+
+    // These are private to processAudioBuffer(), and are not protected by a lock
+    uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
+    bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
+    uint32_t                mObservedSequence;      // last observed value of mSequence
+
     sp<IMemory>             mSharedBuffer;
-    int                     mLoopCount;
-    uint32_t                mRemainingFrames;
+    uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
     uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
-    uint32_t                mUpdatePeriod;          // in frames
+    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
-    bool                    mFlushed; // FIXME will be made obsolete by making flush() synchronous
     audio_output_flags_t    mFlags;
     int                     mSessionId;
     int                     mAuxEffectId;
 
-    // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock:
-    //      1. mLock
-    //      2. mCblk->lock
-    // It is OK to lock only mCblk->lock.
     mutable Mutex           mLock;
 
     bool                    mIsTimed;
     int                     mPreviousPriority;          // before start()
     SchedPolicy             mPreviousSchedulingGroup;
-    AudioTrackClientProxy*  mProxy;
     bool                    mAwaitBoost;    // thread should wait for priority boost before running
+
+    // The proxy should only be referenced while a lock is held because the proxy isn't
+    // multi-thread safe, especially the SingleStateQueue part of the proxy.
+    // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+    // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+    // them around in case they are replaced during the obtainBuffer().
+    sp<StaticAudioTrackClientProxy> mStaticProxy;   // for type safety only
+    sp<AudioTrackClientProxy>       mProxy;         // primary owner of the memory
+
+    bool                    mInUnderrun;            // whether track is currently in underrun state
+    String8                 mName;                  // server's name for this IAudioTrack
+
+private:
+    class DeathNotifier : public IBinder::DeathRecipient {
+    public:
+        DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
+    protected:
+        virtual void        binderDied(const wp<IBinder>& who);
+    private:
+        const wp<AudioTrack> mAudioTrack;
+    };
+
+    sp<DeathNotifier>       mDeathNotifier;
+    uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
+    audio_io_handle_t       mOutput;                // cached output io handle
 };
 
 class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 9c3067e..49f921b 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -49,6 +49,7 @@
         TRACK_DEFAULT = 0,  // client requests a default AudioTrack
         TRACK_TIMED   = 1,  // client requests a TimedAudioTrack
         TRACK_FAST    = 2,  // client requests a fast AudioTrack or AudioRecord
+        TRACK_OFFLOAD = 4,  // client requests offload to hw codec
     };
     typedef uint32_t track_flags_t;
 
@@ -66,6 +67,10 @@
                                 audio_io_handle_t output,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int *sessionId,
+                                // input: ignored
+                                // output: server's description of IAudioTrack for display in logs.
+                                // Don't attempt to parse, as the format could change.
+                                String8& name,
                                 status_t *status) = 0;
 
     virtual sp<IAudioRecord> openRecord(
@@ -74,7 +79,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                track_flags_t flags,
+                                track_flags_t *flags,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int *sessionId,
                                 status_t *status) = 0;
@@ -124,7 +129,9 @@
     virtual     String8     getParameters(audio_io_handle_t ioHandle, const String8& keys)
                                     const = 0;
 
-    // register a current process for audio output change notifications
+    // Register an object to receive audio input/output change and track notifications.
+    // For a given calling pid, AudioFlinger disregards any registrations after the first.
+    // Thus the IAudioFlingerClient must be a singleton per process.
     virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
 
     // retrieve the audio recording buffer size
@@ -137,7 +144,8 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags) = 0;
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                     audio_io_handle_t output2) = 0;
     virtual status_t closeOutput(audio_io_handle_t output) = 0;
@@ -193,6 +201,10 @@
     virtual uint32_t getPrimaryOutputSamplingRate() = 0;
     virtual size_t getPrimaryOutputFrameCount() = 0;
 
+    // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+    // and should be called at most once.  For a definition of what "low RAM" means, see
+    // android.app.ActivityManager.isLowRamDevice().
+    virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index b5ad4ef..09b9ea6 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -53,7 +53,8 @@
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0) = 0;
@@ -95,6 +96,9 @@
     virtual status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
+    // Check if offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming or offload property is enabled
+    virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
 };
 
 
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index d6e3141..eccc2ca 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -34,6 +34,9 @@
 public:
     DECLARE_META_INTERFACE(AudioRecord);
 
+    /* get this tracks control block */
+    virtual sp<IMemory> getCblk() const = 0;
+
     /* After it's created the track is not active. Call start() to
      * make it active.
      */
@@ -44,9 +47,6 @@
      * will be processed, unless flush() is called.
      */
     virtual void        stop() = 0;
-
-    /* get this tracks control block */
-    virtual sp<IMemory> getCblk() const = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 144be0e..1014403 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -25,6 +25,7 @@
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
 #include <utils/LinearTransform.h>
+#include <utils/String8.h>
 
 namespace android {
 
@@ -82,6 +83,9 @@
        or Tungsten time. The values for target are defined in AudioTrack.h */
     virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
                                               int target) = 0;
+
+    /* Send parameters to the audio hardware */
+    virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
index 6d27b18..54fefa3 100644
--- a/include/media/IHDCP.h
+++ b/include/media/IHDCP.h
@@ -17,6 +17,7 @@
 #include <binder/IInterface.h>
 #include <media/hardware/HDCPAPI.h>
 #include <media/stagefright/foundation/ABase.h>
+#include <ui/GraphicBuffer.h>
 
 namespace android {
 
@@ -59,6 +60,20 @@
             const void *inData, size_t size, uint32_t streamCTR,
             uint64_t *outInputCTR, void *outData) = 0;
 
+    // Encrypt data according to the HDCP spec. "size" bytes of data starting
+    // at location "offset" are available in "buffer" (buffer handle). "size"
+    // may not be a multiple of 128 bits (16 bytes). An equal number of
+    // encrypted bytes should be written to the buffer at "outData" (virtual
+    // address). This operation is to be synchronous, i.e. this call does not
+    // return until outData contains size bytes of encrypted data.
+    // streamCTR will be assigned by the caller (to 0 for the first PES stream,
+    // 1 for the second and so on)
+    // inputCTR _will_be_maintained_by_the_callee_ for each PES stream.
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData) = 0;
+
     // DECRYPTION only:
     // Decrypt data according to the HDCP spec.
     // "size" bytes of encrypted data are available at "inData"
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 0b1d1e4..db9093a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -97,6 +97,10 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0;
 
+    virtual status_t updateGraphicBufferInMeta(
+            node_id node, OMX_U32 port_index,
+            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
+
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer) = 0;
@@ -130,6 +134,17 @@
             node_id node,
             const char *parameter_name,
             OMX_INDEXTYPE *index) = 0;
+
+    enum InternalOptionType {
+        INTERNAL_OPTION_SUSPEND,  // data is a bool
+        INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,  // data is an int64_t
+    };
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size) = 0;
 };
 
 struct omx_message {
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index 0616bf0..388f767 100644
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -88,7 +88,7 @@
     EAS_DATA_HANDLE     mEasData;
     EAS_FILE_LOCATOR    mEasJetFileLoc;
     EAS_PCM*            mAudioBuffer;// EAS renders the MIDI data into this buffer,
-    AudioTrack*         mAudioTrack; // and we play it in this audio track
+    sp<AudioTrack>      mAudioTrack; // and we play it in this audio track
     int                 mTrackBufferSize;
     S_JET_STATUS        mJetStatus;
     S_JET_STATUS        mPreviousJetStatus;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 9a75f81..3b151ef 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -74,9 +74,18 @@
     // AudioSink: abstraction layer for audio output
     class AudioSink : public RefBase {
     public:
+        enum cb_event_t {
+            CB_EVENT_FILL_BUFFER,   // Request to write more data to buffer.
+            CB_EVENT_STREAM_END,    // Sent after all the buffers queued in AF and HW are played
+                                    // back (after stop is called)
+            CB_EVENT_TEAR_DOWN      // The AudioTrack was invalidated due to use case change:
+                                    // Need to re-evaluate offloading options
+        };
+
         // Callback returns the number of bytes actually written to the buffer.
         typedef size_t (*AudioCallback)(
-                AudioSink *audioSink, void *buffer, size_t size, void *cookie);
+                AudioSink *audioSink, void *buffer, size_t size, void *cookie,
+                        cb_event_t event);
 
         virtual             ~AudioSink() {}
         virtual bool        ready() const = 0; // audio output is open and ready
@@ -99,9 +108,10 @@
                 int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 AudioCallback cb = NULL,
                 void *cookie = NULL,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE) = 0;
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL) = 0;
 
-        virtual void        start() = 0;
+        virtual status_t    start() = 0;
         virtual ssize_t     write(const void* buffer, size_t size) = 0;
         virtual void        stop() = 0;
         virtual void        flush() = 0;
@@ -110,6 +120,9 @@
 
         virtual status_t    setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
         virtual bool        needsTrailingPadding() { return true; }
+
+        virtual status_t    setParameters(const String8& keyValuePairs) { return NO_ERROR; };
+        virtual String8     getParameters(const String8& keys) { return String8::empty(); };
     };
 
                         MediaPlayerBase() : mCookie(0), mNotify(0) {}
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 7bf3069..9e5654f 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -118,7 +118,7 @@
 class SoundChannel : public SoundEvent {
 public:
     enum state { IDLE, RESUMING, STOPPING, PAUSED, PLAYING };
-    SoundChannel() : mAudioTrack(NULL), mState(IDLE), mNumChannels(1),
+    SoundChannel() : mState(IDLE), mNumChannels(1),
             mPos(0), mToggle(0), mAutoPaused(false) {}
     ~SoundChannel();
     void init(SoundPool* soundPool);
@@ -148,7 +148,7 @@
     bool doStop_l();
 
     SoundPool*          mSoundPool;
-    AudioTrack*         mAudioTrack;
+    sp<AudioTrack>      mAudioTrack;
     SoundEvent          mNextEvent;
     Mutex               mLock;
     int                 mState;
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 2183fbe..98c4332 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -160,7 +160,7 @@
     bool isInited() { return (mState == TONE_IDLE)?false:true;}
 
     // returns the audio session this ToneGenerator belongs to or 0 if an error occured.
-    int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); }
+    int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); }
 
 private:
 
@@ -264,7 +264,7 @@
     unsigned short mLoopCounter; // Current tone loopback count
 
     uint32_t mSamplingRate;  // AudioFlinger Sampling rate
-    AudioTrack *mpAudioTrack;  // Pointer to audio track used for playback
+    sp<AudioTrack> mpAudioTrack;  // Pointer to audio track used for playback
     Mutex mLock;  // Mutex to control concurent access to ToneGenerator object from audio callback and application API
     Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
     Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index aa58905..e429263 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -19,7 +19,7 @@
 
 #include <media/AudioEffect.h>
 #include <audio_effects/effect_visualizer.h>
-#include <string.h>
+#include <utils/Thread.h>
 
 /**
  * The Visualizer class enables application to retrieve part of the currently playing audio for
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 14381c7..1afd7f7 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -42,6 +42,9 @@
     MEDIA_BUFFERING_UPDATE  = 3,
     MEDIA_SEEK_COMPLETE     = 4,
     MEDIA_SET_VIDEO_SIZE    = 5,
+    MEDIA_STARTED           = 6,
+    MEDIA_PAUSED            = 7,
+    MEDIA_STOPPED           = 8,
     MEDIA_TIMED_TEXT        = 99,
     MEDIA_ERROR             = 100,
     MEDIA_INFO              = 200,
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 107ba66..6d59ea7 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -90,6 +90,8 @@
     virtual ~Timeline();
 #endif
 
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
     static size_t sharedSize(size_t size);
 
 #if 0
@@ -110,8 +112,12 @@
 class Writer : public RefBase {
 public:
     Writer();                   // dummy nop implementation without shared memory
+
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // The size of the shared memory must be at least Timeline::sharedSize(size).
     Writer(size_t size, void *shared);
     Writer(size_t size, const sp<IMemory>& iMemory);
+
     virtual ~Writer() { }
 
     virtual void    log(const char *string);
@@ -165,8 +171,12 @@
 
 class Reader : public RefBase {
 public:
+
+    // Input parameter 'size' is the desired size of the timeline in byte units.
+    // The size of the shared memory must be at least Timeline::sharedSize(size).
     Reader(size_t size, const void *shared);
     Reader(size_t size, const sp<IMemory>& iMemory);
+
     virtual ~Reader() { }
 
     void    dump(int fd, size_t indent = 0);
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index df25d7b..a8ffd4a 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -124,7 +124,8 @@
     };
 
     enum {
-        kFlagIsSecure   = 1,
+        kFlagIsSecure                                 = 1,
+        kFlagPushBlankBuffersToNativeWindowOnShutdown = 2,
     };
 
     struct BufferInfo {
@@ -138,6 +139,7 @@
 
         IOMX::buffer_id mBufferID;
         Status mStatus;
+        unsigned mDequeuedAt;
 
         sp<ABuffer> mData;
         sp<GraphicBuffer> mGraphicBuffer;
@@ -182,7 +184,7 @@
 
     bool mSentFormat;
     bool mIsEncoder;
-
+    bool mUseMetadataOnEncoderOutput;
     bool mShutdownInProgress;
 
     // If "mKeepComponentAllocated" we only transition back to Loaded state
@@ -194,12 +196,22 @@
 
     bool mChannelMaskPresent;
     int32_t mChannelMask;
+    unsigned mDequeueCounter;
+    bool mStoreMetaDataInOutputBuffers;
+    int32_t mMetaDataBuffersToSubmit;
+
+    int64_t mRepeatFrameDelayUs;
 
     status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
     status_t allocateBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffer(OMX_U32 portIndex, size_t i);
 
+    status_t configureOutputBuffersFromNativeWindow(
+            OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
+            OMX_U32 *nMinUndequeuedBuffers);
+    status_t allocateOutputMetaDataBuffers();
+    status_t submitOutputMetaDataBuffer();
     status_t allocateOutputBuffersFromNativeWindow();
     status_t cancelBufferToNativeWindow(BufferInfo *info);
     status_t freeOutputBuffersNotOwnedByComponent();
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 1dc408f..912a43c 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -36,8 +36,16 @@
         SEEK_COMPLETE
     };
 
+    enum {
+        ALLOW_DEEP_BUFFERING = 0x01,
+        USE_OFFLOAD = 0x02,
+        HAS_VIDEO   = 0x1000,
+        IS_STREAMING = 0x2000
+
+    };
+
     AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
-                bool allowDeepBuffering = false,
+                uint32_t flags = 0,
                 AwesomePlayer *audioObserver = NULL);
 
     virtual ~AudioPlayer();
@@ -51,7 +59,7 @@
     status_t start(bool sourceAlreadyStarted = false);
 
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
 
     // Returns the timestamp of the last buffer played (in us).
     int64_t getMediaTimeUs();
@@ -67,10 +75,12 @@
 
     status_t setPlaybackRatePermille(int32_t ratePermille);
 
+    void notifyAudioEOS();
+
 private:
     friend class VideoEditorAudioPlayer;
     sp<MediaSource> mSource;
-    AudioTrack *mAudioTrack;
+    sp<AudioTrack> mAudioTrack;
 
     MediaBuffer *mInputBuffer;
 
@@ -97,17 +107,20 @@
     MediaBuffer *mFirstBuffer;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
-    bool mAllowDeepBuffering;       // allow audio deep audio buffers. Helps with low power audio
-                                    // playback but implies high latency
     AwesomePlayer *mObserver;
     int64_t mPinnedTimeUs;
 
+    bool mPlaying;
+    int64_t mStartPosUs;
+    const uint32_t mCreateFlags;
+
     static void AudioCallback(int event, void *user, void *info);
     void AudioCallback(int event, void *info);
 
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
-            void *data, size_t size, void *me);
+            void *data, size_t size, void *me,
+            MediaPlayerBase::AudioSink::cb_event_t event);
 
     size_t fillBuffer(void *data, size_t size);
 
@@ -116,6 +129,10 @@
     void reset();
 
     uint32_t getNumFramesPendingPlayout() const;
+    int64_t getOutputPlayPositionUs_l() const;
+
+    bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
+    bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }
 
     AudioPlayer(const AudioPlayer &);
     AudioPlayer &operator=(const AudioPlayer &);
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 99f3c3b..4c9aaad 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -73,7 +73,7 @@
     Condition mFrameAvailableCondition;
     Condition mFrameEncodingCompletionCondition;
 
-    AudioRecord *mRecord;
+    sp<AudioRecord> mRecord;
     status_t mInitCheck;
     bool mStarted;
     int32_t mSampleRate;
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index dfb845b..590623b 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -50,7 +50,8 @@
     status_t getCodecCapabilities(
             size_t index, const char *type,
             Vector<ProfileLevel> *profileLevels,
-            Vector<uint32_t> *colorFormats) const;
+            Vector<uint32_t> *colorFormats,
+            uint32_t *flags) const;
 
 private:
     enum Section {
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 81de6e4..85693d4 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -22,7 +22,8 @@
 
 extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
 
-extern const char *MEDIA_MIMETYPE_VIDEO_VPX;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
 extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
 extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
 extern const char *MEDIA_MIMETYPE_VIDEO_H263;
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index ee5e4e2..686f286 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -56,14 +56,11 @@
     ERROR_DRM_TAMPER_DETECTED               = DRM_ERROR_BASE - 7,
     ERROR_DRM_NOT_PROVISIONED               = DRM_ERROR_BASE - 8,
     ERROR_DRM_DEVICE_REVOKED                = DRM_ERROR_BASE - 9,
+    ERROR_DRM_RESOURCE_BUSY                 = DRM_ERROR_BASE - 10,
 
     ERROR_DRM_VENDOR_MAX                    = DRM_ERROR_BASE - 500,
     ERROR_DRM_VENDOR_MIN                    = DRM_ERROR_BASE - 999,
 
-    // Deprecated
-    ERROR_DRM_WV_VENDOR_MAX                 = ERROR_DRM_VENDOR_MAX,
-    ERROR_DRM_WV_VENDOR_MIN                 = ERROR_DRM_VENDOR_MIN,
-
     // Heartbeat Error Codes
     HEARTBEAT_ERROR_BASE = -3000,
     ERROR_HEARTBEAT_TERMINATE_REQUESTED                     = HEARTBEAT_ERROR_BASE,
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 583c3b3..daaf20f 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -361,9 +361,14 @@
 };
 
 struct CodecCapabilities {
+    enum {
+        kFlagSupportsAdaptivePlayback = 1 << 0,
+    };
+
     String8 mComponentName;
     Vector<CodecProfileLevel> mProfileLevels;
     Vector<OMX_U32> mColorFormats;
+    uint32_t mFlags;
 };
 
 // Return a vector of componentNames with supported profile/level pairs
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 5f21da9..db5f947 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -56,7 +56,7 @@
 
 class SurfaceMediaSource : public MediaSource,
                                 public MediaBufferObserver,
-                                protected BufferQueue::ConsumerListener {
+                                protected ConsumerListener {
 public:
     enum { MIN_UNDEQUEUED_BUFFERS = 4};
 
@@ -146,9 +146,13 @@
     // this consumer
     sp<BufferQueue> mBufferQueue;
 
-    // mBufferSlot caches GraphicBuffers from the buffer queue
-    sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+    struct SlotData {
+        sp<GraphicBuffer> mGraphicBuffer;
+        uint64_t mFrameNumber;
+    };
 
+    // mSlots caches GraphicBuffers and frameNumbers from the buffer queue
+    SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS];
 
     // The permenent width and height of SMS buffers
     int mWidth;
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 73940d3..c24f612 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -22,6 +22,8 @@
 #include <stdint.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
+#include <system/audio.h>
+#include <media/MediaPlayerInterface.h>
 
 namespace android {
 
@@ -48,6 +50,15 @@
 
 AString MakeUserAgent();
 
+// Convert a MIME type to a AudioSystem::audio_format
+status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime);
+
+// Send information from MetaData to the HAL via AudioSink
+status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink, const sp<MetaData>& meta);
+
+// Check whether the stream defined by meta can be offloaded to hardware
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming);
+
 }  // namespace android
 
 #endif  // UTILS_H_
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index 2e5fd73..940fc55 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -30,6 +30,7 @@
             const sp<ALooper> looper, const sp<AHandler> &handler);
 
     void unregisterHandler(ALooper::handler_id handlerID);
+    void unregisterStaleHandlers();
 
     status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0);
     void deliverMessage(const sp<AMessage> &msg);
diff --git a/media/libstagefright/wifi-display/ANetworkSession.h b/include/media/stagefright/foundation/ANetworkSession.h
similarity index 97%
rename from media/libstagefright/wifi-display/ANetworkSession.h
rename to include/media/stagefright/foundation/ANetworkSession.h
index 7c62b29..fd3ebaa 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.h
+++ b/include/media/stagefright/foundation/ANetworkSession.h
@@ -77,6 +77,8 @@
             int32_t sessionID, const void *data, ssize_t size = -1,
             bool timeValid = false, int64_t timeUs = -1ll);
 
+    status_t switchToWebSocketMode(int32_t sessionID);
+
     enum NotificationReason {
         kWhatError,
         kWhatConnected,
@@ -84,6 +86,7 @@
         kWhatData,
         kWhatDatagram,
         kWhatBinaryData,
+        kWhatWebSocketMessage,
         kWhatNetworkStall,
     };
 
diff --git a/media/libstagefright/wifi-display/ParsedMessage.h b/include/media/stagefright/foundation/ParsedMessage.h
similarity index 96%
rename from media/libstagefright/wifi-display/ParsedMessage.h
rename to include/media/stagefright/foundation/ParsedMessage.h
index e9a1859..9d43a93 100644
--- a/media/libstagefright/wifi-display/ParsedMessage.h
+++ b/include/media/stagefright/foundation/ParsedMessage.h
@@ -32,7 +32,7 @@
 
     const char *getContent() const;
 
-    void getRequestField(size_t index, AString *field) const;
+    bool getRequestField(size_t index, AString *field) const;
     bool getStatusCode(int32_t *statusCode) const;
 
     AString debugString() const;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 41e20f8..1379379 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -22,32 +22,51 @@
 
 #include <utils/threads.h>
 #include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <media/nbaio/roundup.h>
+#include <media/SingleStateQueue.h>
+#include <private/media/StaticAudioTrackState.h>
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
-// Maximum cumulated timeout milliseconds before restarting audioflinger thread
-#define MAX_STARTUP_TIMEOUT_MS  3000    // Longer timeout period at startup to cope with A2DP
-                                        // init time
-#define MAX_RUN_TIMEOUT_MS      1000
-#define WAIT_PERIOD_MS          10
-
-#define CBLK_UNDERRUN   0x01 // set: underrun (out) or overrrun (in), clear: no underrun or overrun
+// for audio_track_cblk_t::mFlags
+#define CBLK_UNDERRUN   0x01 // set by server immediately on output underrun, cleared by client
 #define CBLK_FORCEREADY 0x02 // set: track is considered ready immediately by AudioFlinger,
                              // clear: track is ready when buffer full
 #define CBLK_INVALID    0x04 // track buffer invalidated by AudioFlinger, need to re-create
-#define CBLK_DISABLED   0x08 // track disabled by AudioFlinger due to underrun, need to re-start
+#define CBLK_DISABLED   0x08 // output track disabled by AudioFlinger due to underrun,
+                             // need to re-start.  Unlike CBLK_UNDERRUN, this is not set
+                             // immediately, but only after a long string of underruns.
+// 0x10 unused
+#define CBLK_LOOP_CYCLE 0x20 // set by server each time a loop cycle other than final one completes
+#define CBLK_LOOP_FINAL 0x40 // set by server when the final loop cycle completes
+#define CBLK_BUFFER_END 0x80 // set by server when the position reaches end of buffer if not looping
+#define CBLK_OVERRUN   0x100 // set by server immediately on input overrun, cleared by client
+#define CBLK_INTERRUPT 0x200 // set by client on interrupt(), cleared by client in obtainBuffer()
+#define CBLK_STREAM_END_DONE 0x400 // set by server on render completion, cleared by client
+
+//EL_FIXME 20 seconds may not be enough and must be reconciled with new obtainBuffer implementation
+#define MAX_RUN_OFFLOADED_TIMEOUT_MS 20000 //assuming upto a maximum of 20 seconds of offloaded
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
-    volatile int32_t mFront;
-    volatile int32_t mRear;
+    // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
+    volatile int32_t mFront;    // read by server
+    volatile int32_t mRear;     // write by client
+    volatile int32_t mFlush;    // incremented by client to indicate a request to flush;
+                                // server notices and discards all data between mFront and mRear
+    volatile uint32_t mUnderrunFrames;  // server increments for each unavailable but desired frame
 };
 
-// future
+typedef SingleStateQueue<StaticAudioTrackState> StaticAudioTrackSingleStateQueue;
+
 struct AudioTrackSharedStatic {
-    int mReserved;
+    StaticAudioTrackSingleStateQueue::Shared
+                    mSingleStateQueue;
+    size_t          mBufferPosition;    // updated asynchronously by server,
+                                        // "for entertainment purposes only"
 };
 
 // ----------------------------------------------------------------------------
@@ -55,65 +74,63 @@
 // Important: do not add any virtual methods, including ~
 struct audio_track_cblk_t
 {
+                // Since the control block is always located in shared memory, this constructor
+                // is only used for placement new().  It is never used for regular new() or stack.
+                            audio_track_cblk_t();
+                /*virtual*/ ~audio_track_cblk_t() { }
+
                 friend class Proxy;
+                friend class ClientProxy;
                 friend class AudioTrackClientProxy;
                 friend class AudioRecordClientProxy;
                 friend class ServerProxy;
+                friend class AudioTrackServerProxy;
+                friend class AudioRecordServerProxy;
 
     // The data members are grouped so that members accessed frequently and in the same context
     // are in the same line of data cache.
-                Mutex       lock;           // sizeof(int)
-                Condition   cv;             // sizeof(int)
 
-                // next 4 are offsets within "buffers"
-    volatile    uint32_t    user;
-    volatile    uint32_t    server;
-                uint32_t    userBase;
-                uint32_t    serverBase;
-
-                int         mPad1;          // unused, but preserves cache line alignment
+                uint32_t    mServer;    // Number of filled frames consumed by server (mIsOut),
+                                        // or filled frames provided by server (!mIsOut).
+                                        // It is updated asynchronously by server without a barrier.
+                                        // The value should be used "for entertainment purposes only",
+                                        // which means don't make important decisions based on it.
 
                 size_t      frameCount_;    // used during creation to pass actual track buffer size
                                             // from AudioFlinger to client, and not referenced again
-                                            // FIXME remove here and replace by createTrack() in/out parameter
+                                            // FIXME remove here and replace by createTrack() in/out
+                                            // parameter
                                             // renamed to "_" to detect incorrect use
 
-                // Cache line boundary (32 bytes)
+    volatile    int32_t     mFutex;     // event flag: down (P) by client,
+                                        // up (V) by server or binderDied() or interrupt()
+#define CBLK_FUTEX_WAKE 1               // if event flag bit is set, then a deferred wake is pending
 
-                uint32_t    loopStart;
-                uint32_t    loopEnd;        // read-only for server, read/write for client
-                int         loopCount;      // read/write for client
+private:
+
+                size_t      mMinimum;       // server wakes up client if available >= mMinimum
 
                 // Channel volumes are fixed point U4.12, so 0x1000 means 1.0.
                 // Left channel is in [0:15], right channel is in [16:31].
                 // Always read and write the combined pair atomically.
                 // For AudioTrack only, not used by AudioRecord.
-private:
                 uint32_t    mVolumeLR;
 
                 uint32_t    mSampleRate;    // AudioTrack only: client's requested sample rate in Hz
                                             // or 0 == default. Write-only client, read-only server.
 
-                uint8_t     mPad2;           // unused
-
-public:
-                // read-only for client, server writes once at initialization and is then read-only
-                uint8_t     mName;           // normal tracks: track name, fast tracks: track index
-
-                // used by client only
-                uint16_t    bufferTimeoutMs; // Maximum cumulated timeout before restarting
-                                             // audioflinger
-
-                uint16_t    waitTimeMs;      // Cumulated wait time, used by client only
-private:
                 // client write-only, server read-only
                 uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
+
+                uint16_t    mPad2;           // unused
+
 public:
-    volatile    int32_t     flags;
+
+    volatile    int32_t     mFlags;         // combinations of CBLK_*
 
                 // Cache line boundary (32 bytes)
 
-#if 0
+public:
                 union {
                     AudioTrackSharedStreaming   mStreaming;
                     AudioTrackSharedStatic      mStatic;
@@ -121,25 +138,6 @@
                 } u;
 
                 // Cache line boundary (32 bytes)
-#endif
-
-                // Since the control block is always located in shared memory, this constructor
-                // is only used for placement new().  It is never used for regular new() or stack.
-                            audio_track_cblk_t();
-
-private:
-                // if there is a shared buffer, "buffers" is the value of pointer() for the shared
-                // buffer, otherwise "buffers" points immediately after the control block
-                void*       buffer(void *buffers, uint32_t frameSize, size_t offset) const;
-
-                bool        tryLock();
-
-                // isOut == true means AudioTrack, isOut == false means AudioRecord
-                bool        stepServer(size_t stepCount, size_t frameCount, bool isOut);
-                uint32_t    stepUser(size_t stepCount, size_t frameCount, bool isOut);
-                uint32_t    framesAvailable(size_t frameCount, bool isOut);
-                uint32_t    framesAvailable_l(size_t frameCount, bool isOut);
-                uint32_t    framesReady(bool isOut);
 };
 
 // ----------------------------------------------------------------------------
@@ -147,29 +145,32 @@
 // Proxy for shared memory control block, to isolate callers from needing to know the details.
 // There is exactly one ClientProxy and one ServerProxy per shared memory control block.
 // The proxies are located in normal memory, and are not multi-thread safe within a given side.
-class Proxy {
+class Proxy : public RefBase {
 protected:
-    Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize) { }
+    Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut,
+            bool clientInServer);
     virtual ~Proxy() { }
 
 public:
-    void*   buffer(size_t offset) const {
-        return mCblk->buffer(mBuffers, mFrameSize, offset);
-    }
+    struct Buffer {
+        size_t  mFrameCount;            // number of frames available in this buffer
+        void*   mRaw;                   // pointer to first frame
+        size_t  mNonContig;             // number of additional non-contiguous frames available
+    };
 
 protected:
     // These refer to shared memory, and are virtual addresses with respect to the current process.
     // They may have different virtual addresses within the other process.
-    audio_track_cblk_t* const   mCblk;          // the control block
-    void* const                 mBuffers;       // starting address of buffers
+    audio_track_cblk_t* const   mCblk;  // the control block
+    void* const     mBuffers;           // starting address of buffers
 
-    const size_t                mFrameCount;    // not necessarily a power of 2
-    const size_t                mFrameSize;     // in bytes
-#if 0
-    const size_t                mFrameCountP2;  // mFrameCount rounded to power of 2, streaming mode
-#endif
-
+    const size_t    mFrameCount;        // not necessarily a power of 2
+    const size_t    mFrameSize;         // in bytes
+    const size_t    mFrameCountP2;      // mFrameCount rounded to power of 2, streaming mode
+    const bool      mIsOut;             // true for AudioTrack, false for AudioRecord
+    const bool      mClientInServer;    // true for OutputTrack, false for AudioTrack & AudioRecord
+    bool            mIsShutdown;        // latch set to true when shared memory corruption detected
+    size_t          mUnreleased;        // unreleased frames remaining from most recent obtainBuffer
 };
 
 // ----------------------------------------------------------------------------
@@ -177,9 +178,86 @@
 // Proxy seen by AudioTrack client and AudioRecord client
 class ClientProxy : public Proxy {
 protected:
-    ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : Proxy(cblk, buffers, frameCount, frameSize) { }
+    ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+            bool isOut, bool clientInServer);
     virtual ~ClientProxy() { }
+
+public:
+    static const struct timespec kForever;
+    static const struct timespec kNonBlocking;
+
+    // Obtain a buffer with filled frames (reading) or empty frames (writing).
+    // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+    // calls to releaseBuffer().  In that case, the final obtainBuffer() is the one that effectively
+    // sets or extends the unreleased frame count.
+    // On entry:
+    //  buffer->mFrameCount should be initialized to maximum number of desired frames,
+    //      which must be > 0.
+    //  buffer->mNonContig is unused.
+    //  buffer->mRaw is unused.
+    //  requested is the requested timeout in local monotonic delta time units:
+    //      NULL or &kNonBlocking means non-blocking (zero timeout).
+    //      &kForever means block forever (infinite timeout).
+    //      Other values mean a specific timeout in local monotonic delta time units.
+    //  elapsed is a pointer to a location that will hold the total local monotonic time that
+    //      elapsed while blocked, or NULL if not needed.
+    // On exit:
+    //  buffer->mFrameCount has the actual number of contiguous available frames,
+    //      which is always 0 when the return status != NO_ERROR.
+    //  buffer->mNonContig is the number of additional non-contiguous available frames.
+    //  buffer->mRaw is a pointer to the first available frame,
+    //      or NULL when buffer->mFrameCount == 0.
+    // The return status is one of:
+    //  NO_ERROR    Success, buffer->mFrameCount > 0.
+    //  WOULD_BLOCK Non-blocking mode and no frames are available.
+    //  TIMED_OUT   Timeout occurred before any frames became available.
+    //              This can happen even for infinite timeout, due to a spurious wakeup.
+    //              In this case, the caller should investigate and then re-try as appropriate.
+    //  DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
+    //  -EINTR      Call has been interrupted.  Look around to see why, and then perhaps try again.
+    //  NO_INIT     Shared memory is corrupt.
+    // Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
+    status_t    obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
+            struct timespec *elapsed = NULL);
+
+    // Release (some of) the frames last obtained.
+    // On entry, buffer->mFrameCount should have the number of frames to release,
+    // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+    // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+    // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+    // On exit:
+    //  buffer->mFrameCount is zero.
+    //  buffer->mRaw is NULL.
+    void        releaseBuffer(Buffer* buffer);
+
+    // Call after detecting server's death
+    void        binderDied();
+
+    // Call to force an obtainBuffer() to return quickly with -EINTR
+    void        interrupt();
+
+    size_t      getPosition() {
+        return mEpoch + mCblk->mServer;
+    }
+
+    void        setEpoch(size_t epoch) {
+        mEpoch = epoch;
+    }
+
+    void        setMinimum(size_t minimum) {
+        mCblk->mMinimum = minimum;
+    }
+
+    // Return the number of frames that would need to be obtained and released
+    // in order for the client to be aligned at start of buffer
+    virtual size_t  getMisalignment();
+
+    size_t      getEpoch() const {
+        return mEpoch;
+    }
+
+private:
+    size_t      mEpoch;
 };
 
 // ----------------------------------------------------------------------------
@@ -187,8 +265,10 @@
 // Proxy used by AudioTrack client, which also includes AudioFlinger::PlaybackThread::OutputTrack
 class AudioTrackClientProxy : public ClientProxy {
 public:
-    AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+    AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize, bool clientInServer = false)
+        : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
+          clientInServer) { }
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -208,27 +288,42 @@
         mCblk->mSampleRate = sampleRate;
     }
 
-    // called by:
-    //   PlaybackThread::OutputTrack::write
-    //   AudioTrack::createTrack_l
-    //   AudioTrack::releaseBuffer
-    //   AudioTrack::reload
-    //   AudioTrack::restoreTrack_l (2 places)
-    size_t      stepUser(size_t stepCount) {
-        return mCblk->stepUser(stepCount, mFrameCount, true /*isOut*/);
+    virtual void flush();
+
+    virtual uint32_t    getUnderrunFrames() const {
+        return mCblk->u.mStreaming.mUnderrunFrames;
     }
 
-    // called by AudioTrack::obtainBuffer and AudioTrack::processBuffer
-    size_t      framesAvailable() {
-        return mCblk->framesAvailable(mFrameCount, true /*isOut*/);
+    bool        clearStreamEndDone();   // and return previous value
+
+    bool        getStreamEndDone() const;
+
+    status_t    waitStreamEndDone(const struct timespec *requested);
+};
+
+class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
+public:
+    StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize);
+    virtual ~StaticAudioTrackClientProxy() { }
+
+    virtual void    flush();
+
+#define MIN_LOOP    16  // minimum length of each loop iteration in frames
+            void    setLoop(size_t loopStart, size_t loopEnd, int loopCount);
+            size_t  getBufferPosition();
+
+    virtual size_t  getMisalignment() {
+        return 0;
     }
 
-    // called by AudioTrack::obtainBuffer and PlaybackThread::OutputTrack::obtainBuffer
-    // FIXME remove this API since it assumes a lock that should be invisible to caller
-    size_t      framesAvailable_l() {
-        return mCblk->framesAvailable_l(mFrameCount, true /*isOut*/);
+    virtual uint32_t    getUnderrunFrames() const {
+        return 0;
     }
 
+private:
+    StaticAudioTrackSingleStateQueue::Mutator   mMutator;
+    size_t          mBufferPosition;    // so that getBufferPosition() appears to be synchronous
 };
 
 // ----------------------------------------------------------------------------
@@ -236,60 +331,132 @@
 // Proxy used by AudioRecord client
 class AudioRecordClientProxy : public ClientProxy {
 public:
-    AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+    AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize)
+        : ClientProxy(cblk, buffers, frameCount, frameSize,
+            false /*isOut*/, false /*clientInServer*/) { }
     ~AudioRecordClientProxy() { }
-
-    // called by AudioRecord::releaseBuffer
-    size_t      stepUser(size_t stepCount) {
-        return mCblk->stepUser(stepCount, mFrameCount, false /*isOut*/);
-    }
-
-    // called by AudioRecord::processBuffer
-    size_t      framesAvailable() {
-        return mCblk->framesAvailable(mFrameCount, false /*isOut*/);
-    }
-
-    // called by AudioRecord::obtainBuffer
-    size_t      framesReady() {
-        return mCblk->framesReady(false /*isOut*/);
-    }
-
 };
 
 // ----------------------------------------------------------------------------
 
 // Proxy used by AudioFlinger server
 class ServerProxy : public Proxy {
+protected:
+    ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+            bool isOut, bool clientInServer);
 public:
-    ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut)
-        : Proxy(cblk, buffers, frameCount, frameSize), mIsOut(isOut) { }
     virtual ~ServerProxy() { }
 
-    // for AudioTrack and AudioRecord
-    bool        step(size_t stepCount) { return mCblk->stepServer(stepCount, mFrameCount, mIsOut); }
+    // Obtain a buffer with filled frames (writing) or empty frames (reading).
+    // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+    // calls to releaseBuffer().  In that case, the final obtainBuffer() is the one that effectively
+    // sets or extends the unreleased frame count.
+    // Always non-blocking.
+    // On entry:
+    //  buffer->mFrameCount should be initialized to maximum number of desired frames,
+    //      which must be > 0.
+    //  buffer->mNonContig is unused.
+    //  buffer->mRaw is unused.
+    // On exit:
+    //  buffer->mFrameCount has the actual number of contiguous available frames,
+    //      which is always 0 when the return status != NO_ERROR.
+    //  buffer->mNonContig is the number of additional non-contiguous available frames.
+    //  buffer->mRaw is a pointer to the first available frame,
+    //      or NULL when buffer->mFrameCount == 0.
+    // The return status is one of:
+    //  NO_ERROR    Success, buffer->mFrameCount > 0.
+    //  WOULD_BLOCK No frames are available.
+    //  NO_INIT     Shared memory is corrupt.
+    virtual status_t    obtainBuffer(Buffer* buffer);
 
+    // Release (some of) the frames last obtained.
+    // On entry, buffer->mFrameCount should have the number of frames to release,
+    // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+    // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+    // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+    // On exit:
+    //  buffer->mFrameCount is zero.
+    //  buffer->mRaw is NULL.
+    virtual void        releaseBuffer(Buffer* buffer);
+
+protected:
+    size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
+    int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+private:
+    bool        mDeferWake;     // whether another releaseBuffer() is expected soon
+};
+
+// Proxy used by AudioFlinger for servicing AudioTrack
+class AudioTrackServerProxy : public ServerProxy {
+public:
+    AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize, bool clientInServer = false)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) { }
+protected:
+    virtual ~AudioTrackServerProxy() { }
+
+public:
     // return value of these methods must be validated by the caller
     uint32_t    getSampleRate() const { return mCblk->mSampleRate; }
     uint16_t    getSendLevel_U4_12() const { return mCblk->mSendLevel; }
     uint32_t    getVolumeLR() const { return mCblk->mVolumeLR; }
 
-    // for AudioTrack only
-    size_t      framesReady() {
-        ALOG_ASSERT(mIsOut);
-        return mCblk->framesReady(true);
-    }
+    // estimated total number of filled frames available to server to read,
+    // which may include non-contiguous frames
+    virtual size_t      framesReady();
 
-    // for AudioRecord only, called by RecordThread::RecordTrack::getNextBuffer
-    // FIXME remove this API since it assumes a lock that should be invisible to caller
-    size_t      framesAvailableIn_l() {
-        ALOG_ASSERT(!mIsOut);
-        return mCblk->framesAvailable_l(mFrameCount, false);
-    }
+    // Currently AudioFlinger will call framesReady() for a fast track from two threads:
+    // FastMixer thread, and normal mixer thread.  This is dangerous, as the proxy is intended
+    // to be called from at most one thread of server, and one thread of client.
+    // As a temporary workaround, this method informs the proxy implementation that it
+    // should avoid doing a state queue poll from within framesReady().
+    // FIXME Change AudioFlinger to not call framesReady() from normal mixer thread.
+    virtual void        framesReadyIsCalledByMultipleThreads() { }
+
+    bool     setStreamEndDone();    // and return previous value
+
+    // Add to the tally of underrun frames, and inform client of underrun
+    virtual void        tallyUnderrunFrames(uint32_t frameCount);
+
+    // Return the total number of frames which AudioFlinger desired but were unavailable,
+    // and thus which resulted in an underrun.
+    virtual uint32_t    getUnderrunFrames() const { return mCblk->u.mStreaming.mUnderrunFrames; }
+};
+
+class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
+public:
+    StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize);
+protected:
+    virtual ~StaticAudioTrackServerProxy() { }
+
+public:
+    virtual size_t      framesReady();
+    virtual void        framesReadyIsCalledByMultipleThreads();
+    virtual status_t    obtainBuffer(Buffer* buffer);
+    virtual void        releaseBuffer(Buffer* buffer);
+    virtual void        tallyUnderrunFrames(uint32_t frameCount);
+    virtual uint32_t    getUnderrunFrames() const { return 0; }
 
 private:
-    const bool  mIsOut;     // true for AudioTrack, false for AudioRecord
+    ssize_t             pollPosition(); // poll for state queue update, and return current position
+    StaticAudioTrackSingleStateQueue::Observer  mObserver;
+    size_t              mPosition;  // server's current play position in frames, relative to 0
+    size_t              mEnd;       // cached value computed from mState, safe for asynchronous read
+    bool                mFramesReadyIsCalledByMultipleThreads;
+    StaticAudioTrackState   mState;
+};
 
+// Proxy used by AudioFlinger for servicing AudioRecord
+class AudioRecordServerProxy : public ServerProxy {
+public:
+    AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/,
+            false /*clientInServer*/) { }
+protected:
+    virtual ~AudioRecordServerProxy() { }
 };
 
 // ----------------------------------------------------------------------------
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
index 702900b..8b362ef 100755
--- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
@@ -568,8 +568,9 @@
 RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId)
     : mRenderer(renderer)
     , mTextureId(textureId) {
-    mST = new GLConsumer(mTextureId);
-    mSTC = new Surface(mST->getBufferQueue());
+    sp<BufferQueue> bq = new BufferQueue();
+    mST = new GLConsumer(bq, mTextureId);
+    mSTC = new Surface(bq);
     native_window_connect(mSTC.get(), NATIVE_WINDOW_API_MEDIA);
 }
 
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
index c111ba8..176f8e9 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -35,8 +35,7 @@
 VideoEditorAudioPlayer::VideoEditorAudioPlayer(
         const sp<MediaPlayerBase::AudioSink> &audioSink,
         PreviewPlayer *observer)
-    : mAudioTrack(NULL),
-      mInputBuffer(NULL),
+    : mInputBuffer(NULL),
       mSampleRate(0),
       mLatencyUs(0),
       mFrameSize(0),
@@ -111,8 +110,7 @@
     } else {
         mAudioTrack->stop();
 
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
 
     // Make sure to release any buffer we hold onto so that the
@@ -151,7 +149,7 @@
     mStarted = false;
 }
 
-void VideoEditorAudioPlayer::resume() {
+status_t VideoEditorAudioPlayer::resume() {
     ALOGV("resume");
 
     AudioMixSettings audioMixSettings;
@@ -182,6 +180,7 @@
     } else {
         mAudioTrack->start();
     }
+    return OK;
 }
 
 status_t VideoEditorAudioPlayer::seekTo(int64_t time_us) {
@@ -538,8 +537,7 @@
                 0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
-            delete mAudioTrack;
-            mAudioTrack = NULL;
+            mAudioTrack.clear();
 
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -578,10 +576,15 @@
 
 size_t VideoEditorAudioPlayer::AudioSinkCallback(
         MediaPlayerBase::AudioSink *audioSink,
-        void *buffer, size_t size, void *cookie) {
+        void *buffer, size_t size, void *cookie,
+        MediaPlayerBase::AudioSink::cb_event_t event) {
     VideoEditorAudioPlayer *me = (VideoEditorAudioPlayer *)cookie;
 
-    return me->fillBuffer(buffer, size);
+    if (event == MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER ) {
+        return me->fillBuffer(buffer, size);
+    } else {
+        return 0;
+    }
 }
 
 
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
index 626df39..2caf5e8 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
@@ -58,7 +58,7 @@
 
     status_t start(bool sourceAlreadyStarted = false);
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
     status_t seekTo(int64_t time_us);
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
@@ -91,7 +91,7 @@
     int64_t mBGAudioStoryBoardCurrentMediaVolumeVal;
 
     sp<MediaSource> mSource;
-    AudioTrack *mAudioTrack;
+    sp<AudioTrack> mAudioTrack;
 
     MediaBuffer *mInputBuffer;
 
@@ -124,7 +124,8 @@
     size_t fillBuffer(void *data, size_t size);
     static size_t AudioSinkCallback(
             MediaPlayerBase::AudioSink *audioSink,
-            void *data, size_t size, void *me);
+            void *data, size_t size, void *me,
+            MediaPlayerBase::AudioSink::cb_event_t event);
 
     void reset();
     void clear();
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index 91a4415..5aeba4f 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -310,7 +310,6 @@
 VideoEditorPlayer::VeAudioOutput::VeAudioOutput()
     : mCallback(NULL),
       mCallbackCookie(NULL) {
-    mTrack = 0;
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
@@ -392,7 +391,8 @@
 status_t VideoEditorPlayer::VeAudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags) {
+        AudioCallback cb, void *cookie, audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo) {
 
     mCallback = cb;
     mCallbackCookie = cookie;
@@ -405,7 +405,7 @@
 
     }
     ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
-    if (mTrack) close();
+    if (mTrack != 0) close();
     uint32_t afSampleRate;
     size_t afFrameCount;
     int frameCount;
@@ -434,7 +434,7 @@
         }
     }
 
-    AudioTrack *t;
+    sp<AudioTrack> t;
     if (mCallback != NULL) {
         t = new AudioTrack(
                 mStreamType,
@@ -457,7 +457,6 @@
 
     if ((t == 0) || (t->initCheck() != NO_ERROR)) {
         ALOGE("Unable to create audio track");
-        delete t;
         return NO_INIT;
     }
 
@@ -469,14 +468,18 @@
     return NO_ERROR;
 }
 
-void VideoEditorPlayer::VeAudioOutput::start() {
+status_t VideoEditorPlayer::VeAudioOutput::start() {
 
     ALOGV("start");
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
-        mTrack->start();
-        mTrack->getPosition(&mNumFramesWritten);
+        status_t status = mTrack->start();
+        if (status == NO_ERROR) {
+            mTrack->getPosition(&mNumFramesWritten);
+        }
+        return status;
     }
+    return NO_INIT;
 }
 
 void VideoEditorPlayer::VeAudioOutput::snoopWrite(
@@ -492,7 +495,7 @@
     LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
-    if (mTrack) {
+    if (mTrack != 0) {
         snoopWrite(buffer, size);
         ssize_t ret = mTrack->write(buffer, size);
         mNumFramesWritten += ret / 4; // assume 16 bit stereo
@@ -504,26 +507,25 @@
 void VideoEditorPlayer::VeAudioOutput::stop() {
 
     ALOGV("stop");
-    if (mTrack) mTrack->stop();
+    if (mTrack != 0) mTrack->stop();
 }
 
 void VideoEditorPlayer::VeAudioOutput::flush() {
 
     ALOGV("flush");
-    if (mTrack) mTrack->flush();
+    if (mTrack != 0) mTrack->flush();
 }
 
 void VideoEditorPlayer::VeAudioOutput::pause() {
 
     ALOGV("VeAudioOutput::pause");
-    if (mTrack) mTrack->pause();
+    if (mTrack != 0) mTrack->pause();
 }
 
 void VideoEditorPlayer::VeAudioOutput::close() {
 
     ALOGV("close");
-    delete mTrack;
-    mTrack = 0;
+    mTrack.clear();
 }
 
 void VideoEditorPlayer::VeAudioOutput::setVolume(float left, float right) {
@@ -531,7 +533,7 @@
     ALOGV("setVolume(%f, %f)", left, right);
     mLeftVolume = left;
     mRightVolume = right;
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(left, right);
     }
 }
@@ -548,7 +550,8 @@
     AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
 
     size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie);
+            me, buffer->raw, buffer->size, me->mCallbackCookie,
+            MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER);
 
     buffer->size = actualSize;
 
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index 77194ab..ab6d731 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -52,9 +52,10 @@
         virtual status_t        open(
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount,
-                AudioCallback cb, void *cookie, audio_output_flags_t flags);
+                AudioCallback cb, void *cookie, audio_output_flags_t flags,
+                const audio_offload_info_t *offloadInfo);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
@@ -71,7 +72,7 @@
         static void             CallbackWrapper(
                 int event, void *me, void *info);
 
-        AudioTrack*             mTrack;
+        sp<AudioTrack>          mTrack;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
         audio_stream_type_t     mStreamType;
diff --git a/media/libcpustats/Android.mk b/media/libcpustats/Android.mk
new file mode 100644
index 0000000..b506353
--- /dev/null
+++ b/media/libcpustats/Android.mk
@@ -0,0 +1,11 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES :=     \
+        CentralTendencyStatistics.cpp \
+        ThreadCpuUsage.cpp
+
+LOCAL_MODULE := libcpustats
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libcpustats/CentralTendencyStatistics.cpp b/media/libcpustats/CentralTendencyStatistics.cpp
new file mode 100644
index 0000000..42ab62b
--- /dev/null
+++ b/media/libcpustats/CentralTendencyStatistics.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <cpustats/CentralTendencyStatistics.h>
+
+void CentralTendencyStatistics::sample(double x)
+{
+    // update min and max
+    if (x < mMinimum)
+        mMinimum = x;
+    if (x > mMaximum)
+        mMaximum = x;
+    // Knuth
+    if (mN == 0) {
+        mMean = 0;
+    }
+    ++mN;
+    double delta = x - mMean;
+    mMean += delta / mN;
+    mM2 += delta * (x - mMean);
+}
+
+void CentralTendencyStatistics::reset()
+{
+    mMean = NAN;
+    mMedian = NAN;
+    mMinimum = INFINITY;
+    mMaximum = -INFINITY;
+    mN = 0;
+    mM2 = 0;
+    mVariance = NAN;
+    mVarianceKnownForN = 0;
+    mStddev = NAN;
+    mStddevKnownForN = 0;
+}
+
+double CentralTendencyStatistics::variance() const
+{
+    double variance;
+    if (mVarianceKnownForN != mN) {
+        if (mN > 1) {
+            // double variance_n = M2/n;
+            variance = mM2 / (mN - 1);
+        } else {
+            variance = NAN;
+        }
+        mVariance = variance;
+        mVarianceKnownForN = mN;
+    } else {
+        variance = mVariance;
+    }
+    return variance;
+}
+
+double CentralTendencyStatistics::stddev() const
+{
+    double stddev;
+    if (mStddevKnownForN != mN) {
+        stddev = sqrt(variance());
+        mStddev = stddev;
+        mStddevKnownForN = mN;
+    } else {
+        stddev = mStddev;
+    }
+    return stddev;
+}
diff --git a/media/libcpustats/ThreadCpuUsage.cpp b/media/libcpustats/ThreadCpuUsage.cpp
new file mode 100644
index 0000000..637402a
--- /dev/null
+++ b/media/libcpustats/ThreadCpuUsage.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ThreadCpuUsage"
+//#define LOG_NDEBUG 0
+
+#include <errno.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <utils/Debug.h>
+#include <utils/Log.h>
+
+#include <cpustats/ThreadCpuUsage.h>
+
+namespace android {
+
+bool ThreadCpuUsage::setEnabled(bool isEnabled)
+{
+    bool wasEnabled = mIsEnabled;
+    // only do something if there is a change
+    if (isEnabled != wasEnabled) {
+        ALOGV("setEnabled(%d)", isEnabled);
+        int rc;
+        // enabling
+        if (isEnabled) {
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &mPreviousTs);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+                isEnabled = false;
+            } else {
+                mWasEverEnabled = true;
+                // record wall clock time at first enable
+                if (!mMonotonicKnown) {
+                    rc = clock_gettime(CLOCK_MONOTONIC, &mMonotonicTs);
+                    if (rc) {
+                        ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+                    } else {
+                        mMonotonicKnown = true;
+                    }
+                }
+            }
+        // disabling
+        } else {
+            struct timespec ts;
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+            } else {
+                long long delta = (ts.tv_sec - mPreviousTs.tv_sec) * 1000000000LL +
+                        (ts.tv_nsec - mPreviousTs.tv_nsec);
+                mAccumulator += delta;
+#if 0
+                mPreviousTs = ts;
+#endif
+            }
+        }
+        mIsEnabled = isEnabled;
+    }
+    return wasEnabled;
+}
+
+bool ThreadCpuUsage::sampleAndEnable(double& ns)
+{
+    bool ret;
+    bool wasEverEnabled = mWasEverEnabled;
+    if (enable()) {
+        // already enabled, so add a new sample relative to previous
+        return sample(ns);
+    } else if (wasEverEnabled) {
+        // was disabled, but add sample for accumulated time while enabled
+        ns = (double) mAccumulator;
+        mAccumulator = 0;
+        ALOGV("sampleAndEnable %.0f", ns);
+        return true;
+    } else {
+        // first time called
+        ns = 0.0;
+        ALOGV("sampleAndEnable false");
+        return false;
+    }
+}
+
+bool ThreadCpuUsage::sample(double &ns)
+{
+    if (mWasEverEnabled) {
+        if (mIsEnabled) {
+            struct timespec ts;
+            int rc;
+            rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
+            if (rc) {
+                ALOGE("clock_gettime(CLOCK_THREAD_CPUTIME_ID) errno=%d", errno);
+                ns = 0.0;
+                return false;
+            } else {
+                long long delta = (ts.tv_sec - mPreviousTs.tv_sec) * 1000000000LL +
+                        (ts.tv_nsec - mPreviousTs.tv_nsec);
+                mAccumulator += delta;
+                mPreviousTs = ts;
+            }
+        } else {
+            mWasEverEnabled = false;
+        }
+        ns = (double) mAccumulator;
+        ALOGV("sample %.0f", ns);
+        mAccumulator = 0;
+        return true;
+    } else {
+        ALOGW("Can't add sample because measurements have never been enabled");
+        ns = 0.0;
+        return false;
+    }
+}
+
+long long ThreadCpuUsage::elapsed() const
+{
+    long long elapsed;
+    if (mMonotonicKnown) {
+        struct timespec ts;
+        int rc;
+        rc = clock_gettime(CLOCK_MONOTONIC, &ts);
+        if (rc) {
+            ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+            elapsed = 0;
+        } else {
+            // mMonotonicTs is updated only at first enable and resetStatistics
+            elapsed = (ts.tv_sec - mMonotonicTs.tv_sec) * 1000000000LL +
+                    (ts.tv_nsec - mMonotonicTs.tv_nsec);
+        }
+    } else {
+        ALOGW("Can't compute elapsed time because measurements have never been enabled");
+        elapsed = 0;
+    }
+    ALOGV("elapsed %lld", elapsed);
+    return elapsed;
+}
+
+void ThreadCpuUsage::resetElapsed()
+{
+    ALOGV("resetElapsed");
+    if (mMonotonicKnown) {
+        int rc;
+        rc = clock_gettime(CLOCK_MONOTONIC, &mMonotonicTs);
+        if (rc) {
+            ALOGE("clock_gettime(CLOCK_MONOTONIC) errno=%d", errno);
+            mMonotonicKnown = false;
+        }
+    }
+}
+
+/*static*/
+int ThreadCpuUsage::sScalingFds[ThreadCpuUsage::MAX_CPU];
+pthread_once_t ThreadCpuUsage::sOnceControl = PTHREAD_ONCE_INIT;
+int ThreadCpuUsage::sKernelMax;
+pthread_mutex_t ThreadCpuUsage::sMutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*static*/
+void ThreadCpuUsage::init()
+{
+    // read the number of CPUs
+    sKernelMax = 1;
+    int fd = open("/sys/devices/system/cpu/kernel_max", O_RDONLY);
+    if (fd >= 0) {
+#define KERNEL_MAX_SIZE 12
+        char kernelMax[KERNEL_MAX_SIZE];
+        ssize_t actual = read(fd, kernelMax, sizeof(kernelMax));
+        if (actual >= 2 && kernelMax[actual-1] == '\n') {
+            sKernelMax = atoi(kernelMax);
+            if (sKernelMax >= MAX_CPU - 1) {
+                ALOGW("kernel_max %d but MAX_CPU %d", sKernelMax, MAX_CPU);
+                sKernelMax = MAX_CPU;
+            } else if (sKernelMax < 0) {
+                ALOGW("kernel_max invalid %d", sKernelMax);
+                sKernelMax = 1;
+            } else {
+                ++sKernelMax;
+                ALOGV("number of CPUs %d", sKernelMax);
+            }
+        } else {
+            ALOGW("Can't read number of CPUs");
+        }
+        (void) close(fd);
+    } else {
+        ALOGW("Can't open number of CPUs");
+    }
+    int i;
+    for (i = 0; i < MAX_CPU; ++i) {
+        sScalingFds[i] = -1;
+    }
+}
+
+uint32_t ThreadCpuUsage::getCpukHz(int cpuNum)
+{
+    if (cpuNum < 0 || cpuNum >= MAX_CPU) {
+        ALOGW("getCpukHz called with invalid CPU %d", cpuNum);
+        return 0;
+    }
+    // double-checked locking idiom is not broken for atomic values such as fd
+    int fd = sScalingFds[cpuNum];
+    if (fd < 0) {
+        // some kernels can't open a scaling file until hot plug complete
+        pthread_mutex_lock(&sMutex);
+        fd = sScalingFds[cpuNum];
+        if (fd < 0) {
+#define FREQ_SIZE 64
+            char freq_path[FREQ_SIZE];
+#define FREQ_DIGIT 27
+            COMPILE_TIME_ASSERT_FUNCTION_SCOPE(MAX_CPU <= 10);
+#define FREQ_PATH "/sys/devices/system/cpu/cpu?/cpufreq/scaling_cur_freq"
+            strlcpy(freq_path, FREQ_PATH, sizeof(freq_path));
+            freq_path[FREQ_DIGIT] = cpuNum + '0';
+            fd = open(freq_path, O_RDONLY | O_CLOEXEC);
+            // keep this fd until process exit or exec
+            sScalingFds[cpuNum] = fd;
+        }
+        pthread_mutex_unlock(&sMutex);
+        if (fd < 0) {
+            ALOGW("getCpukHz can't open CPU %d", cpuNum);
+            return 0;
+        }
+    }
+#define KHZ_SIZE 12
+    char kHz[KHZ_SIZE];   // kHz base 10
+    ssize_t actual = pread(fd, kHz, sizeof(kHz), (off_t) 0);
+    uint32_t ret;
+    if (actual >= 2 && kHz[actual-1] == '\n') {
+        ret = atoi(kHz);
+    } else {
+        ret = 0;
+    }
+    if (ret != mCurrentkHz[cpuNum]) {
+        if (ret > 0) {
+            ALOGV("CPU %d frequency %u kHz", cpuNum, ret);
+        } else {
+            ALOGW("Can't read CPU %d frequency", cpuNum);
+        }
+        mCurrentkHz[cpuNum] = ret;
+    }
+    return ret;
+}
+
+}   // namespace android
diff --git a/media/libeffects/testlibs/AudioFormatAdapter.h b/media/libeffects/testlibs/AudioFormatAdapter.h
index 41f1810..dea2734 100644
--- a/media/libeffects/testlibs/AudioFormatAdapter.h
+++ b/media/libeffects/testlibs/AudioFormatAdapter.h
@@ -75,6 +75,7 @@
         while (numSamples > 0) {
             uint32_t numSamplesIter = min(numSamples, mMaxSamplesPerCall);
             uint32_t nSamplesChannels = numSamplesIter * mNumChannels;
+            // This branch of "if" is untested
             if (mPcmFormat == AUDIO_FORMAT_PCM_8_24_BIT) {
                 if (mBehavior == EFFECT_BUFFER_ACCESS_WRITE) {
                     mpProcessor->process(
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index c35453b..8d00206 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -234,8 +234,7 @@
               (pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO));
     CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
               || pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
-    CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_8_24_BIT
-              || pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
+    CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
 
     int channelCount;
     if (pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_MONO) {
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 2c0c3a5..96755bb 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -53,7 +53,8 @@
     Visualizer.cpp \
     MemoryLeakTrackUtil.cpp \
     SoundPool.cpp \
-    SoundPoolThread.cpp
+    SoundPoolThread.cpp \
+    StringArray.cpp
 
 LOCAL_SRC_FILES += ../libnbaio/roundup.c
 
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 40ff1bf..e934a3e 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -19,17 +19,13 @@
 #define LOG_TAG "AudioRecord"
 
 #include <sys/resource.h>
-#include <sys/types.h>
-
 #include <binder/IPCThreadState.h>
-#include <cutils/atomic.h>
-#include <cutils/compiler.h>
 #include <media/AudioRecord.h>
-#include <media/AudioSystem.h>
-#include <system/audio.h>
 #include <utils/Log.h>
-
 #include <private/media/AudioTrackShared.h>
+#include <media/IAudioFlinger.h>
+
+#define WAIT_PERIOD_MS          10
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -41,7 +37,9 @@
         audio_format_t format,
         audio_channel_mask_t channelMask)
 {
-    if (frameCount == NULL) return BAD_VALUE;
+    if (frameCount == NULL) {
+        return BAD_VALUE;
+    }
 
     // default to 0 in case of error
     *frameCount = 0;
@@ -62,10 +60,9 @@
     // We double the size of input buffer for ping pong use of record buffer.
     size <<= 1;
 
-    if (audio_is_linear_pcm(format)) {
-        uint32_t channelCount = popcount(channelMask);
-        size /= channelCount * audio_bytes_per_sample(format);
-    }
+    // Assumes audio_is_linear_pcm(format)
+    uint32_t channelCount = popcount(channelMask);
+    size /= channelCount * audio_bytes_per_sample(format);
 
     *frameCount = size;
     return NO_ERROR;
@@ -75,8 +72,7 @@
 
 AudioRecord::AudioRecord()
     : mStatus(NO_INIT), mSessionId(0),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
 {
 }
 
@@ -89,14 +85,16 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags)
     : mStatus(NO_INIT), mSessionId(0),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mProxy(NULL)
 {
-    mStatus = set(inputSource, sampleRate, format, channelMask,
-            frameCount, cbf, user, notificationFrames, false /*threadCanCallJava*/, sessionId);
+    mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType);
 }
 
 AudioRecord::~AudioRecord()
@@ -111,11 +109,13 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
-        mAudioRecord.clear();
+        if (mAudioRecord != 0) {
+            mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+            mAudioRecord.clear();
+        }
         IPCThreadState::self()->flushCommands();
         AudioSystem::releaseAudioSessionId(mSessionId);
     }
-    delete mProxy;
 }
 
 status_t AudioRecord::set(
@@ -128,8 +128,33 @@
         void* user,
         int notificationFrames,
         bool threadCanCallJava,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags)
 {
+    switch (transferType) {
+    case TRANSFER_DEFAULT:
+        if (cbf == NULL || threadCanCallJava) {
+            transferType = TRANSFER_SYNC;
+        } else {
+            transferType = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (cbf == NULL) {
+            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        break;
+    default:
+        ALOGE("Invalid transfer type %d", transferType);
+        return BAD_VALUE;
+    }
+    mTransfer = transferType;
+
     // FIXME "int" here is legacy and will be replaced by size_t later
     if (frameCountInt < 0) {
         ALOGE("Invalid frame count %d", frameCountInt);
@@ -143,15 +168,18 @@
     AutoMutex lock(mLock);
 
     if (mAudioRecord != 0) {
+        ALOGE("Track already in use");
         return INVALID_OPERATION;
     }
 
     if (inputSource == AUDIO_SOURCE_DEFAULT) {
         inputSource = AUDIO_SOURCE_MIC;
     }
+    mInputSource = inputSource;
 
     if (sampleRate == 0) {
-        sampleRate = DEFAULT_SAMPLE_RATE;
+        ALOGE("Invalid sample rate %u", sampleRate);
+        return BAD_VALUE;
     }
     mSampleRate = sampleRate;
 
@@ -159,25 +187,50 @@
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
+
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format");
+        ALOGE("Invalid format %d", format);
+        return BAD_VALUE;
+    }
+    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
+    if (format != AUDIO_FORMAT_PCM_16_BIT) {
+        ALOGE("Format %d is not supported", format);
         return BAD_VALUE;
     }
     mFormat = format;
 
     if (!audio_is_input_channel(channelMask)) {
+        ALOGE("Invalid channel mask %#x", channelMask);
         return BAD_VALUE;
     }
     mChannelMask = channelMask;
     uint32_t channelCount = popcount(channelMask);
     mChannelCount = channelCount;
 
-    if (audio_is_linear_pcm(format)) {
-        mFrameSize = channelCount * audio_bytes_per_sample(format);
-    } else {
-        mFrameSize = sizeof(uint8_t);
+    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
+    mFrameSize = channelCount * audio_bytes_per_sample(format);
+
+    // validate framecount
+    size_t minFrameCount = 0;
+    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
+            sampleRate, format, channelMask);
+    if (status != NO_ERROR) {
+        ALOGE("getMinFrameCount() failed; status %d", status);
+        return status;
     }
+    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
+
+    if (frameCount == 0) {
+        frameCount = minFrameCount;
+    } else if (frameCount < minFrameCount) {
+        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
+        return BAD_VALUE;
+    }
+    mFrameCount = frameCount;
+
+    mNotificationFramesReq = notificationFrames;
+    mNotificationFramesAct = 0;
 
     if (sessionId == 0 ) {
         mSessionId = AudioSystem::newAudioSessionId();
@@ -186,37 +239,11 @@
     }
     ALOGV("set(): mSessionId %d", mSessionId);
 
-    audio_io_handle_t input = AudioSystem::getInput(inputSource,
-                                                    sampleRate,
-                                                    format,
-                                                    channelMask,
-                                                    mSessionId);
-    if (input == 0) {
-        ALOGE("Could not get audio input for record source %d", inputSource);
-        return BAD_VALUE;
-    }
-
-    // validate framecount
-    size_t minFrameCount = 0;
-    status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
-    if (status != NO_ERROR) {
-        return status;
-    }
-    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
-
-    if (frameCount == 0) {
-        frameCount = minFrameCount;
-    } else if (frameCount < minFrameCount) {
-        return BAD_VALUE;
-    }
-
-    if (notificationFrames == 0) {
-        notificationFrames = frameCount/2;
-    }
+    mFlags = flags;
 
     // create the IAudioRecord
-    status = openRecord_l(sampleRate, format, frameCount, input);
-    if (status != NO_ERROR) {
+    status = openRecord_l(0 /*epoch*/);
+    if (status) {
         return status;
     }
 
@@ -232,8 +259,7 @@
 
     mActive = false;
     mCbf = cbf;
-    mNotificationFrames = notificationFrames;
-    mRemainingFrames = notificationFrames;
+    mRefreshRemaining = true;
     mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000*mFrameCount) / sampleRate;
@@ -241,120 +267,79 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    mInputSource = inputSource;
-    mInput = input;
     AudioSystem::acquireAudioSessionId(mSessionId);
+    mSequence = 1;
+    mObservedSequence = mSequence;
+    mInOverrun = false;
 
     return NO_ERROR;
 }
 
-status_t AudioRecord::initCheck() const
-{
-    return mStatus;
-}
-
-// -------------------------------------------------------------------------
-
-uint32_t AudioRecord::latency() const
-{
-    return mLatency;
-}
-
-audio_format_t AudioRecord::format() const
-{
-    return mFormat;
-}
-
-uint32_t AudioRecord::channelCount() const
-{
-    return mChannelCount;
-}
-
-size_t AudioRecord::frameCount() const
-{
-    return mFrameCount;
-}
-
-audio_source_t AudioRecord::inputSource() const
-{
-    return mInputSource;
-}
-
 // -------------------------------------------------------------------------
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
 {
-    status_t ret = NO_ERROR;
-    sp<AudioRecordThread> t = mAudioRecordThread;
-
     ALOGV("start, sync event %d trigger session %d", event, triggerSession);
 
     AutoMutex lock(mLock);
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
+    if (mActive) {
+        return NO_ERROR;
+    }
 
-    if (!mActive) {
+    // reset current position as seen by client to 0
+    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
+
+    status_t status = NO_ERROR;
+    if (!(flags & CBLK_INVALID)) {
+        ALOGV("mAudioRecord->start()");
+        status = mAudioRecord->start(event, triggerSession);
+        if (status == DEAD_OBJECT) {
+            flags |= CBLK_INVALID;
+        }
+    }
+    if (flags & CBLK_INVALID) {
+        status = restoreRecord_l("start");
+    }
+
+    if (status != NO_ERROR) {
+        ALOGE("start() status %d", status);
+    } else {
         mActive = true;
-
-        cblk->lock.lock();
-        if (!(cblk->flags & CBLK_INVALID)) {
-            cblk->lock.unlock();
-            ALOGV("mAudioRecord->start()");
-            ret = mAudioRecord->start(event, triggerSession);
-            cblk->lock.lock();
-            if (ret == DEAD_OBJECT) {
-                android_atomic_or(CBLK_INVALID, &cblk->flags);
-            }
-        }
-        if (cblk->flags & CBLK_INVALID) {
-            audio_track_cblk_t* temp = cblk;
-            ret = restoreRecord_l(temp);
-            cblk = temp;
-        }
-        cblk->lock.unlock();
-        if (ret == NO_ERROR) {
-            mNewPosition = cblk->user + mUpdatePeriod;
-            cblk->bufferTimeoutMs = (event == AudioSystem::SYNC_EVENT_NONE) ? MAX_RUN_TIMEOUT_MS :
-                                            AudioSystem::kSyncRecordStartTimeOutMs;
-            cblk->waitTimeMs = 0;
-            if (t != 0) {
-                t->resume();
-            } else {
-                mPreviousPriority = getpriority(PRIO_PROCESS, 0);
-                get_sched_policy(0, &mPreviousSchedulingGroup);
-                androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
-            }
+        sp<AudioRecordThread> t = mAudioRecordThread;
+        if (t != 0) {
+            t->resume();
         } else {
-            mActive = false;
+            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+            get_sched_policy(0, &mPreviousSchedulingGroup);
+            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
         }
     }
 
-    return ret;
+    return status;
 }
 
 void AudioRecord::stop()
 {
-    sp<AudioRecordThread> t = mAudioRecordThread;
-
-    ALOGV("stop");
-
     AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioRecord->stop();
-        // the record head position will reset to 0, so if a marker is set, we need
-        // to activate it again
-        mMarkerReached = false;
-        if (t != 0) {
-            t->pause();
-        } else {
-            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
-            set_sched_policy(0, mPreviousSchedulingGroup);
-        }
+    if (!mActive) {
+        return;
+    }
+
+    mActive = false;
+    mProxy->interrupt();
+    mAudioRecord->stop();
+    // the record head position will reset to 0, so if a marker is set, we need
+    // to activate it again
+    mMarkerReached = false;
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->pause();
+    } else {
+        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+        set_sched_policy(0, mPreviousSchedulingGroup);
     }
 }
 
@@ -364,14 +349,11 @@
     return !mActive;
 }
 
-uint32_t AudioRecord::getSampleRate() const
-{
-    return mSampleRate;
-}
-
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == NULL) return INVALID_OPERATION;
+    if (mCbf == NULL) {
+        return INVALID_OPERATION;
+    }
 
     AutoMutex lock(mLock);
     mMarkerPosition = marker;
@@ -382,7 +364,9 @@
 
 status_t AudioRecord::getMarkerPosition(uint32_t *marker) const
 {
-    if (marker == NULL) return BAD_VALUE;
+    if (marker == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
     *marker = mMarkerPosition;
@@ -392,13 +376,12 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == NULL) return INVALID_OPERATION;
-
-    uint32_t curPosition;
-    getPosition(&curPosition);
+    if (mCbf == NULL) {
+        return INVALID_OPERATION;
+    }
 
     AutoMutex lock(mLock);
-    mNewPosition = curPosition + updatePeriod;
+    mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
     return NO_ERROR;
@@ -406,7 +389,9 @@
 
 status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
-    if (updatePeriod == NULL) return BAD_VALUE;
+    if (updatePeriod == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
     *updatePeriod = mUpdatePeriod;
@@ -416,10 +401,12 @@
 
 status_t AudioRecord::getPosition(uint32_t *position) const
 {
-    if (position == NULL) return BAD_VALUE;
+    if (position == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
-    *position = mCblk->user;
+    *position = mProxy->getPosition();
 
     return NO_ERROR;
 }
@@ -427,17 +414,13 @@
 unsigned int AudioRecord::getInputFramesLost() const
 {
     // no need to check mActive, because if inactive this will return 0, which is what we want
-    return AudioSystem::getInputFramesLost(mInput);
+    return AudioSystem::getInputFramesLost(getInput());
 }
 
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioRecord::openRecord_l(
-        uint32_t sampleRate,
-        audio_format_t format,
-        size_t frameCount,
-        audio_io_handle_t input)
+status_t AudioRecord::openRecord_l(size_t epoch)
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -446,15 +429,44 @@
         return NO_INIT;
     }
 
+    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
     pid_t tid = -1;
-    // FIXME see similar logic at AudioTrack
+
+    // Client can only express a preference for FAST.  Server will perform additional tests.
+    // The only supported use case for FAST is callback transfer mode.
+    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+        if ((mTransfer != TRANSFER_CALLBACK) || (mAudioRecordThread == 0)) {
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
+            // once denied, do not request again if IAudioRecord is re-created
+            mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+        } else {
+            trackFlags |= IAudioFlinger::TRACK_FAST;
+            tid = mAudioRecordThread->getTid();
+        }
+    }
+
+    mNotificationFramesAct = mNotificationFramesReq;
+
+    if (!(mFlags & AUDIO_INPUT_FLAG_FAST)) {
+        // Make sure that application is notified with sufficient margin before overrun
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount/2) {
+            mNotificationFramesAct = mFrameCount/2;
+        }
+    }
+
+    audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
+            mChannelMask, mSessionId);
+    if (input == 0) {
+        ALOGE("Could not get audio input for record source %d", mInputSource);
+        return BAD_VALUE;
+    }
 
     int originalSessionId = mSessionId;
     sp<IAudioRecord> record = audioFlinger->openRecord(input,
-                                                       sampleRate, format,
+                                                       mSampleRate, mFormat,
                                                        mChannelMask,
-                                                       frameCount,
-                                                       IAudioFlinger::TRACK_DEFAULT,
+                                                       mFrameCount,
+                                                       &trackFlags,
                                                        tid,
                                                        &mSessionId,
                                                        &status);
@@ -463,6 +475,7 @@
 
     if (record == 0) {
         ALOGE("AudioFlinger could not create record track, status: %d", status);
+        AudioSystem::releaseInput(input);
         return status;
     }
     sp<IMemory> iMem = record->getCblk();
@@ -470,133 +483,158 @@
         ALOGE("Could not get control block");
         return NO_INIT;
     }
-    mAudioRecord.clear();
+    if (mAudioRecord != 0) {
+        mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+        mDeathNotifier.clear();
+    }
+    mInput = input;
     mAudioRecord = record;
-    mCblkMemory.clear();
     mCblkMemory = iMem;
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
     mCblk = cblk;
-    mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
-    cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-    cblk->waitTimeMs = 0;
+    // FIXME missing fast track frameCount logic
+    mAwaitBoost = false;
+    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", mFrameCount);
+            mAwaitBoost = true;
+            // double-buffering is not required for fast tracks, due to tighter scheduling
+            if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount) {
+                mNotificationFramesAct = mFrameCount;
+            }
+        } else {
+            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", mFrameCount);
+            // once denied, do not request again if IAudioRecord is re-created
+            mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+            if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount/2) {
+                mNotificationFramesAct = mFrameCount/2;
+            }
+        }
+    }
+
+    // starting address of buffers in shared memory
+    void *buffers = (char*)cblk + sizeof(audio_track_cblk_t);
 
     // update proxy
-    delete mProxy;
-    mProxy = new AudioRecordClientProxy(cblk, mBuffers, frameCount, mFrameSize);
+    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
+    mProxy->setEpoch(epoch);
+    mProxy->setMinimum(mNotificationFramesAct);
+
+    mDeathNotifier = new DeathNotifier(this);
+    mAudioRecord->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
 }
 
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    if (audioBuffer == NULL) {
+        return BAD_VALUE;
+    }
+    if (mTransfer != TRANSFER_OBTAIN) {
+        audioBuffer->frameCount = 0;
+        audioBuffer->size = 0;
+        audioBuffer->raw = NULL;
+        return INVALID_OPERATION;
+    }
 
-    AutoMutex lock(mLock);
-    bool active;
-    status_t result = NO_ERROR;
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = audioBuffer->frameCount;
-    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;
+    const struct timespec *requested;
+    if (waitCount == -1) {
+        requested = &ClientProxy::kForever;
+    } else if (waitCount == 0) {
+        requested = &ClientProxy::kNonBlocking;
+    } else if (waitCount > 0) {
+        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        struct timespec timeout;
+        timeout.tv_sec = ms / 1000;
+        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        requested = &timeout;
+    } else {
+        ALOGE("%s invalid waitCount %d", __func__, waitCount);
+        requested = NULL;
+    }
+    return obtainBuffer(audioBuffer, requested);
+}
 
-    audioBuffer->frameCount  = 0;
-    audioBuffer->size        = 0;
+status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+        struct timespec *elapsed, size_t *nonContig)
+{
+    // previous and new IAudioRecord sequence numbers are used to detect track re-creation
+    uint32_t oldSequence = 0;
+    uint32_t newSequence;
 
-    size_t framesReady = mProxy->framesReady();
+    Proxy::Buffer buffer;
+    status_t status = NO_ERROR;
 
-    if (framesReady == 0) {
-        cblk->lock.lock();
-        goto start_loop_here;
-        while (framesReady == 0) {
-            active = mActive;
-            if (CC_UNLIKELY(!active)) {
-                cblk->lock.unlock();
-                return NO_MORE_BUFFERS;
-            }
-            if (CC_UNLIKELY(!waitCount)) {
-                cblk->lock.unlock();
-                return WOULD_BLOCK;
-            }
-            if (!(cblk->flags & CBLK_INVALID)) {
-                mLock.unlock();
-                // this condition is in shared memory, so if IAudioRecord and control block
-                // are replaced due to mediaserver death or IAudioRecord invalidation then
-                // cv won't be signalled, but fortunately the timeout will limit the wait
-                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-                cblk->lock.unlock();
-                mLock.lock();
-                if (!mActive) {
-                    return status_t(STOPPED);
-                }
-                // IAudioRecord may have been re-created while mLock was unlocked
-                cblk = mCblk;
-                cblk->lock.lock();
-            }
-            if (cblk->flags & CBLK_INVALID) {
-                goto create_new_record;
-            }
-            if (CC_UNLIKELY(result != NO_ERROR)) {
-                cblk->waitTimeMs += waitTimeMs;
-                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
-                    ALOGW(   "obtainBuffer timed out (is the CPU pegged?) "
-                            "user=%08x, server=%08x", cblk->user, cblk->server);
-                    cblk->lock.unlock();
-                    // callback thread or sync event hasn't changed
-                    result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
-                    cblk->lock.lock();
-                    if (result == DEAD_OBJECT) {
-                        android_atomic_or(CBLK_INVALID, &cblk->flags);
-create_new_record:
-                        audio_track_cblk_t* temp = cblk;
-                        result = AudioRecord::restoreRecord_l(temp);
-                        cblk = temp;
+    static const int32_t kMaxTries = 5;
+    int32_t tryCounter = kMaxTries;
+
+    do {
+        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
+        // keep them from going away if another thread re-creates the track during obtainBuffer()
+        sp<AudioRecordClientProxy> proxy;
+        sp<IMemory> iMem;
+        {
+            // start of lock scope
+            AutoMutex lock(mLock);
+
+            newSequence = mSequence;
+            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
+            if (status == DEAD_OBJECT) {
+                // re-create track, unless someone else has already done so
+                if (newSequence == oldSequence) {
+                    status = restoreRecord_l("obtainBuffer");
+                    if (status != NO_ERROR) {
+                        break;
                     }
-                    if (result != NO_ERROR) {
-                        ALOGW("obtainBuffer create Track error %d", result);
-                        cblk->lock.unlock();
-                        return result;
-                    }
-                    cblk->waitTimeMs = 0;
-                }
-                if (--waitCount == 0) {
-                    cblk->lock.unlock();
-                    return TIMED_OUT;
                 }
             }
-            // read the server count again
-        start_loop_here:
-            framesReady = mProxy->framesReady();
-        }
-        cblk->lock.unlock();
+            oldSequence = newSequence;
+
+            // Keep the extra references
+            proxy = mProxy;
+            iMem = mCblkMemory;
+
+            // Non-blocking if track is stopped
+            if (!mActive) {
+                requested = &ClientProxy::kNonBlocking;
+            }
+
+        }   // end of lock scope
+
+        buffer.mFrameCount = audioBuffer->frameCount;
+        // FIXME starts the requested timeout and elapsed over from scratch
+        status = proxy->obtainBuffer(&buffer, requested, elapsed);
+
+    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+
+    audioBuffer->frameCount = buffer.mFrameCount;
+    audioBuffer->size = buffer.mFrameCount * mFrameSize;
+    audioBuffer->raw = buffer.mRaw;
+    if (nonContig != NULL) {
+        *nonContig = buffer.mNonContig;
     }
-
-    cblk->waitTimeMs = 0;
-    // reset time out to running value after obtaining a buffer
-    cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-
-    if (framesReq > framesReady) {
-        framesReq = framesReady;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    audioBuffer->frameCount  = framesReq;
-    audioBuffer->size        = framesReq * mFrameSize;
-    audioBuffer->raw         = mProxy->buffer(u);
-    active = mActive;
-    return active ? status_t(NO_ERROR) : status_t(STOPPED);
+    return status;
 }
 
 void AudioRecord::releaseBuffer(Buffer* audioBuffer)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    // all TRANSFER_* are valid
+
+    size_t stepCount = audioBuffer->size / mFrameSize;
+    if (stepCount == 0) {
+        return;
+    }
+
+    Proxy::Buffer buffer;
+    buffer.mFrameCount = stepCount;
+    buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
-    (void) mProxy->stepUser(audioBuffer->frameCount);
+    mInOverrun = false;
+    mProxy->releaseBuffer(&buffer);
+
+    // the server does not automatically disable recorder on overrun, so no need to restart
 }
 
 audio_io_handle_t AudioRecord::getInput() const
@@ -605,226 +643,324 @@
     return mInput;
 }
 
-// must be called with mLock held
-audio_io_handle_t AudioRecord::getInput_l()
-{
-    mInput = AudioSystem::getInput(mInputSource,
-                                mSampleRate,
-                                mFormat,
-                                mChannelMask,
-                                mSessionId);
-    return mInput;
-}
-
-int AudioRecord::getSessionId() const
-{
-    // no lock needed because session ID doesn't change after first set()
-    return mSessionId;
-}
-
 // -------------------------------------------------------------------------
 
 ssize_t AudioRecord::read(void* buffer, size_t userSize)
 {
-    ssize_t read = 0;
-    Buffer audioBuffer;
-    int8_t *dst = static_cast<int8_t*>(buffer);
+    if (mTransfer != TRANSFER_SYNC) {
+        return INVALID_OPERATION;
+    }
 
-    if (ssize_t(userSize) < 0) {
-        // sanity-check. user is most-likely passing an error code.
-        ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)",
-                buffer, userSize, userSize);
+    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
+        // sanity-check. user is most-likely passing an error code, and it would
+        // make the return value ambiguous (actualSize vs error).
+        ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
-    mLock.lock();
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    mLock.unlock();
+    ssize_t read = 0;
+    Buffer audioBuffer;
 
-    do {
+    while (userSize >= mFrameSize) {
+        audioBuffer.frameCount = userSize / mFrameSize;
 
-        audioBuffer.frameCount = userSize/frameSize();
-
-        // By using a wait count corresponding to twice the timeout period in
-        // obtainBuffer() we give a chance to recover once for a read timeout
-        // (if media_server crashed for instance) before returning a length of
-        // 0 bytes read to the client
-        status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
+        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
         if (err < 0) {
-            // out of buffers, return #bytes written
-            if (err == status_t(NO_MORE_BUFFERS)) {
+            if (read > 0) {
                 break;
             }
-            if (err == status_t(TIMED_OUT)) {
-                // return partial transfer count
-                return read;
-            }
             return ssize_t(err);
         }
 
         size_t bytesRead = audioBuffer.size;
-        memcpy(dst, audioBuffer.i8, bytesRead);
-
-        dst += bytesRead;
+        memcpy(buffer, audioBuffer.i8, bytesRead);
+        buffer = ((char *) buffer) + bytesRead;
         userSize -= bytesRead;
         read += bytesRead;
 
         releaseBuffer(&audioBuffer);
-    } while (userSize);
+    }
 
     return read;
 }
 
 // -------------------------------------------------------------------------
 
-bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
+nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
 {
-    Buffer audioBuffer;
-    uint32_t frames = mRemainingFrames;
-    size_t readSize;
-
     mLock.lock();
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
-    bool active = mActive;
-    uint32_t markerPosition = mMarkerPosition;
-    uint32_t newPosition = mNewPosition;
-    uint32_t user = cblk->user;
-    // determine whether a marker callback will be needed, while locked
-    bool needMarker = !mMarkerReached && (mMarkerPosition > 0) && (user >= mMarkerPosition);
-    if (needMarker) {
-        mMarkerReached = true;
+    if (mAwaitBoost) {
+        mAwaitBoost = false;
+        mLock.unlock();
+        static const int32_t kMaxTries = 5;
+        int32_t tryCounter = kMaxTries;
+        uint32_t pollUs = 10000;
+        do {
+            int policy = sched_getscheduler(0);
+            if (policy == SCHED_FIFO || policy == SCHED_RR) {
+                break;
+            }
+            usleep(pollUs);
+            pollUs <<= 1;
+        } while (tryCounter-- > 0);
+        if (tryCounter < 0) {
+            ALOGE("did not receive expected priority boost on time");
+        }
+        // Run again immediately
+        return 0;
     }
-    // determine the number of new position callback(s) that will be needed, while locked
+
+    // Can only reference mCblk while locked
+    int32_t flags = android_atomic_and(~CBLK_OVERRUN, &mCblk->mFlags);
+
+    // Check for track invalidation
+    if (flags & CBLK_INVALID) {
+        (void) restoreRecord_l("processAudioBuffer");
+        mLock.unlock();
+        // Run again immediately, but with a new IAudioRecord
+        return 0;
+    }
+
+    bool active = mActive;
+
+    // Manage overrun callback, must be done under lock to avoid race with releaseBuffer()
+    bool newOverrun = false;
+    if (flags & CBLK_OVERRUN) {
+        if (!mInOverrun) {
+            mInOverrun = true;
+            newOverrun = true;
+        }
+    }
+
+    // Get current position of server
+    size_t position = mProxy->getPosition();
+
+    // Manage marker callback
+    bool markerReached = false;
+    size_t markerPosition = mMarkerPosition;
+    // FIXME fails for wraparound, need 64 bits
+    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+        mMarkerReached = markerReached = true;
+    }
+
+    // Determine the number of new position callback(s) that will be needed, while locked
+    size_t newPosCount = 0;
+    size_t newPosition = mNewPosition;
     uint32_t updatePeriod = mUpdatePeriod;
-    uint32_t needNewPos = updatePeriod > 0 && user >= newPosition ?
-            ((user - newPosition) / updatePeriod) + 1 : 0;
-    mNewPosition = newPosition + updatePeriod * needNewPos;
+    // FIXME fails for wraparound, need 64 bits
+    if (updatePeriod > 0 && position >= newPosition) {
+        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        mNewPosition += updatePeriod * newPosCount;
+    }
+
+    // Cache other fields that will be needed soon
+    size_t notificationFrames = mNotificationFramesAct;
+    if (mRefreshRemaining) {
+        mRefreshRemaining = false;
+        mRemainingFrames = notificationFrames;
+        mRetryOnPartialBuffer = false;
+    }
+    size_t misalignment = mProxy->getMisalignment();
+    int32_t sequence = mSequence;
+
+    // These fields don't need to be cached, because they are assigned only by set():
+    //      mTransfer, mCbf, mUserData, mSampleRate
+
     mLock.unlock();
 
-    // perform marker callback, while unlocked
-    if (needMarker) {
+    // perform callbacks while unlocked
+    if (newOverrun) {
+        mCbf(EVENT_OVERRUN, mUserData, NULL);
+    }
+    if (markerReached) {
         mCbf(EVENT_MARKER, mUserData, &markerPosition);
     }
-
-    // perform new position callback(s), while unlocked
-    for (; needNewPos > 0; --needNewPos) {
-        uint32_t temp = newPosition;
+    while (newPosCount > 0) {
+        size_t temp = newPosition;
         mCbf(EVENT_NEW_POS, mUserData, &temp);
         newPosition += updatePeriod;
+        newPosCount--;
+    }
+    if (mObservedSequence != sequence) {
+        mObservedSequence = sequence;
+        mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
     }
 
-    do {
-        audioBuffer.frameCount = frames;
-        // Calling obtainBuffer() with a wait count of 1
-        // limits wait time to WAIT_PERIOD_MS. This prevents from being
-        // stuck here not being able to handle timed events (position, markers).
-        status_t err = obtainBuffer(&audioBuffer, 1);
-        if (err < NO_ERROR) {
-            if (err != TIMED_OUT) {
-                ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
-                        "Error obtaining an audio buffer, giving up.");
-                return false;
+    // if inactive, then don't run me again until re-started
+    if (!active) {
+        return NS_INACTIVE;
+    }
+
+    // Compute the estimated time until the next timed event (position, markers)
+    uint32_t minFrames = ~0;
+    if (!markerReached && position < markerPosition) {
+        minFrames = markerPosition - position;
+    }
+    if (updatePeriod > 0 && updatePeriod < minFrames) {
+        minFrames = updatePeriod;
+    }
+
+    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
+    static const uint32_t kPoll = 0;
+    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
+        minFrames = kPoll * notificationFrames;
+    }
+
+    // Convert frame units to time units
+    nsecs_t ns = NS_WHENEVER;
+    if (minFrames != (uint32_t) ~0) {
+        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
+        ns = ((minFrames * 1000000000LL) / mSampleRate) + kFudgeNs;
+    }
+
+    // If not supplying data by EVENT_MORE_DATA, then we're done
+    if (mTransfer != TRANSFER_CALLBACK) {
+        return ns;
+    }
+
+    struct timespec timeout;
+    const struct timespec *requested = &ClientProxy::kForever;
+    if (ns != NS_WHENEVER) {
+        timeout.tv_sec = ns / 1000000000LL;
+        timeout.tv_nsec = ns % 1000000000LL;
+        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+        requested = &timeout;
+    }
+
+    while (mRemainingFrames > 0) {
+
+        Buffer audioBuffer;
+        audioBuffer.frameCount = mRemainingFrames;
+        size_t nonContig;
+        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
+        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
+                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+        requested = &ClientProxy::kNonBlocking;
+        size_t avail = audioBuffer.frameCount + nonContig;
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        if (err != NO_ERROR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
+                break;
             }
-            break;
+            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
+            return NS_NEVER;
         }
-        if (err == status_t(STOPPED)) return false;
+
+        if (mRetryOnPartialBuffer) {
+            mRetryOnPartialBuffer = false;
+            if (avail < mRemainingFrames) {
+                int64_t myns = ((mRemainingFrames - avail) *
+                        1100000000LL) / mSampleRate;
+                if (ns < 0 || myns < ns) {
+                    ns = myns;
+                }
+                return ns;
+            }
+        }
 
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        readSize = audioBuffer.size;
+        size_t readSize = audioBuffer.size;
 
         // Sanity check on returned size
-        if (ssize_t(readSize) <= 0) {
-            // The callback is done filling buffers
-            // Keep this thread going to handle timed events and
-            // still try to get more data in intervals of WAIT_PERIOD_MS
-            // but don't just loop and block the CPU, so wait
-            usleep(WAIT_PERIOD_MS*1000);
-            break;
+        if (ssize_t(readSize) < 0 || readSize > reqSize) {
+            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
+                    reqSize, (int) readSize);
+            return NS_NEVER;
         }
-        if (readSize > reqSize) readSize = reqSize;
 
-        audioBuffer.size = readSize;
-        audioBuffer.frameCount = readSize/frameSize();
-        frames -= audioBuffer.frameCount;
+        if (readSize == 0) {
+            // The callback is done consuming buffers
+            // Keep this thread going to handle timed events and
+        // still try to get more data in intervals of WAIT_PERIOD_MS
+            // but don't just loop and block the CPU, so wait
+            return WAIT_PERIOD_MS * 1000000LL;
+        }
+
+        size_t releasedFrames = readSize / mFrameSize;
+        audioBuffer.frameCount = releasedFrames;
+        mRemainingFrames -= releasedFrames;
+        if (misalignment >= releasedFrames) {
+            misalignment -= releasedFrames;
+        } else {
+            misalignment = 0;
+        }
 
         releaseBuffer(&audioBuffer);
 
-    } while (frames);
-
-
-    // Manage overrun callback
-    if (active && (mProxy->framesAvailable() == 0)) {
-        // The value of active is stale, but we are almost sure to be active here because
-        // otherwise we would have exited when obtainBuffer returned STOPPED earlier.
-        ALOGV("Overrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
-        if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
-            mCbf(EVENT_OVERRUN, mUserData, NULL);
+        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
+        // if callback doesn't like to accept the full chunk
+        if (readSize < reqSize) {
+            continue;
         }
-    }
 
-    if (frames == 0) {
-        mRemainingFrames = mNotificationFrames;
-    } else {
-        mRemainingFrames = frames;
+        // There could be enough non-contiguous frames available to satisfy the remaining request
+        if (mRemainingFrames <= nonContig) {
+            continue;
+        }
+
+#if 0
+        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
+        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
+        // that total to a sum == notificationFrames.
+        if (0 < misalignment && misalignment <= mRemainingFrames) {
+            mRemainingFrames = misalignment;
+            return (mRemainingFrames * 1100000000LL) / mSampleRate;
+        }
+#endif
+
     }
-    return true;
+    mRemainingFrames = notificationFrames;
+    mRetryOnPartialBuffer = true;
+
+    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
+    return 0;
 }
 
-// must be called with mLock and cblk.lock held. Callers must also hold strong references on
-// the IAudioRecord and IMemory in case they are recreated here.
-// If the IAudioRecord is successfully restored, the cblk pointer is updated
-status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& refCblk)
+status_t AudioRecord::restoreRecord_l(const char *from)
 {
+    ALOGW("dead IAudioRecord, creating a new one from %s()", from);
+    ++mSequence;
     status_t result;
 
-    audio_track_cblk_t* cblk = refCblk;
-    audio_track_cblk_t* newCblk = cblk;
-    ALOGW("dead IAudioRecord, creating a new one");
-
-    // signal old cblk condition so that other threads waiting for available buffers stop
-    // waiting now
-    cblk->cv.broadcast();
-    cblk->lock.unlock();
-
     // if the new IAudioRecord is created, openRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioRecord and IMemory
-    result = openRecord_l(mSampleRate, mFormat, mFrameCount, getInput_l());
+    size_t position = mProxy->getPosition();
+    mNewPosition = position + mUpdatePeriod;
+    result = openRecord_l(position);
     if (result == NO_ERROR) {
-        newCblk = mCblk;
-        // callback thread or sync event hasn't changed
-        result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+        if (mActive) {
+            // callback thread or sync event hasn't changed
+            // FIXME this fails if we have a new AudioFlinger instance
+            result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+        }
     }
     if (result != NO_ERROR) {
+        ALOGW("restoreRecord_l() failed status %d", result);
         mActive = false;
     }
 
-    ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-        result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
-
-    if (result == NO_ERROR) {
-        // from now on we switch to the newly created cblk
-        refCblk = newCblk;
-    }
-    newCblk->lock.lock();
-
-    ALOGW_IF(result != NO_ERROR, "restoreRecord_l() error %d", result);
-
     return result;
 }
 
 // =========================================================================
 
+void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who)
+{
+    sp<AudioRecord> audioRecord = mAudioRecord.promote();
+    if (audioRecord != 0) {
+        AutoMutex lock(audioRecord->mLock);
+        audioRecord->mProxy->binderDied();
+    }
+}
+
+// =========================================================================
+
 AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
 {
 }
 
@@ -842,10 +978,26 @@
             return true;
         }
     }
-    if (!mReceiver.processAudioBuffer(this)) {
-        pause();
+    nsecs_t ns =  mReceiver.processAudioBuffer(this);
+    switch (ns) {
+    case 0:
+        return true;
+    case NS_WHENEVER:
+        sleep(1);
+        return true;
+    case NS_INACTIVE:
+        pauseConditional();
+        return true;
+    case NS_NEVER:
+        return false;
+    default:
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        struct timespec req;
+        req.tv_sec = ns / 1000000000LL;
+        req.tv_nsec = ns % 1000000000LL;
+        nanosleep(&req, NULL /*rem*/);
+        return true;
     }
-    return true;
 }
 
 void AudioRecord::AudioRecordThread::requestExit()
@@ -859,6 +1011,17 @@
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
+    mResumeLatch = false;
+}
+
+void AudioRecord::AudioRecordThread::pauseConditional()
+{
+    AutoMutex _l(mMyLock);
+    if (mResumeLatch) {
+        mResumeLatch = false;
+    } else {
+        mPaused = true;
+    }
 }
 
 void AudioRecord::AudioRecordThread::resume()
@@ -866,7 +1029,10 @@
     AutoMutex _l(mMyLock);
     if (mPaused) {
         mPaused = false;
+        mResumeLatch = false;
         mMyCond.signal();
+    } else {
+        mResumeLatch = true;
     }
 }
 
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 693df60..a571fe4 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -20,6 +20,7 @@
 #include <utils/Log.h>
 #include <binder/IServiceManager.h>
 #include <media/AudioSystem.h>
+#include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
 #include <math.h>
 
@@ -361,8 +362,8 @@
     return af->setVoiceVolume(value);
 }
 
-status_t AudioSystem::getRenderPosition(size_t *halFrames, size_t *dspFrames,
-        audio_stream_type_t stream)
+status_t AudioSystem::getRenderPosition(audio_io_handle_t output, size_t *halFrames,
+                                        size_t *dspFrames, audio_stream_type_t stream)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
@@ -371,7 +372,11 @@
         stream = AUDIO_STREAM_MUSIC;
     }
 
-    return af->getRenderPosition(halFrames, dspFrames, getOutput(stream));
+    if (output == 0) {
+        output = getOutput(stream);
+    }
+
+    return af->getRenderPosition(halFrames, dspFrames, output);
 }
 
 size_t AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
@@ -442,14 +447,14 @@
 
         OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
         gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %d channels %#x frameCount %u "
+        ALOGV("ioConfigChanged() new output samplingRate %u, format %d channel mask %#x frameCount %u "
                 "latency %d",
-                outputDesc->samplingRate, outputDesc->format, outputDesc->channels,
+                outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
                 outputDesc->frameCount, outputDesc->latency);
         } break;
     case OUTPUT_CLOSED: {
         if (gOutputs.indexOfKey(ioHandle) < 0) {
-            ALOGW("ioConfigChanged() closing unknow output! %d", ioHandle);
+            ALOGW("ioConfigChanged() closing unknown output! %d", ioHandle);
             break;
         }
         ALOGV("ioConfigChanged() output %d closed", ioHandle);
@@ -460,16 +465,16 @@
     case OUTPUT_CONFIG_CHANGED: {
         int index = gOutputs.indexOfKey(ioHandle);
         if (index < 0) {
-            ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
+            ALOGW("ioConfigChanged() modifying unknown output! %d", ioHandle);
             break;
         }
         if (param2 == NULL) break;
         desc = (const OutputDescriptor *)param2;
 
-        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channels %#x "
+        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %d channel mask %#x "
                 "frameCount %d latency %d",
                 ioHandle, desc->samplingRate, desc->format,
-                desc->channels, desc->frameCount, desc->latency);
+                desc->channelMask, desc->frameCount, desc->latency);
         OutputDescriptor *outputDesc = gOutputs.valueAt(index);
         delete outputDesc;
         outputDesc =  new OutputDescriptor(*desc);
@@ -532,6 +537,8 @@
     return gAudioPolicyService;
 }
 
+// ---------------------------------------------------------------------------
+
 status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                                audio_policy_dev_state_t state,
                                                const char *device_address)
@@ -585,11 +592,12 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags)
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return 0;
-    return aps->getOutput(stream, samplingRate, format, channelMask, flags);
+    return aps->getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
 }
 
 status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -764,6 +772,13 @@
     return af->getPrimaryOutputFrameCount();
 }
 
+status_t AudioSystem::setLowRamDevice(bool isLowRamDevice)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    return af->setLowRamDevice(isLowRamDevice);
+}
+
 void AudioSystem::clearAudioConfigCache()
 {
     Mutex::Autolock _l(gLock);
@@ -771,6 +786,14 @@
     gOutputs.clear();
 }
 
+bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
+{
+    ALOGV("isOffloadSupported()");
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return false;
+    return aps->isOffloadSupported(info);
+}
+
 // ---------------------------------------------------------------------------
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 7eeb4f8..dd0ec73 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -19,31 +19,17 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioTrack"
 
-#include <stdint.h>
-#include <sys/types.h>
-#include <limits.h>
-
-#include <sched.h>
 #include <sys/resource.h>
-
-#include <private/media/AudioTrackShared.h>
-
-#include <media/AudioSystem.h>
-#include <media/AudioTrack.h>
-
-#include <utils/Log.h>
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <utils/Timers.h>
-#include <utils/Atomic.h>
-
-#include <cutils/bitops.h>
-#include <cutils/compiler.h>
-
-#include <system/audio.h>
-#include <system/audio_policy.h>
-
 #include <audio_utils/primitives.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+#include <utils/Log.h>
+#include <private/media/AudioTrackShared.h>
+#include <media/IAudioFlinger.h>
+
+#define WAIT_PERIOD_MS                  10
+#define WAIT_STREAM_END_TIMEOUT_SEC     120
+
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -82,7 +68,9 @@
 
     // Ensure that buffer depth covers at least audio hardware latency
     uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) minBufCount = 2;
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
 
     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
             afFrameCount * minBufCount * sampleRate / afSampleRate;
@@ -97,8 +85,7 @@
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
 }
 
@@ -112,16 +99,17 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
-            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId);
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
 }
 
 AudioTrack::AudioTrack(
@@ -134,42 +122,39 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
-    if (sharedBuffer == 0) {
-        ALOGE("sharedBuffer must be non-0");
-        mStatus = BAD_VALUE;
-        return;
-    }
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
-            sharedBuffer, false /*threadCanCallJava*/, sessionId);
+            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
 }
 
 AudioTrack::~AudioTrack()
 {
-    ALOGV_IF(mSharedBuffer != 0, "Destructor sharedBuffer: %p", mSharedBuffer->pointer());
-
     if (mStatus == NO_ERROR) {
         // Make sure that callback function exits in the case where
         // it is looping on buffer full condition in obtainBuffer().
         // Otherwise the callback thread will never exit.
         stop();
         if (mAudioTrackThread != 0) {
+            mProxy->interrupt();
             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
-        mAudioTrack.clear();
+        if (mAudioTrack != 0) {
+            mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
+            mAudioTrack.clear();
+        }
         IPCThreadState::self()->flushCommands();
         AudioSystem::releaseAudioSessionId(mSessionId);
     }
-    delete mProxy;
 }
 
 status_t AudioTrack::set(
@@ -184,8 +169,45 @@
         int notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType,
+        const audio_offload_info_t *offloadInfo)
 {
+    switch (transferType) {
+    case TRANSFER_DEFAULT:
+        if (sharedBuffer != 0) {
+            transferType = TRANSFER_SHARED;
+        } else if (cbf == NULL || threadCanCallJava) {
+            transferType = TRANSFER_SYNC;
+        } else {
+            transferType = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (cbf == NULL || sharedBuffer != 0) {
+            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        if (sharedBuffer != 0) {
+            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_SHARED:
+        if (sharedBuffer == 0) {
+            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
+            return BAD_VALUE;
+        }
+        break;
+    default:
+        ALOGE("Invalid transfer type %d", transferType);
+        return BAD_VALUE;
+    }
+    mTransfer = transferType;
+
     // FIXME "int" here is legacy and will be replaced by size_t later
     if (frameCountInt < 0) {
         ALOGE("Invalid frame count %d", frameCountInt);
@@ -199,11 +221,14 @@
     ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
 
     AutoMutex lock(mLock);
+
     if (mAudioTrack != 0) {
         ALOGE("Track already in use");
         return INVALID_OPERATION;
     }
 
+    mOutput = 0;
+
     // handle default values first.
     if (streamType == AUDIO_STREAM_DEFAULT) {
         streamType = AUDIO_STREAM_MUSIC;
@@ -228,7 +253,7 @@
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format");
+        ALOGE("Invalid format %d", format);
         return BAD_VALUE;
     }
 
@@ -239,7 +264,12 @@
     }
 
     // force direct flag if format is not linear PCM
-    if (!audio_is_linear_pcm(format)) {
+    // or offload was requested
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+            || !audio_is_linear_pcm(format)) {
+        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+                    ? "Offload request, forcing to Direct Output"
+                    : "Not linear PCM, forcing to Direct Output");
         flags = (audio_output_flags_t)
                 // FIXME why can't we allow direct AND fast?
                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
@@ -268,7 +298,8 @@
     audio_io_handle_t output = AudioSystem::getOutput(
                                     streamType,
                                     sampleRate, format, channelMask,
-                                    flags);
+                                    flags,
+                                    offloadInfo);
 
     if (output == 0) {
         ALOGE("Could not get audio output for stream type %d", streamType);
@@ -281,6 +312,7 @@
     mFrameCount = frameCount;
     mReqFrameCount = frameCount;
     mNotificationFramesReq = notificationFrames;
+    mNotificationFramesAct = 0;
     mSessionId = sessionId;
     mAuxEffectId = 0;
     mFlags = flags;
@@ -298,178 +330,198 @@
                                   frameCount,
                                   flags,
                                   sharedBuffer,
-                                  output);
+                                  output,
+                                  0 /*epoch*/);
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
-            mAudioTrackThread->requestExit();
+            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
+            mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // Use of direct and offloaded output streams is ref counted by audio policy manager.
+        // As getOutput was called above and resulted in an output stream to be opened,
+        // we need to release it.
+        AudioSystem::releaseOutput(output);
         return status;
     }
 
     mStatus = NO_ERROR;
-
     mStreamType = streamType;
     mFormat = format;
-
     mSharedBuffer = sharedBuffer;
-    mActive = false;
+    mState = STATE_STOPPED;
     mUserData = user;
-    mLoopCount = 0;
+    mLoopPeriod = 0;
     mMarkerPosition = 0;
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    mFlushed = false;
     AudioSystem::acquireAudioSessionId(mSessionId);
+    mSequence = 1;
+    mObservedSequence = mSequence;
+    mInUnderrun = false;
+    mOutput = output;
+
     return NO_ERROR;
 }
 
 // -------------------------------------------------------------------------
 
-void AudioTrack::start()
+status_t AudioTrack::start()
 {
-    sp<AudioTrackThread> t = mAudioTrackThread;
-
-    ALOGV("start %p", this);
-
     AutoMutex lock(mLock);
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
 
-    if (!mActive) {
-        mFlushed = false;
-        mActive = true;
-        mNewPosition = cblk->server + mUpdatePeriod;
-        cblk->lock.lock();
-        cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
-        cblk->waitTimeMs = 0;
-        android_atomic_and(~CBLK_DISABLED, &cblk->flags);
-        if (t != 0) {
-            t->resume();
-        } else {
-            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
-            get_sched_policy(0, &mPreviousSchedulingGroup);
-            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
-        }
-
-        ALOGV("start %p before lock cblk %p", this, cblk);
-        status_t status = NO_ERROR;
-        if (!(cblk->flags & CBLK_INVALID)) {
-            cblk->lock.unlock();
-            ALOGV("mAudioTrack->start()");
-            status = mAudioTrack->start();
-            cblk->lock.lock();
-            if (status == DEAD_OBJECT) {
-                android_atomic_or(CBLK_INVALID, &cblk->flags);
-            }
-        }
-        if (cblk->flags & CBLK_INVALID) {
-            audio_track_cblk_t* temp = cblk;
-            status = restoreTrack_l(temp, true /*fromStart*/);
-            cblk = temp;
-        }
-        cblk->lock.unlock();
-        if (status != NO_ERROR) {
-            ALOGV("start() failed");
-            mActive = false;
-            if (t != 0) {
-                t->pause();
-            } else {
-                setpriority(PRIO_PROCESS, 0, mPreviousPriority);
-                set_sched_policy(0, mPreviousSchedulingGroup);
-            }
-        }
+    if (mState == STATE_ACTIVE) {
+        return INVALID_OPERATION;
     }
 
-}
+    mInUnderrun = true;
 
-void AudioTrack::stop()
-{
+    State previousState = mState;
+    if (previousState == STATE_PAUSED_STOPPING) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_ACTIVE;
+    }
+    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
+        // reset current position as seen by client to 0
+        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+    }
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+
     sp<AudioTrackThread> t = mAudioTrackThread;
-
-    ALOGV("stop %p", this);
-
-    AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioTrack->stop();
-        // Cancel loops (If we are in the middle of a loop, playback
-        // would not stop until loopCount reaches 0).
-        setLoop_l(0, 0, 0);
-        // the playback head position will reset to 0, so if a marker is set, we need
-        // to activate it again
-        mMarkerReached = false;
-        // Force flush if a shared buffer is used otherwise audioflinger
-        // will not stop before end of buffer is reached.
-        // It may be needed to make sure that we stop playback, likely in case looping is on.
-        if (mSharedBuffer != 0) {
-            flush_l();
+    if (t != 0) {
+        if (previousState == STATE_STOPPING) {
+            mProxy->interrupt();
+        } else {
+            t->resume();
         }
+    } else {
+        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+        get_sched_policy(0, &mPreviousSchedulingGroup);
+        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+    }
+
+    status_t status = NO_ERROR;
+    if (!(flags & CBLK_INVALID)) {
+        status = mAudioTrack->start();
+        if (status == DEAD_OBJECT) {
+            flags |= CBLK_INVALID;
+        }
+    }
+    if (flags & CBLK_INVALID) {
+        status = restoreTrack_l("start");
+    }
+
+    if (status != NO_ERROR) {
+        ALOGE("start() status %d", status);
+        mState = previousState;
         if (t != 0) {
-            t->pause();
+            if (previousState != STATE_STOPPING) {
+                t->pause();
+            }
         } else {
             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
             set_sched_policy(0, mPreviousSchedulingGroup);
         }
     }
 
+    return status;
+}
+
+void AudioTrack::stop()
+{
+    AutoMutex lock(mLock);
+    // FIXME pause then stop should not be a nop
+    if (mState != STATE_ACTIVE) {
+        return;
+    }
+
+    if (isOffloaded()) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_STOPPED;
+    }
+
+    mProxy->interrupt();
+    mAudioTrack->stop();
+    // the playback head position will reset to 0, so if a marker is set, we need
+    // to activate it again
+    mMarkerReached = false;
+#if 0
+    // Force flush if a shared buffer is used otherwise audioflinger
+    // will not stop before end of buffer is reached.
+    // It may be needed to make sure that we stop playback, likely in case looping is on.
+    if (mSharedBuffer != 0) {
+        flush_l();
+    }
+#endif
+
+    sp<AudioTrackThread> t = mAudioTrackThread;
+    if (t != 0) {
+        if (!isOffloaded()) {
+            t->pause();
+        }
+    } else {
+        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+        set_sched_policy(0, mPreviousSchedulingGroup);
+    }
 }
 
 bool AudioTrack::stopped() const
 {
     AutoMutex lock(mLock);
-    return stopped_l();
+    return mState != STATE_ACTIVE;
 }
 
 void AudioTrack::flush()
 {
-    AutoMutex lock(mLock);
-    if (!mActive && mSharedBuffer == 0) {
-        flush_l();
+    if (mSharedBuffer != 0) {
+        return;
     }
+    AutoMutex lock(mLock);
+    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
+        return;
+    }
+    flush_l();
 }
 
 void AudioTrack::flush_l()
 {
-    ALOGV("flush");
-    ALOG_ASSERT(!mActive);
+    ALOG_ASSERT(mState != STATE_ACTIVE);
 
     // clear playback marker and periodic update counter
     mMarkerPosition = 0;
     mMarkerReached = false;
     mUpdatePeriod = 0;
+    mRefreshRemaining = true;
 
-    mFlushed = true;
+    mState = STATE_FLUSHED;
+    if (isOffloaded()) {
+        mProxy->interrupt();
+    }
+    mProxy->flush();
     mAudioTrack->flush();
-    // Release AudioTrack callback thread in case it was waiting for new buffers
-    // in AudioTrack::obtainBuffer()
-    mCblk->cv.signal();
 }
 
 void AudioTrack::pause()
 {
-    ALOGV("pause");
     AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioTrack->pause();
+    if (mState == STATE_ACTIVE) {
+        mState = STATE_PAUSED;
+    } else if (mState == STATE_STOPPING) {
+        mState = STATE_PAUSED_STOPPING;
+    } else {
+        return;
     }
+    mProxy->interrupt();
+    mAudioTrack->pause();
 }
 
 status_t AudioTrack::setVolume(float left, float right)
 {
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
     if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
         return BAD_VALUE;
     }
@@ -490,18 +542,11 @@
 
 status_t AudioTrack::setAuxEffectSendLevel(float level)
 {
-    ALOGV("setAuxEffectSendLevel(%f)", level);
-
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
     if (level < 0.0f || level > 1.0f) {
         return BAD_VALUE;
     }
-    AutoMutex lock(mLock);
 
+    AutoMutex lock(mLock);
     mSendLevel = level;
     mProxy->setSendLevel(level);
 
@@ -511,18 +556,17 @@
 void AudioTrack::getAuxEffectSendLevel(float* level) const
 {
     if (level != NULL) {
-        *level  = mSendLevel;
+        *level = mSendLevel;
     }
 }
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    uint32_t afSamplingRate;
-
-    if (mIsTimed) {
+    if (mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
+    uint32_t afSamplingRate;
     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
         return NO_INIT;
     }
@@ -550,58 +594,45 @@
 
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    AutoMutex lock(mLock);
-    return setLoop_l(loopStart, loopEnd, loopCount);
-}
-
-// must be called with mLock held
-status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
-{
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
-    audio_track_cblk_t* cblk = mCblk;
-
-    Mutex::Autolock _l(cblk->lock);
-
     if (loopCount == 0) {
-        cblk->loopStart = UINT_MAX;
-        cblk->loopEnd = UINT_MAX;
-        cblk->loopCount = 0;
-        mLoopCount = 0;
-        return NO_ERROR;
-    }
-
-    if (loopStart >= loopEnd ||
-        loopEnd - loopStart > mFrameCount ||
-        cblk->server > loopStart) {
-        ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, "
-              "user %d", loopStart, loopEnd, loopCount, mFrameCount, cblk->user);
+        ;
+    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
+            loopEnd - loopStart >= MIN_LOOP) {
+        ;
+    } else {
         return BAD_VALUE;
     }
 
-    if ((mSharedBuffer != 0) && (loopEnd > mFrameCount)) {
-        ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, "
-            "framecount %d",
-            loopStart, loopEnd, mFrameCount);
-        return BAD_VALUE;
+    AutoMutex lock(mLock);
+    // See setPosition() regarding setting parameters such as loop points or position while active
+    if (mState == STATE_ACTIVE) {
+        return INVALID_OPERATION;
     }
-
-    cblk->loopStart = loopStart;
-    cblk->loopEnd = loopEnd;
-    cblk->loopCount = loopCount;
-    mLoopCount = loopCount;
-
+    setLoop_l(loopStart, loopEnd, loopCount);
     return NO_ERROR;
 }
 
+void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
+{
+    // FIXME If setting a loop also sets position to start of loop, then
+    //       this is correct.  Otherwise it should be removed.
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
+    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
+}
+
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == NULL) {
+    // The only purpose of setting marker position is to get a callback
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
+    AutoMutex lock(mLock);
     mMarkerPosition = marker;
     mMarkerReached = false;
 
@@ -610,10 +641,14 @@
 
 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (marker == NULL) {
         return BAD_VALUE;
     }
 
+    AutoMutex lock(mLock);
     *marker = mMarkerPosition;
 
     return NO_ERROR;
@@ -621,24 +656,27 @@
 
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == NULL) {
+    // The only purpose of setting position update period is to get a callback
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
-    uint32_t curPosition;
-    getPosition(&curPosition);
-    mNewPosition = curPosition + updatePeriod;
+    AutoMutex lock(mLock);
+    mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
-
     return NO_ERROR;
 }
 
 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (updatePeriod == NULL) {
         return BAD_VALUE;
     }
 
+    AutoMutex lock(mLock);
     *updatePeriod = mUpdatePeriod;
 
     return NO_ERROR;
@@ -646,80 +684,108 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
-
-    AutoMutex lock(mLock);
-
-    if (!stopped_l()) {
-        return INVALID_OPERATION;
-    }
-
-    audio_track_cblk_t* cblk = mCblk;
-    Mutex::Autolock _l(cblk->lock);
-
-    if (position > cblk->user) {
+    if (position > mFrameCount) {
         return BAD_VALUE;
     }
 
-    cblk->server = position;
-    android_atomic_or(CBLK_FORCEREADY, &cblk->flags);
+    AutoMutex lock(mLock);
+    // Currently we require that the player is inactive before setting parameters such as position
+    // or loop points.  Otherwise, there could be a race condition: the application could read the
+    // current position, compute a new position or loop parameters, and then set that position or
+    // loop parameters but it would do the "wrong" thing since the position has continued to advance
+    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
+    // to specify how it wants to handle such scenarios.
+    if (mState == STATE_ACTIVE) {
+        return INVALID_OPERATION;
+    }
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mLoopPeriod = 0;
+    // FIXME Check whether loops and setting position are incompatible in old code.
+    // If we use setLoop for both purposes we lose the capability to set the position while looping.
+    mStaticProxy->setLoop(position, mFrameCount, 0);
 
     return NO_ERROR;
 }
 
-status_t AudioTrack::getPosition(uint32_t *position)
+status_t AudioTrack::getPosition(uint32_t *position) const
 {
     if (position == NULL) {
         return BAD_VALUE;
     }
-    AutoMutex lock(mLock);
-    *position = mFlushed ? 0 : mCblk->server;
 
+    AutoMutex lock(mLock);
+    if (isOffloaded()) {
+        uint32_t dspFrames = 0;
+
+        if (mOutput != 0) {
+            uint32_t halFrames;
+            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+        }
+        *position = dspFrames;
+    } else {
+        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
+        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
+                mProxy->getPosition();
+    }
+    return NO_ERROR;
+}
+
+status_t AudioTrack::getBufferPosition(size_t *position)
+{
+    if (mSharedBuffer == 0 || mIsTimed) {
+        return INVALID_OPERATION;
+    }
+    if (position == NULL) {
+        return BAD_VALUE;
+    }
+
+    AutoMutex lock(mLock);
+    *position = mStaticProxy->getBufferPosition();
     return NO_ERROR;
 }
 
 status_t AudioTrack::reload()
 {
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
     AutoMutex lock(mLock);
-
-    if (!stopped_l()) {
+    // See setPosition() regarding setting parameters such as loop points or position while active
+    if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-
-    flush_l();
-
-    (void) mProxy->stepUser(mFrameCount);
-
+    mNewPosition = mUpdatePeriod;
+    mLoopPeriod = 0;
+    // FIXME The new code cannot reload while keeping a loop specified.
+    // Need to check how the old code handled this, and whether it's a significant change.
+    mStaticProxy->setLoop(0, mFrameCount, 0);
     return NO_ERROR;
 }
 
 audio_io_handle_t AudioTrack::getOutput()
 {
     AutoMutex lock(mLock);
-    return getOutput_l();
+    return mOutput;
 }
 
 // must be called with mLock held
 audio_io_handle_t AudioTrack::getOutput_l()
 {
-    return AudioSystem::getOutput(mStreamType,
-            mSampleRate, mFormat, mChannelMask, mFlags);
+    if (mOutput) {
+        return mOutput;
+    } else {
+        return AudioSystem::getOutput(mStreamType,
+                                      mSampleRate, mFormat, mChannelMask, mFlags);
+    }
 }
 
 status_t AudioTrack::attachAuxEffect(int effectId)
 {
-    ALOGV("attachAuxEffect(%d)", effectId);
+    AutoMutex lock(mLock);
     status_t status = mAudioTrack->attachAuxEffect(effectId);
     if (status == NO_ERROR) {
         mAuxEffectId = effectId;
@@ -737,7 +803,8 @@
         size_t frameCount,
         audio_output_flags_t flags,
         const sp<IMemory>& sharedBuffer,
-        audio_io_handle_t output)
+        audio_io_handle_t output,
+        size_t epoch)
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -747,7 +814,8 @@
     }
 
     uint32_t afLatency;
-    if (AudioSystem::getLatency(output, streamType, &afLatency) != NO_ERROR) {
+    if ((status = AudioSystem::getLatency(output, streamType, &afLatency)) != NO_ERROR) {
+        ALOGE("getLatency(%d) failed status %d", output, status);
         return NO_INIT;
     }
 
@@ -775,12 +843,17 @@
             frameCount = sharedBuffer->size();
         } else if (frameCount == 0) {
             size_t afFrameCount;
-            if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+            status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+            if (status != NO_ERROR) {
+                ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType,
+                        status);
                 return NO_INIT;
             }
             frameCount = afFrameCount;
         }
-
+        if (mNotificationFramesAct != frameCount) {
+            mNotificationFramesAct = frameCount;
+        }
     } else if (sharedBuffer != 0) {
 
         // Ensure that buffer alignment matches channel count
@@ -806,17 +879,26 @@
 
         // FIXME move these calculations and associated checks to server
         uint32_t afSampleRate;
-        if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
+        status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
+        if (status != NO_ERROR) {
+            ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType,
+                    status);
             return NO_INIT;
         }
         size_t afFrameCount;
-        if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+        status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+        if (status != NO_ERROR) {
+            ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
             return NO_INIT;
         }
 
         // Ensure that buffer depth covers at least audio hardware latency
         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
-        if (minBufCount < 2) minBufCount = 2;
+        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
+                afFrameCount, minBufCount, afSampleRate, afLatency);
+        if (minBufCount <= 2) {
+            minBufCount = sampleRate == afSampleRate ? 2 : 3;
+        }
 
         size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
         ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
@@ -826,12 +908,9 @@
         if (frameCount == 0) {
             frameCount = minFrameCount;
         }
-        if (mNotificationFramesAct == 0) {
-            mNotificationFramesAct = frameCount/2;
-        }
         // Make sure that application is notified with sufficient margin
         // before underrun
-        if (mNotificationFramesAct > frameCount/2) {
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
             mNotificationFramesAct = frameCount/2;
         }
         if (frameCount < minFrameCount) {
@@ -858,6 +937,10 @@
         }
     }
 
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
+    }
+
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       sampleRate,
                                                       // AudioFlinger only sees 16-bit PCM
@@ -870,6 +953,7 @@
                                                       output,
                                                       tid,
                                                       &mSessionId,
+                                                      mName,
                                                       &status);
 
     if (track == 0) {
@@ -881,6 +965,10 @@
         ALOGE("Could not get control block");
         return NO_INIT;
     }
+    if (mAudioTrack != 0) {
+        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
+        mDeathNotifier.clear();
+    }
     mAudioTrack = track;
     mCblkMemory = iMem;
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
@@ -898,26 +986,49 @@
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
             mAwaitBoost = true;
+            if (sharedBuffer == 0) {
+                // double-buffering is not required for fast tracks, due to tighter scheduling
+                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
+                    mNotificationFramesAct = frameCount;
+                }
+            }
         } else {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
             flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
             mFlags = flags;
-        }
-        if (sharedBuffer == 0) {
-            mNotificationFramesAct = frameCount/2;
+            if (sharedBuffer == 0) {
+                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
+                    mNotificationFramesAct = frameCount/2;
+                }
+            }
         }
     }
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
+            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
+        } else {
+            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
+            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+            mFlags = flags;
+            return NO_INIT;
+        }
+    }
+
+    mRefreshRemaining = true;
+
+    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
+    // is the value of pointer() for the shared buffer, otherwise buffers points
+    // immediately after the control block.  This address is for the mapping within client
+    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
+    void* buffers;
     if (sharedBuffer == 0) {
-        mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
+        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
     } else {
-        mBuffers = sharedBuffer->pointer();
+        buffers = sharedBuffer->pointer();
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
-    cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
-    cblk->waitTimeMs = 0;
-    mRemainingFrames = mNotificationFramesAct;
     // FIXME don't believe this lie
     mLatency = afLatency + (1000*frameCount) / sampleRate;
     mFrameCount = frameCount;
@@ -928,147 +1039,154 @@
     }
 
     // update proxy
-    delete mProxy;
-    mProxy = new AudioTrackClientProxy(cblk, mBuffers, frameCount, mFrameSizeAF);
+    if (sharedBuffer == 0) {
+        mStaticProxy.clear();
+        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+    } else {
+        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+        mProxy = mStaticProxy;
+    }
     mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
             uint16_t(mVolume[LEFT] * 0x1000));
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
-    if (sharedBuffer != 0) {
-        // Force buffer full condition as data is already present in shared memory
-        mProxy->stepUser(frameCount);
-    }
+    mProxy->setEpoch(epoch);
+    mProxy->setMinimum(mNotificationFramesAct);
+
+    mDeathNotifier = new DeathNotifier(this);
+    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
 }
 
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
-
-    AutoMutex lock(mLock);
-    bool active;
-    status_t result = NO_ERROR;
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = audioBuffer->frameCount;
-    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;
-
-    audioBuffer->frameCount  = 0;
-    audioBuffer->size = 0;
-
-    size_t framesAvail = mProxy->framesAvailable();
-
-    cblk->lock.lock();
-    if (cblk->flags & CBLK_INVALID) {
-        goto create_new_track;
+    if (audioBuffer == NULL) {
+        return BAD_VALUE;
     }
-    cblk->lock.unlock();
+    if (mTransfer != TRANSFER_OBTAIN) {
+        audioBuffer->frameCount = 0;
+        audioBuffer->size = 0;
+        audioBuffer->raw = NULL;
+        return INVALID_OPERATION;
+    }
 
-    if (framesAvail == 0) {
-        cblk->lock.lock();
-        goto start_loop_here;
-        while (framesAvail == 0) {
-            active = mActive;
-            if (CC_UNLIKELY(!active)) {
-                ALOGV("Not active and NO_MORE_BUFFERS");
-                cblk->lock.unlock();
-                return NO_MORE_BUFFERS;
-            }
-            if (CC_UNLIKELY(!waitCount)) {
-                cblk->lock.unlock();
-                return WOULD_BLOCK;
-            }
-            if (!(cblk->flags & CBLK_INVALID)) {
-                mLock.unlock();
-                // this condition is in shared memory, so if IAudioTrack and control block
-                // are replaced due to mediaserver death or IAudioTrack invalidation then
-                // cv won't be signalled, but fortunately the timeout will limit the wait
-                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-                cblk->lock.unlock();
-                mLock.lock();
-                if (!mActive) {
-                    return status_t(STOPPED);
-                }
-                // IAudioTrack may have been re-created while mLock was unlocked
-                cblk = mCblk;
-                cblk->lock.lock();
-            }
+    const struct timespec *requested;
+    if (waitCount == -1) {
+        requested = &ClientProxy::kForever;
+    } else if (waitCount == 0) {
+        requested = &ClientProxy::kNonBlocking;
+    } else if (waitCount > 0) {
+        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        struct timespec timeout;
+        timeout.tv_sec = ms / 1000;
+        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        requested = &timeout;
+    } else {
+        ALOGE("%s invalid waitCount %d", __func__, waitCount);
+        requested = NULL;
+    }
+    return obtainBuffer(audioBuffer, requested);
+}
 
-            if (cblk->flags & CBLK_INVALID) {
-                goto create_new_track;
-            }
-            if (CC_UNLIKELY(result != NO_ERROR)) {
-                cblk->waitTimeMs += waitTimeMs;
-                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
-                    // timing out when a loop has been set and we have already written upto loop end
-                    // is a normal condition: no need to wake AudioFlinger up.
-                    if (cblk->user < cblk->loopEnd) {
-                        ALOGW("obtainBuffer timed out (is the CPU pegged?) %p name=%#x user=%08x, "
-                              "server=%08x", this, cblk->mName, cblk->user, cblk->server);
-                        //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
-                        cblk->lock.unlock();
-                        result = mAudioTrack->start();
-                        cblk->lock.lock();
-                        if (result == DEAD_OBJECT) {
-                            android_atomic_or(CBLK_INVALID, &cblk->flags);
-create_new_track:
-                            audio_track_cblk_t* temp = cblk;
-                            result = restoreTrack_l(temp, false /*fromStart*/);
-                            cblk = temp;
-                        }
-                        if (result != NO_ERROR) {
-                            ALOGW("obtainBuffer create Track error %d", result);
-                            cblk->lock.unlock();
-                            return result;
-                        }
+status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+        struct timespec *elapsed, size_t *nonContig)
+{
+    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
+    uint32_t oldSequence = 0;
+    uint32_t newSequence;
+
+    Proxy::Buffer buffer;
+    status_t status = NO_ERROR;
+
+    static const int32_t kMaxTries = 5;
+    int32_t tryCounter = kMaxTries;
+
+    do {
+        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
+        // keep them from going away if another thread re-creates the track during obtainBuffer()
+        sp<AudioTrackClientProxy> proxy;
+        sp<IMemory> iMem;
+
+        {   // start of lock scope
+            AutoMutex lock(mLock);
+
+            newSequence = mSequence;
+            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
+            if (status == DEAD_OBJECT) {
+                // re-create track, unless someone else has already done so
+                if (newSequence == oldSequence) {
+                    status = restoreTrack_l("obtainBuffer");
+                    if (status != NO_ERROR) {
+                        buffer.mFrameCount = 0;
+                        buffer.mRaw = NULL;
+                        buffer.mNonContig = 0;
+                        break;
                     }
-                    cblk->waitTimeMs = 0;
-                }
-
-                if (--waitCount == 0) {
-                    cblk->lock.unlock();
-                    return TIMED_OUT;
                 }
             }
-            // read the server count again
-        start_loop_here:
-            framesAvail = mProxy->framesAvailable_l();
-        }
-        cblk->lock.unlock();
+            oldSequence = newSequence;
+
+            // Keep the extra references
+            proxy = mProxy;
+            iMem = mCblkMemory;
+
+            if (mState == STATE_STOPPING) {
+                status = -EINTR;
+                buffer.mFrameCount = 0;
+                buffer.mRaw = NULL;
+                buffer.mNonContig = 0;
+                break;
+            }
+
+            // Non-blocking if track is stopped or paused
+            if (mState != STATE_ACTIVE) {
+                requested = &ClientProxy::kNonBlocking;
+            }
+
+        }   // end of lock scope
+
+        buffer.mFrameCount = audioBuffer->frameCount;
+        // FIXME starts the requested timeout and elapsed over from scratch
+        status = proxy->obtainBuffer(&buffer, requested, elapsed);
+
+    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+
+    audioBuffer->frameCount = buffer.mFrameCount;
+    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
+    audioBuffer->raw = buffer.mRaw;
+    if (nonContig != NULL) {
+        *nonContig = buffer.mNonContig;
     }
-
-    cblk->waitTimeMs = 0;
-
-    if (framesReq > framesAvail) {
-        framesReq = framesAvail;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    audioBuffer->frameCount = framesReq;
-    audioBuffer->size = framesReq * mFrameSizeAF;
-    audioBuffer->raw = mProxy->buffer(u);
-    active = mActive;
-    return active ? status_t(NO_ERROR) : status_t(STOPPED);
+    return status;
 }
 
 void AudioTrack::releaseBuffer(Buffer* audioBuffer)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    if (mTransfer == TRANSFER_SHARED) {
+        return;
+    }
+
+    size_t stepCount = audioBuffer->size / mFrameSizeAF;
+    if (stepCount == 0) {
+        return;
+    }
+
+    Proxy::Buffer buffer;
+    buffer.mFrameCount = stepCount;
+    buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
-    audio_track_cblk_t* cblk = mCblk;
-    (void) mProxy->stepUser(audioBuffer->frameCount);
-    if (audioBuffer->frameCount > 0) {
-        // restart track if it was disabled by audioflinger due to previous underrun
-        if (mActive && (cblk->flags & CBLK_DISABLED)) {
-            android_atomic_and(~CBLK_DISABLED, &cblk->flags);
-            ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, cblk->mName);
+    mInUnderrun = false;
+    mProxy->releaseBuffer(&buffer);
+
+    // restart track if it was disabled by audioflinger due to previous underrun
+    if (mState == STATE_ACTIVE) {
+        audio_track_cblk_t* cblk = mCblk;
+        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
+            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
+                    this, mName.string());
+            // FIXME ignoring status
             mAudioTrack->start();
         }
     }
@@ -1078,68 +1196,46 @@
 
 ssize_t AudioTrack::write(const void* buffer, size_t userSize)
 {
-
-    if (mSharedBuffer != 0 || mIsTimed) {
+    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
         return INVALID_OPERATION;
     }
 
-    if (ssize_t(userSize) < 0) {
+    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
         // Sanity-check: user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
-        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
-                buffer, userSize, userSize);
+        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
-    ALOGV("write %p: %d bytes, mActive=%d", this, userSize, mActive);
-
-    if (userSize == 0) {
-        return 0;
-    }
-
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    mLock.lock();
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    mLock.unlock();
-
-    // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
-    // so all cblk references might still refer to old shared memory, but that should be benign
-
-    ssize_t written = 0;
-    const int8_t *src = (const int8_t *)buffer;
+    size_t written = 0;
     Buffer audioBuffer;
-    size_t frameSz = frameSize();
 
-    do {
-        audioBuffer.frameCount = userSize/frameSz;
+    while (userSize >= mFrameSize) {
+        audioBuffer.frameCount = userSize / mFrameSize;
 
-        status_t err = obtainBuffer(&audioBuffer, -1);
+        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
         if (err < 0) {
-            // out of buffers, return #bytes written
-            if (err == status_t(NO_MORE_BUFFERS)) {
+            if (written > 0) {
                 break;
             }
             return ssize_t(err);
         }
 
         size_t toWrite;
-
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
             // Divide capacity by 2 to take expansion into account
-            toWrite = audioBuffer.size>>1;
-            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) src, toWrite);
+            toWrite = audioBuffer.size >> 1;
+            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
         } else {
             toWrite = audioBuffer.size;
-            memcpy(audioBuffer.i8, src, toWrite);
+            memcpy(audioBuffer.i8, buffer, toWrite);
         }
-        src += toWrite;
+        buffer = ((const char *) buffer) + toWrite;
         userSize -= toWrite;
         written += toWrite;
 
         releaseBuffer(&audioBuffer);
-    } while (userSize >= frameSz);
+    }
 
     return written;
 }
@@ -1155,32 +1251,30 @@
     AutoMutex lock(mLock);
     status_t result = UNKNOWN_ERROR;
 
+#if 1
     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
     // while we are accessing the cblk
     sp<IAudioTrack> audioTrack = mAudioTrack;
     sp<IMemory> iMem = mCblkMemory;
+#endif
 
     // If the track is not invalid already, try to allocate a buffer.  alloc
     // fails indicating that the server is dead, flag the track as invalid so
     // we can attempt to restore in just a bit.
     audio_track_cblk_t* cblk = mCblk;
-    if (!(cblk->flags & CBLK_INVALID)) {
+    if (!(cblk->mFlags & CBLK_INVALID)) {
         result = mAudioTrack->allocateTimedBuffer(size, buffer);
         if (result == DEAD_OBJECT) {
-            android_atomic_or(CBLK_INVALID, &cblk->flags);
+            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
         }
     }
 
     // If the track is invalid at this point, attempt to restore it. and try the
     // allocation one more time.
-    if (cblk->flags & CBLK_INVALID) {
-        cblk->lock.lock();
-        audio_track_cblk_t* temp = cblk;
-        result = restoreTrack_l(temp, false /*fromStart*/);
-        cblk = temp;
-        cblk->lock.unlock();
+    if (cblk->mFlags & CBLK_INVALID) {
+        result = restoreTrack_l("allocateTimedBuffer");
 
-        if (result == OK) {
+        if (result == NO_ERROR) {
             result = mAudioTrack->allocateTimedBuffer(size, buffer);
         }
     }
@@ -1197,9 +1291,10 @@
         audio_track_cblk_t* cblk = mCblk;
         // restart track if it was disabled by audioflinger due to previous underrun
         if (buffer->size() != 0 && status == NO_ERROR &&
-                mActive && (cblk->flags & CBLK_DISABLED)) {
-            android_atomic_and(~CBLK_DISABLED, &cblk->flags);
+                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
+            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
             ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
+            // FIXME ignoring status
             mAudioTrack->start();
         }
     }
@@ -1214,11 +1309,12 @@
 
 // -------------------------------------------------------------------------
 
-bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
+nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
-    Buffer audioBuffer;
-    uint32_t frames;
-    size_t writtenSize;
+    // Currently the AudioTrack thread is not created if there are no callbacks.
+    // Would it ever make sense to run the thread, even without callbacks?
+    // If so, then replace this by checks at each use for mCbf != NULL.
+    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
 
     mLock.lock();
     if (mAwaitBoost) {
@@ -1238,88 +1334,228 @@
         if (tryCounter < 0) {
             ALOGE("did not receive expected priority boost on time");
         }
-        return true;
+        // Run again immediately
+        return 0;
     }
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
-    bool active = mActive;
-    mLock.unlock();
 
-    // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
-    // so all cblk references might still refer to old shared memory, but that should be benign
+    // Can only reference mCblk while locked
+    int32_t flags = android_atomic_and(
+        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
 
-    // Manage underrun callback
-    if (active && (mProxy->framesAvailable() == mFrameCount)) {
-        ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
-        if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
-            mCbf(EVENT_UNDERRUN, mUserData, 0);
-            if (cblk->server == mFrameCount) {
-                mCbf(EVENT_BUFFER_END, mUserData, 0);
-            }
-            if (mSharedBuffer != 0) {
-                return false;
-            }
+    // Check for track invalidation
+    if (flags & CBLK_INVALID) {
+        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
+        // AudioSystem cache. We should not exit here but after calling the callback so
+        // that the upper layers can recreate the track
+        if (!isOffloaded() || (mSequence == mObservedSequence)) {
+            status_t status = restoreTrack_l("processAudioBuffer");
+            mLock.unlock();
+            // Run again immediately, but with a new IAudioTrack
+            return 0;
         }
     }
 
-    // Manage loop end callback
-    while (mLoopCount > cblk->loopCount) {
-        int loopCount = -1;
-        mLoopCount--;
-        if (mLoopCount >= 0) loopCount = mLoopCount;
+    bool waitStreamEnd = mState == STATE_STOPPING;
+    bool active = mState == STATE_ACTIVE;
 
-        mCbf(EVENT_LOOP_END, mUserData, (void *)&loopCount);
+    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
+    bool newUnderrun = false;
+    if (flags & CBLK_UNDERRUN) {
+#if 0
+        // Currently in shared buffer mode, when the server reaches the end of buffer,
+        // the track stays active in continuous underrun state.  It's up to the application
+        // to pause or stop the track, or set the position to a new offset within buffer.
+        // This was some experimental code to auto-pause on underrun. Keeping it here
+        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
+        if (mTransfer == TRANSFER_SHARED) {
+            mState = STATE_PAUSED;
+            active = false;
+        }
+#endif
+        if (!mInUnderrun) {
+            mInUnderrun = true;
+            newUnderrun = true;
+        }
     }
 
+    // Get current position of server
+    size_t position = mProxy->getPosition();
+
     // Manage marker callback
-    if (!mMarkerReached && (mMarkerPosition > 0)) {
-        if (cblk->server >= mMarkerPosition) {
-            mCbf(EVENT_MARKER, mUserData, (void *)&mMarkerPosition);
-            mMarkerReached = true;
-        }
+    bool markerReached = false;
+    size_t markerPosition = mMarkerPosition;
+    // FIXME fails for wraparound, need 64 bits
+    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+        mMarkerReached = markerReached = true;
     }
 
-    // Manage new position callback
-    if (mUpdatePeriod > 0) {
-        while (cblk->server >= mNewPosition) {
-            mCbf(EVENT_NEW_POS, mUserData, (void *)&mNewPosition);
-            mNewPosition += mUpdatePeriod;
-        }
+    // Determine number of new position callback(s) that will be needed, while locked
+    size_t newPosCount = 0;
+    size_t newPosition = mNewPosition;
+    size_t updatePeriod = mUpdatePeriod;
+    // FIXME fails for wraparound, need 64 bits
+    if (updatePeriod > 0 && position >= newPosition) {
+        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        mNewPosition += updatePeriod * newPosCount;
     }
 
-    // If Shared buffer is used, no data is requested from client.
-    if (mSharedBuffer != 0) {
-        frames = 0;
-    } else {
-        frames = mRemainingFrames;
+    // Cache other fields that will be needed soon
+    uint32_t loopPeriod = mLoopPeriod;
+    uint32_t sampleRate = mSampleRate;
+    size_t notificationFrames = mNotificationFramesAct;
+    if (mRefreshRemaining) {
+        mRefreshRemaining = false;
+        mRemainingFrames = notificationFrames;
+        mRetryOnPartialBuffer = false;
     }
+    size_t misalignment = mProxy->getMisalignment();
+    uint32_t sequence = mSequence;
 
-    // See description of waitCount parameter at declaration of obtainBuffer().
-    // The logic below prevents us from being stuck below at obtainBuffer()
-    // not being able to handle timed events (position, markers, loops).
-    int32_t waitCount = -1;
-    if (mUpdatePeriod || (!mMarkerReached && mMarkerPosition) || mLoopCount) {
-        waitCount = 1;
-    }
+    // These fields don't need to be cached, because they are assigned only by set():
+    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
+    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
 
-    do {
+    mLock.unlock();
 
-        audioBuffer.frameCount = frames;
+    if (waitStreamEnd) {
+        AutoMutex lock(mLock);
 
-        status_t err = obtainBuffer(&audioBuffer, waitCount);
-        if (err < NO_ERROR) {
-            if (err != TIMED_OUT) {
-                ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
-                        "Error obtaining an audio buffer, giving up.");
-                return false;
+        sp<AudioTrackClientProxy> proxy = mProxy;
+        sp<IMemory> iMem = mCblkMemory;
+
+        struct timespec timeout;
+        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
+        timeout.tv_nsec = 0;
+
+        mLock.unlock();
+        status_t status = mProxy->waitStreamEndDone(&timeout);
+        mLock.lock();
+        switch (status) {
+        case NO_ERROR:
+        case DEAD_OBJECT:
+        case TIMED_OUT:
+            mLock.unlock();
+            mCbf(EVENT_STREAM_END, mUserData, NULL);
+            mLock.lock();
+            if (mState == STATE_STOPPING) {
+                mState = STATE_STOPPED;
+                if (status != DEAD_OBJECT) {
+                    return NS_INACTIVE;
+                }
             }
-            break;
+            return 0;
+        default:
+            return 0;
         }
-        if (err == status_t(STOPPED)) {
-            return false;
+    }
+
+    // perform callbacks while unlocked
+    if (newUnderrun) {
+        mCbf(EVENT_UNDERRUN, mUserData, NULL);
+    }
+    // FIXME we will miss loops if loop cycle was signaled several times since last call
+    //       to processAudioBuffer()
+    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
+        mCbf(EVENT_LOOP_END, mUserData, NULL);
+    }
+    if (flags & CBLK_BUFFER_END) {
+        mCbf(EVENT_BUFFER_END, mUserData, NULL);
+    }
+    if (markerReached) {
+        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+    }
+    while (newPosCount > 0) {
+        size_t temp = newPosition;
+        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        newPosition += updatePeriod;
+        newPosCount--;
+    }
+
+    if (mObservedSequence != sequence) {
+        mObservedSequence = sequence;
+        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        // for offloaded tracks, just wait for the upper layers to recreate the track
+        if (isOffloaded()) {
+            return NS_INACTIVE;
+        }
+    }
+
+    // if inactive, then don't run me again until re-started
+    if (!active) {
+        return NS_INACTIVE;
+    }
+
+    // Compute the estimated time until the next timed event (position, markers, loops)
+    // FIXME only for non-compressed audio
+    uint32_t minFrames = ~0;
+    if (!markerReached && position < markerPosition) {
+        minFrames = markerPosition - position;
+    }
+    if (loopPeriod > 0 && loopPeriod < minFrames) {
+        minFrames = loopPeriod;
+    }
+    if (updatePeriod > 0 && updatePeriod < minFrames) {
+        minFrames = updatePeriod;
+    }
+
+    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
+    static const uint32_t kPoll = 0;
+    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
+        minFrames = kPoll * notificationFrames;
+    }
+
+    // Convert frame units to time units
+    nsecs_t ns = NS_WHENEVER;
+    if (minFrames != (uint32_t) ~0) {
+        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
+        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
+    }
+
+    // If not supplying data by EVENT_MORE_DATA, then we're done
+    if (mTransfer != TRANSFER_CALLBACK) {
+        return ns;
+    }
+
+    struct timespec timeout;
+    const struct timespec *requested = &ClientProxy::kForever;
+    if (ns != NS_WHENEVER) {
+        timeout.tv_sec = ns / 1000000000LL;
+        timeout.tv_nsec = ns % 1000000000LL;
+        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+        requested = &timeout;
+    }
+
+    while (mRemainingFrames > 0) {
+
+        Buffer audioBuffer;
+        audioBuffer.frameCount = mRemainingFrames;
+        size_t nonContig;
+        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
+        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
+                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+        requested = &ClientProxy::kNonBlocking;
+        size_t avail = audioBuffer.frameCount + nonContig;
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
+        if (err != NO_ERROR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
+                    (isOffloaded() && (err == DEAD_OBJECT))) {
+                return 0;
+            }
+            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
+            return NS_NEVER;
+        }
+
+        if (mRetryOnPartialBuffer) {
+            mRetryOnPartialBuffer = false;
+            if (avail < mRemainingFrames) {
+                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
+                if (ns < 0 || myns < ns) {
+                    ns = myns;
+                }
+                return ns;
+            }
         }
 
         // Divide buffer size by 2 to take into account the expansion
@@ -1331,139 +1567,160 @@
 
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        writtenSize = audioBuffer.size;
+        size_t writtenSize = audioBuffer.size;
+        size_t writtenFrames = writtenSize / mFrameSize;
 
         // Sanity check on returned size
-        if (ssize_t(writtenSize) <= 0) {
+        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
+            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
+                    reqSize, (int) writtenSize);
+            return NS_NEVER;
+        }
+
+        if (writtenSize == 0) {
             // The callback is done filling buffers
             // Keep this thread going to handle timed events and
             // still try to get more data in intervals of WAIT_PERIOD_MS
             // but don't just loop and block the CPU, so wait
-            usleep(WAIT_PERIOD_MS*1000);
-            break;
-        }
-
-        if (writtenSize > reqSize) {
-            writtenSize = reqSize;
+            return WAIT_PERIOD_MS * 1000000LL;
         }
 
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
             // 8 to 16 bit conversion, note that source and destination are the same address
             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
-            writtenSize <<= 1;
+            audioBuffer.size <<= 1;
         }
 
-        audioBuffer.size = writtenSize;
-        // NOTE: cblk->frameSize is not equal to AudioTrack::frameSize() for
-        // 8 bit PCM data: in this case,  cblk->frameSize is based on a sample size of
-        // 16 bit.
-        audioBuffer.frameCount = writtenSize / mFrameSizeAF;
-
-        frames -= audioBuffer.frameCount;
+        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
+        audioBuffer.frameCount = releasedFrames;
+        mRemainingFrames -= releasedFrames;
+        if (misalignment >= releasedFrames) {
+            misalignment -= releasedFrames;
+        } else {
+            misalignment = 0;
+        }
 
         releaseBuffer(&audioBuffer);
-    }
-    while (frames);
 
-    if (frames == 0) {
-        mRemainingFrames = mNotificationFramesAct;
-    } else {
-        mRemainingFrames = frames;
+        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
+        // if callback doesn't like to accept the full chunk
+        if (writtenSize < reqSize) {
+            continue;
+        }
+
+        // There could be enough non-contiguous frames available to satisfy the remaining request
+        if (mRemainingFrames <= nonContig) {
+            continue;
+        }
+
+#if 0
+        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
+        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
+        // that total to a sum == notificationFrames.
+        if (0 < misalignment && misalignment <= mRemainingFrames) {
+            mRemainingFrames = misalignment;
+            return (mRemainingFrames * 1100000000LL) / sampleRate;
+        }
+#endif
+
     }
-    return true;
+    mRemainingFrames = notificationFrames;
+    mRetryOnPartialBuffer = true;
+
+    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
+    return 0;
 }
 
-// must be called with mLock and refCblk.lock held. Callers must also hold strong references on
-// the IAudioTrack and IMemory in case they are recreated here.
-// If the IAudioTrack is successfully restored, the refCblk pointer is updated
-// FIXME Don't depend on caller to hold strong references.
-status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& refCblk, bool fromStart)
+status_t AudioTrack::restoreTrack_l(const char *from)
 {
+    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
+          isOffloaded() ? "Offloaded" : "PCM", from);
+    ++mSequence;
     status_t result;
 
-    audio_track_cblk_t* cblk = refCblk;
-    audio_track_cblk_t* newCblk = cblk;
-    ALOGW("dead IAudioTrack, creating a new one from %s",
-        fromStart ? "start()" : "obtainBuffer()");
-
-    // signal old cblk condition so that other threads waiting for available buffers stop
-    // waiting now
-    cblk->cv.broadcast();
-    cblk->lock.unlock();
-
     // refresh the audio configuration cache in this process to make sure we get new
     // output parameters in getOutput_l() and createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
+    if (isOffloaded()) {
+        return DEAD_OBJECT;
+    }
+
+    // force new output query from audio policy manager;
+    mOutput = 0;
+    audio_io_handle_t output = getOutput_l();
+
     // if the new IAudioTrack is created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory
+    size_t position = mProxy->getPosition();
+    mNewPosition = position + mUpdatePeriod;
+    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
     result = createTrack_l(mStreamType,
                            mSampleRate,
                            mFormat,
                            mReqFrameCount,  // so that frame count never goes down
                            mFlags,
                            mSharedBuffer,
-                           getOutput_l());
+                           output,
+                           position /*epoch*/);
 
     if (result == NO_ERROR) {
-        uint32_t user = cblk->user;
-        uint32_t server = cblk->server;
+        // continue playback from last known position, but
+        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
+        if (mStaticProxy != NULL) {
+            mLoopPeriod = 0;
+            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
+        }
+        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
+        //       track destruction have been played? This is critical for SoundPool implementation
+        //       This must be broken, and needs to be tested/debugged.
+#if 0
         // restore write index and set other indexes to reflect empty buffer status
-        newCblk = mCblk;
-        newCblk->user = user;
-        newCblk->server = user;
-        newCblk->userBase = user;
-        newCblk->serverBase = user;
-        // restore loop: this is not guaranteed to succeed if new frame count is not
-        // compatible with loop length
-        setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount);
-        size_t frames = 0;
-        if (!fromStart) {
-            newCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+        if (!strcmp(from, "start")) {
             // Make sure that a client relying on callback events indicating underrun or
             // the actual amount of audio frames played (e.g SoundPool) receives them.
             if (mSharedBuffer == 0) {
-                if (user > server) {
-                    frames = ((user - server) > mFrameCount) ?
-                            mFrameCount : (user - server);
-                    memset(mBuffers, 0, frames * mFrameSizeAF);
-                }
                 // restart playback even if buffer is not completely filled.
-                android_atomic_or(CBLK_FORCEREADY, &newCblk->flags);
+                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
             }
         }
-        if (mSharedBuffer != 0) {
-            frames = mFrameCount;
-        }
-        if (frames > 0) {
-            // stepUser() clears CBLK_UNDERRUN flag enabling underrun callbacks to
-            // the client
-            mProxy->stepUser(frames);
-        }
-        if (mActive) {
+#endif
+        if (mState == STATE_ACTIVE) {
             result = mAudioTrack->start();
-            ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
-        }
-        if (fromStart && result == NO_ERROR) {
-            mNewPosition = newCblk->server + mUpdatePeriod;
         }
     }
-    ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result);
-    ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-        result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
-
-    if (result == NO_ERROR) {
-        // from now on we switch to the newly created cblk
-        refCblk = newCblk;
+    if (result != NO_ERROR) {
+        // Use of direct and offloaded output streams is ref counted by audio policy manager.
+        // As getOutput was called above and resulted in an output stream to be opened,
+        // we need to release it.
+        AudioSystem::releaseOutput(output);
+        ALOGW("restoreTrack_l() failed status %d", result);
+        mState = STATE_STOPPED;
     }
-    newCblk->lock.lock();
-
-    ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d", result);
 
     return result;
 }
 
+status_t AudioTrack::setParameters(const String8& keyValuePairs)
+{
+    AutoMutex lock(mLock);
+    if (mAudioTrack != 0) {
+        return mAudioTrack->setParameters(keyValuePairs);
+    } else {
+        return NO_INIT;
+    }
+}
+
+String8 AudioTrack::getParameters(const String8& keys)
+{
+    if (mOutput) {
+        return AudioSystem::getParameters(mOutput, keys);
+    } else {
+        return String8::empty();
+    }
+}
+
 status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
 {
 
@@ -1480,16 +1737,33 @@
     result.append(buffer);
     snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
     result.append(buffer);
-    snprintf(buffer, 255, "  active(%d), latency (%d)\n", mActive, mLatency);
+    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
     result.append(buffer);
     ::write(fd, result.string(), result.size());
     return NO_ERROR;
 }
 
+uint32_t AudioTrack::getUnderrunFrames() const
+{
+    AutoMutex lock(mLock);
+    return mProxy->getUnderrunFrames();
+}
+
+// =========================================================================
+
+void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
+{
+    sp<AudioTrack> audioTrack = mAudioTrack.promote();
+    if (audioTrack != 0) {
+        AutoMutex lock(audioTrack->mLock);
+        audioTrack->mProxy->binderDied();
+    }
+}
+
 // =========================================================================
 
 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
 {
 }
 
@@ -1507,10 +1781,26 @@
             return true;
         }
     }
-    if (!mReceiver.processAudioBuffer(this)) {
-        pause();
+    nsecs_t ns = mReceiver.processAudioBuffer(this);
+    switch (ns) {
+    case 0:
+        return true;
+    case NS_WHENEVER:
+        sleep(1);
+        return true;
+    case NS_INACTIVE:
+        pauseConditional();
+        return true;
+    case NS_NEVER:
+        return false;
+    default:
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        struct timespec req;
+        req.tv_sec = ns / 1000000000LL;
+        req.tv_nsec = ns % 1000000000LL;
+        nanosleep(&req, NULL /*rem*/);
+        return true;
     }
-    return true;
 }
 
 void AudioTrack::AudioTrackThread::requestExit()
@@ -1524,6 +1814,17 @@
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
+    mResumeLatch = false;
+}
+
+void AudioTrack::AudioTrackThread::pauseConditional()
+{
+    AutoMutex _l(mMyLock);
+    if (mResumeLatch) {
+        mResumeLatch = false;
+    } else {
+        mPaused = true;
+    }
 }
 
 void AudioTrack::AudioTrackThread::resume()
@@ -1531,7 +1832,10 @@
     AutoMutex _l(mMyLock);
     if (mPaused) {
         mPaused = false;
+        mResumeLatch = false;
         mMyCond.signal();
+    } else {
+        mResumeLatch = true;
     }
 }
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 13d47c9..e7abb40 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -19,178 +19,823 @@
 
 #include <private/media/AudioTrackShared.h>
 #include <utils/Log.h>
+extern "C" {
+#include "../private/bionic_futex.h"
+}
 
 namespace android {
 
 audio_track_cblk_t::audio_track_cblk_t()
-    : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0),
-    userBase(0), serverBase(0), frameCount_(0),
-    loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000),
-    mSampleRate(0), mSendLevel(0), flags(0)
+    : mServer(0), frameCount_(0), mFutex(0), mMinimum(0),
+    mVolumeLR(0x10001000), mSampleRate(0), mSendLevel(0), mFlags(0)
+{
+    memset(&u, 0, sizeof(u));
+}
+
+// ---------------------------------------------------------------------------
+
+Proxy::Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+        bool isOut, bool clientInServer)
+    : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize),
+      mFrameCountP2(roundup(frameCount)), mIsOut(isOut), mClientInServer(clientInServer),
+      mIsShutdown(false), mUnreleased(0)
 {
 }
 
-uint32_t audio_track_cblk_t::stepUser(size_t stepCount, size_t frameCount, bool isOut)
+// ---------------------------------------------------------------------------
+
+ClientProxy::ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+        size_t frameSize, bool isOut, bool clientInServer)
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mEpoch(0)
 {
-    ALOGV("stepuser %08x %08x %d", user, server, stepCount);
-
-    uint32_t u = user;
-    u += stepCount;
-    // Ensure that user is never ahead of server for AudioRecord
-    if (isOut) {
-        // If stepServer() has been called once, switch to normal obtainBuffer() timeout period
-        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
-            bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-        }
-    } else if (u > server) {
-        ALOGW("stepUser occurred after track reset");
-        u = server;
-    }
-
-    if (u >= frameCount) {
-        // common case, user didn't just wrap
-        if (u - frameCount >= userBase ) {
-            userBase += frameCount;
-        }
-    } else if (u >= userBase + frameCount) {
-        // user just wrapped
-        userBase += frameCount;
-    }
-
-    user = u;
-
-    // Clear flow control error condition as new data has been written/read to/from buffer.
-    if (flags & CBLK_UNDERRUN) {
-        android_atomic_and(~CBLK_UNDERRUN, &flags);
-    }
-
-    return u;
 }
 
-bool audio_track_cblk_t::stepServer(size_t stepCount, size_t frameCount, bool isOut)
+const struct timespec ClientProxy::kForever = {INT_MAX /*tv_sec*/, 0 /*tv_nsec*/};
+const struct timespec ClientProxy::kNonBlocking = {0 /*tv_sec*/, 0 /*tv_nsec*/};
+
+#define MEASURE_NS 10000000 // attempt to provide accurate timeouts if requested >= MEASURE_NS
+
+// To facilitate quicker recovery from server failure, this value limits the timeout per each futex
+// wait.  However it does not protect infinite timeouts.  If defined to be zero, there is no limit.
+// FIXME May not be compatible with audio tunneling requirements where timeout should be in the
+// order of minutes.
+#define MAX_SEC    5
+
+status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
+        struct timespec *elapsed)
 {
-    ALOGV("stepserver %08x %08x %d", user, server, stepCount);
+    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
+    struct timespec total;          // total elapsed time spent waiting
+    total.tv_sec = 0;
+    total.tv_nsec = 0;
+    bool measure = elapsed != NULL; // whether to measure total elapsed time spent waiting
 
-    if (!tryLock()) {
-        ALOGW("stepServer() could not lock cblk");
-        return false;
-    }
-
-    uint32_t s = server;
-    bool flushed = (s == user);
-
-    s += stepCount;
-    if (isOut) {
-        // Mark that we have read the first buffer so that next time stepUser() is called
-        // we switch to normal obtainBuffer() timeout period
-        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
-            bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1;
-        }
-        // It is possible that we receive a flush()
-        // while the mixer is processing a block: in this case,
-        // stepServer() is called After the flush() has reset u & s and
-        // we have s > u
-        if (flushed) {
-            ALOGW("stepServer occurred after track reset");
-            s = user;
-        }
-    }
-
-    if (s >= loopEnd) {
-        ALOGW_IF(s > loopEnd, "stepServer: s %u > loopEnd %u", s, loopEnd);
-        s = loopStart;
-        if (--loopCount == 0) {
-            loopEnd = UINT_MAX;
-            loopStart = UINT_MAX;
-        }
-    }
-
-    if (s >= frameCount) {
-        // common case, server didn't just wrap
-        if (s - frameCount >= serverBase ) {
-            serverBase += frameCount;
-        }
-    } else if (s >= serverBase + frameCount) {
-        // server just wrapped
-        serverBase += frameCount;
-    }
-
-    server = s;
-
-    if (!(flags & CBLK_INVALID)) {
-        cv.signal();
-    }
-    lock.unlock();
-    return true;
-}
-
-void* audio_track_cblk_t::buffer(void *buffers, size_t frameSize, uint32_t offset) const
-{
-    return (int8_t *)buffers + (offset - userBase) * frameSize;
-}
-
-uint32_t audio_track_cblk_t::framesAvailable(size_t frameCount, bool isOut)
-{
-    Mutex::Autolock _l(lock);
-    return framesAvailable_l(frameCount, isOut);
-}
-
-uint32_t audio_track_cblk_t::framesAvailable_l(size_t frameCount, bool isOut)
-{
-    uint32_t u = user;
-    uint32_t s = server;
-
-    if (isOut) {
-        uint32_t limit = (s < loopStart) ? s : loopStart;
-        return limit + frameCount - u;
+    status_t status;
+    enum {
+        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
+        TIMEOUT_INFINITE,   // *requested == infinity
+        TIMEOUT_FINITE,     // 0 < *requested < infinity
+        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
+    } timeout;
+    if (requested == NULL) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == INT_MAX) {
+        timeout = TIMEOUT_INFINITE;
     } else {
-        return frameCount + u - s;
+        timeout = TIMEOUT_FINITE;
+        if (requested->tv_sec > 0 || requested->tv_nsec >= MEASURE_NS) {
+            measure = true;
+        }
     }
-}
-
-uint32_t audio_track_cblk_t::framesReady(bool isOut)
-{
-    uint32_t u = user;
-    uint32_t s = server;
-
-    if (isOut) {
-        if (u < loopEnd) {
-            return u - s;
+    struct timespec before;
+    bool beforeIsValid = false;
+    audio_track_cblk_t* cblk = mCblk;
+    bool ignoreInitialPendingInterrupt = true;
+    // check for shared memory corruption
+    if (mIsShutdown) {
+        status = NO_INIT;
+        goto end;
+    }
+    for (;;) {
+        int32_t flags = android_atomic_and(~CBLK_INTERRUPT, &cblk->mFlags);
+        // check for track invalidation by server, or server death detection
+        if (flags & CBLK_INVALID) {
+            ALOGV("Track invalidated");
+            status = DEAD_OBJECT;
+            goto end;
+        }
+        // check for obtainBuffer interrupted by client
+        if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
+            ALOGV("obtainBuffer() interrupted by client");
+            status = -EINTR;
+            goto end;
+        }
+        ignoreInitialPendingInterrupt = false;
+        // compute number of frames available to write (AudioTrack) or read (AudioRecord)
+        int32_t front;
+        int32_t rear;
+        if (mIsOut) {
+            // The barrier following the read of mFront is probably redundant.
+            // We're about to perform a conditional branch based on 'filled',
+            // which will force the processor to observe the read of mFront
+            // prior to allowing data writes starting at mRaw.
+            // However, the processor may support speculative execution,
+            // and be unable to undo speculative writes into shared memory.
+            // The barrier will prevent such speculative execution.
+            front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
+            rear = cblk->u.mStreaming.mRear;
         } else {
-            // do not block on mutex shared with client on AudioFlinger side
-            if (!tryLock()) {
-                ALOGW("framesReady() could not lock cblk");
-                return 0;
+            // On the other hand, this barrier is required.
+            rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+            front = cblk->u.mStreaming.mFront;
+        }
+        ssize_t filled = rear - front;
+        // pipe should not be overfull
+        if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+            ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+            mIsShutdown = true;
+            status = NO_INIT;
+            goto end;
+        }
+        // don't allow filling pipe beyond the nominal size
+        size_t avail = mIsOut ? mFrameCount - filled : filled;
+        if (avail > 0) {
+            // 'avail' may be non-contiguous, so return only the first contiguous chunk
+            size_t part1;
+            if (mIsOut) {
+                rear &= mFrameCountP2 - 1;
+                part1 = mFrameCountP2 - rear;
+            } else {
+                front &= mFrameCountP2 - 1;
+                part1 = mFrameCountP2 - front;
             }
-            uint32_t frames = UINT_MAX;
-            if (loopCount >= 0) {
-                frames = (loopEnd - loopStart)*loopCount + u - s;
+            if (part1 > avail) {
+                part1 = avail;
             }
-            lock.unlock();
-            return frames;
+            if (part1 > buffer->mFrameCount) {
+                part1 = buffer->mFrameCount;
+            }
+            buffer->mFrameCount = part1;
+            buffer->mRaw = part1 > 0 ?
+                    &((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL;
+            buffer->mNonContig = avail - part1;
+            mUnreleased = part1;
+            status = NO_ERROR;
+            break;
+        }
+        struct timespec remaining;
+        const struct timespec *ts;
+        switch (timeout) {
+        case TIMEOUT_ZERO:
+            status = WOULD_BLOCK;
+            goto end;
+        case TIMEOUT_INFINITE:
+            ts = NULL;
+            break;
+        case TIMEOUT_FINITE:
+            timeout = TIMEOUT_CONTINUE;
+            if (MAX_SEC == 0) {
+                ts = requested;
+                break;
+            }
+            // fall through
+        case TIMEOUT_CONTINUE:
+            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
+            if (!measure || requested->tv_sec < total.tv_sec ||
+                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
+                status = TIMED_OUT;
+                goto end;
+            }
+            remaining.tv_sec = requested->tv_sec - total.tv_sec;
+            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
+                remaining.tv_nsec += 1000000000;
+                remaining.tv_sec++;
+            }
+            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
+                remaining.tv_sec = MAX_SEC;
+                remaining.tv_nsec = 0;
+            }
+            ts = &remaining;
+            break;
+        default:
+            LOG_FATAL("obtainBuffer() timeout=%d", timeout);
+            ts = NULL;
+            break;
+        }
+        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
+        if (!(old & CBLK_FUTEX_WAKE)) {
+            int rc;
+            if (measure && !beforeIsValid) {
+                clock_gettime(CLOCK_MONOTONIC, &before);
+                beforeIsValid = true;
+            }
+            int ret = __futex_syscall4(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+            // update total elapsed time spent waiting
+            if (measure) {
+                struct timespec after;
+                clock_gettime(CLOCK_MONOTONIC, &after);
+                total.tv_sec += after.tv_sec - before.tv_sec;
+                long deltaNs = after.tv_nsec - before.tv_nsec;
+                if (deltaNs < 0) {
+                    deltaNs += 1000000000;
+                    total.tv_sec--;
+                }
+                if ((total.tv_nsec += deltaNs) >= 1000000000) {
+                    total.tv_nsec -= 1000000000;
+                    total.tv_sec++;
+                }
+                before = after;
+                beforeIsValid = true;
+            }
+            switch (ret) {
+            case 0:             // normal wakeup by server, or by binderDied()
+            case -EWOULDBLOCK:  // benign race condition with server
+            case -EINTR:        // wait was interrupted by signal or other spurious wakeup
+            case -ETIMEDOUT:    // time-out expired
+                // FIXME these error/non-0 status are being dropped
+                break;
+            default:
+                ALOGE("%s unexpected error %d", __func__, ret);
+                status = -ret;
+                goto end;
+            }
+        }
+    }
+
+end:
+    if (status != NO_ERROR) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+    }
+    if (elapsed != NULL) {
+        *elapsed = total;
+    }
+    if (requested == NULL) {
+        requested = &kNonBlocking;
+    }
+    if (measure) {
+        ALOGV("requested %ld.%03ld elapsed %ld.%03ld",
+              requested->tv_sec, requested->tv_nsec / 1000000,
+              total.tv_sec, total.tv_nsec / 1000000);
+    }
+    return status;
+}
+
+void ClientProxy::releaseBuffer(Buffer* buffer)
+{
+    LOG_ALWAYS_FATAL_IF(buffer == NULL);
+    size_t stepCount = buffer->mFrameCount;
+    if (stepCount == 0 || mIsShutdown) {
+        // prevent accidental re-use of buffer
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
+    mUnreleased -= stepCount;
+    audio_track_cblk_t* cblk = mCblk;
+    // Both of these barriers are required
+    if (mIsOut) {
+        int32_t rear = cblk->u.mStreaming.mRear;
+        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
+    } else {
+        int32_t front = cblk->u.mStreaming.mFront;
+        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
+    }
+}
+
+void ClientProxy::binderDied()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    if (!(android_atomic_or(CBLK_INVALID, &cblk->mFlags) & CBLK_INVALID)) {
+        // it seems that a FUTEX_WAKE_PRIVATE will not wake a FUTEX_WAIT, even within same process
+        (void) __futex_syscall3(&cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+}
+
+void ClientProxy::interrupt()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    if (!(android_atomic_or(CBLK_INTERRUPT, &cblk->mFlags) & CBLK_INTERRUPT)) {
+        (void) __futex_syscall3(&cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+}
+
+size_t ClientProxy::getMisalignment()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    return (mFrameCountP2 - (mIsOut ? cblk->u.mStreaming.mRear : cblk->u.mStreaming.mFront)) &
+            (mFrameCountP2 - 1);
+}
+
+// ---------------------------------------------------------------------------
+
+void AudioTrackClientProxy::flush()
+{
+    mCblk->u.mStreaming.mFlush++;
+}
+
+bool AudioTrackClientProxy::clearStreamEndDone() {
+    return (android_atomic_and(~CBLK_STREAM_END_DONE, &mCblk->mFlags) & CBLK_STREAM_END_DONE) != 0;
+}
+
+bool AudioTrackClientProxy::getStreamEndDone() const {
+    return (mCblk->mFlags & CBLK_STREAM_END_DONE) != 0;
+}
+
+status_t AudioTrackClientProxy::waitStreamEndDone(const struct timespec *requested)
+{
+    struct timespec total;          // total elapsed time spent waiting
+    total.tv_sec = 0;
+    total.tv_nsec = 0;
+    audio_track_cblk_t* cblk = mCblk;
+    status_t status;
+    enum {
+        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
+        TIMEOUT_INFINITE,   // *requested == infinity
+        TIMEOUT_FINITE,     // 0 < *requested < infinity
+        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
+    } timeout;
+    if (requested == NULL) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == INT_MAX) {
+        timeout = TIMEOUT_INFINITE;
+    } else {
+        timeout = TIMEOUT_FINITE;
+    }
+    for (;;) {
+        int32_t flags = android_atomic_and(~(CBLK_INTERRUPT|CBLK_STREAM_END_DONE), &cblk->mFlags);
+        // check for track invalidation by server, or server death detection
+        if (flags & CBLK_INVALID) {
+            ALOGV("Track invalidated");
+            status = DEAD_OBJECT;
+            goto end;
+        }
+        if (flags & CBLK_STREAM_END_DONE) {
+            ALOGV("stream end received");
+            status = NO_ERROR;
+            goto end;
+        }
+        // check for waitStreamEndDone() interrupted by client
+        // (unlike obtainBuffer(), there is no grace period for an initial pending interrupt)
+        if (flags & CBLK_INTERRUPT) {
+            ALOGV("waitStreamEndDone() interrupted by client");
+            status = -EINTR;
+            goto end;
+        }
+        struct timespec remaining;
+        const struct timespec *ts;
+        switch (timeout) {
+        case TIMEOUT_ZERO:
+            status = WOULD_BLOCK;
+            goto end;
+        case TIMEOUT_INFINITE:
+            ts = NULL;
+            break;
+        case TIMEOUT_FINITE:
+            timeout = TIMEOUT_CONTINUE;
+            if (MAX_SEC == 0) {
+                ts = requested;
+                break;
+            }
+            // fall through
+        case TIMEOUT_CONTINUE:
+            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
+            if (requested->tv_sec < total.tv_sec ||
+                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
+                status = TIMED_OUT;
+                goto end;
+            }
+            remaining.tv_sec = requested->tv_sec - total.tv_sec;
+            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
+                remaining.tv_nsec += 1000000000;
+                remaining.tv_sec++;
+            }
+            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
+                remaining.tv_sec = MAX_SEC;
+                remaining.tv_nsec = 0;
+            }
+            ts = &remaining;
+            break;
+        default:
+            LOG_FATAL("waitStreamEndDone() timeout=%d", timeout);
+            ts = NULL;
+            break;
+        }
+        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
+        if (!(old & CBLK_FUTEX_WAKE)) {
+            int rc;
+            int ret = __futex_syscall4(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+            switch (ret) {
+            case 0:             // normal wakeup by server, or by binderDied()
+            case -EWOULDBLOCK:  // benign race condition with server
+            case -EINTR:        // wait was interrupted by signal or other spurious wakeup
+            case -ETIMEDOUT:    // time-out expired
+                break;
+            default:
+                ALOGE("%s unexpected error %d", __func__, ret);
+                status = -ret;
+                goto end;
+            }
+        }
+    }
+
+end:
+    if (requested == NULL) {
+        requested = &kNonBlocking;
+    }
+    return status;
+}
+
+// ---------------------------------------------------------------------------
+
+StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
+        size_t frameCount, size_t frameSize)
+    : AudioTrackClientProxy(cblk, buffers, frameCount, frameSize),
+      mMutator(&cblk->u.mStatic.mSingleStateQueue), mBufferPosition(0)
+{
+}
+
+void StaticAudioTrackClientProxy::flush()
+{
+    LOG_FATAL("static flush");
+}
+
+void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
+{
+    StaticAudioTrackState newState;
+    newState.mLoopStart = loopStart;
+    newState.mLoopEnd = loopEnd;
+    newState.mLoopCount = loopCount;
+    mBufferPosition = loopStart;
+    (void) mMutator.push(newState);
+}
+
+size_t StaticAudioTrackClientProxy::getBufferPosition()
+{
+    size_t bufferPosition;
+    if (mMutator.ack()) {
+        bufferPosition = mCblk->u.mStatic.mBufferPosition;
+        if (bufferPosition > mFrameCount) {
+            bufferPosition = mFrameCount;
         }
     } else {
-        return s - u;
+        bufferPosition = mBufferPosition;
     }
+    return bufferPosition;
 }
 
-bool audio_track_cblk_t::tryLock()
+// ---------------------------------------------------------------------------
+
+ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+        size_t frameSize, bool isOut, bool clientInServer)
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
+      mAvailToClient(0), mFlush(0), mDeferWake(false)
 {
-    // the code below simulates lock-with-timeout
-    // we MUST do this to protect the AudioFlinger server
-    // as this lock is shared with the client.
-    status_t err;
-
-    err = lock.tryLock();
-    if (err == -EBUSY) { // just wait a bit
-        usleep(1000);
-        err = lock.tryLock();
-    }
-    if (err != NO_ERROR) {
-        // probably, the client just died.
-        return false;
-    }
-    return true;
 }
 
+status_t ServerProxy::obtainBuffer(Buffer* buffer)
+{
+    LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
+    if (mIsShutdown) {
+        goto no_init;
+    }
+    {
+    audio_track_cblk_t* cblk = mCblk;
+    // compute number of frames available to write (AudioTrack) or read (AudioRecord),
+    // or use the previous value cached by framesReady(), adding a barrier if that path omitted one
+    int32_t front;
+    int32_t rear;
+    // See notes on barriers at ClientProxy::obtainBuffer()
+    if (mIsOut) {
+        int32_t flush = cblk->u.mStreaming.mFlush;
+        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+        front = cblk->u.mStreaming.mFront;
+        if (flush != mFlush) {
+            mFlush = flush;
+            // effectively obtain then release whatever is in the buffer
+            android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
+            if (front != rear) {
+                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
+                if (!(old & CBLK_FUTEX_WAKE)) {
+                    (void) __futex_syscall3(&cblk->mFutex,
+                            mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                }
+            }
+            front = rear;
+        }
+    } else {
+        front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
+        rear = cblk->u.mStreaming.mRear;
+    }
+    ssize_t filled = rear - front;
+    // pipe should not already be overfull
+    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        mIsShutdown = true;
+    }
+    if (mIsShutdown) {
+        goto no_init;
+    }
+    // don't allow filling pipe beyond the nominal size
+    size_t availToServer;
+    if (mIsOut) {
+        availToServer = filled;
+        mAvailToClient = mFrameCount - filled;
+    } else {
+        availToServer = mFrameCount - filled;
+        mAvailToClient = filled;
+    }
+    // 'availToServer' may be non-contiguous, so return only the first contiguous chunk
+    size_t part1;
+    if (mIsOut) {
+        front &= mFrameCountP2 - 1;
+        part1 = mFrameCountP2 - front;
+    } else {
+        rear &= mFrameCountP2 - 1;
+        part1 = mFrameCountP2 - rear;
+    }
+    if (part1 > availToServer) {
+        part1 = availToServer;
+    }
+    size_t ask = buffer->mFrameCount;
+    if (part1 > ask) {
+        part1 = ask;
+    }
+    // is assignment redundant in some cases?
+    buffer->mFrameCount = part1;
+    buffer->mRaw = part1 > 0 ?
+            &((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
+    buffer->mNonContig = availToServer - part1;
+    mUnreleased = part1;
+    // optimization to avoid waking up the client too early
+    // FIXME need to test for recording
+    mDeferWake = part1 < ask && availToServer >= ask;
+    return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
+    }
+no_init:
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+    mUnreleased = 0;
+    return NO_INIT;
+}
+
+void ServerProxy::releaseBuffer(Buffer* buffer)
+{
+    LOG_ALWAYS_FATAL_IF(buffer == NULL);
+    size_t stepCount = buffer->mFrameCount;
+    if (stepCount == 0 || mIsShutdown) {
+        // prevent accidental re-use of buffer
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased && mUnreleased <= mFrameCount));
+    mUnreleased -= stepCount;
+    audio_track_cblk_t* cblk = mCblk;
+    if (mIsOut) {
+        int32_t front = cblk->u.mStreaming.mFront;
+        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
+    } else {
+        int32_t rear = cblk->u.mStreaming.mRear;
+        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
+    }
+
+    mCblk->mServer += stepCount;
+
+    size_t half = mFrameCount / 2;
+    if (half == 0) {
+        half = 1;
+    }
+    size_t minimum = cblk->mMinimum;
+    if (minimum == 0) {
+        minimum = mIsOut ? half : 1;
+    } else if (minimum > half) {
+        minimum = half;
+    }
+    // FIXME AudioRecord wakeup needs to be optimized; it currently wakes up client every time
+    if (!mIsOut || (!mDeferWake && mAvailToClient + stepCount >= minimum)) {
+        ALOGV("mAvailToClient=%u stepCount=%u minimum=%u", mAvailToClient, stepCount, minimum);
+        int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
+        if (!(old & CBLK_FUTEX_WAKE)) {
+            (void) __futex_syscall3(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+        }
+    }
+
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+}
+
+// ---------------------------------------------------------------------------
+
+size_t AudioTrackServerProxy::framesReady()
+{
+    LOG_ALWAYS_FATAL_IF(!mIsOut);
+
+    if (mIsShutdown) {
+        return 0;
+    }
+    audio_track_cblk_t* cblk = mCblk;
+
+    int32_t flush = cblk->u.mStreaming.mFlush;
+    if (flush != mFlush) {
+        return mFrameCount;
+    }
+    // the acquire might not be necessary since not doing a subsequent read
+    int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+    ssize_t filled = rear - cblk->u.mStreaming.mFront;
+    // pipe should not already be overfull
+    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        mIsShutdown = true;
+        return 0;
+    }
+    //  cache this value for later use by obtainBuffer(), with added barrier
+    //  and racy if called by normal mixer thread
+    // ignores flush(), so framesReady() may report a larger mFrameCount than obtainBuffer()
+    return filled;
+}
+
+bool  AudioTrackServerProxy::setStreamEndDone() {
+    bool old =
+            (android_atomic_or(CBLK_STREAM_END_DONE, &mCblk->mFlags) & CBLK_STREAM_END_DONE) != 0;
+    if (!old) {
+        (void) __futex_syscall3(&mCblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+    return old;
+}
+
+void AudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
+{
+    mCblk->u.mStreaming.mUnderrunFrames += frameCount;
+
+    // FIXME also wake futex so that underrun is noticed more quickly
+    (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+}
+
+// ---------------------------------------------------------------------------
+
+StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
+        size_t frameCount, size_t frameSize)
+    : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
+      mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),
+      mEnd(frameCount), mFramesReadyIsCalledByMultipleThreads(false)
+{
+    mState.mLoopStart = 0;
+    mState.mLoopEnd = 0;
+    mState.mLoopCount = 0;
+}
+
+void StaticAudioTrackServerProxy::framesReadyIsCalledByMultipleThreads()
+{
+    mFramesReadyIsCalledByMultipleThreads = true;
+}
+
+size_t StaticAudioTrackServerProxy::framesReady()
+{
+    // FIXME
+    // This is racy if called by normal mixer thread,
+    // as we're reading 2 independent variables without a lock.
+    // Can't call mObserver.poll(), as we might be called from wrong thread.
+    // If looping is enabled, should return a higher number (since includes non-contiguous).
+    size_t position = mPosition;
+    if (!mFramesReadyIsCalledByMultipleThreads) {
+        ssize_t positionOrStatus = pollPosition();
+        if (positionOrStatus >= 0) {
+            position = (size_t) positionOrStatus;
+        }
+    }
+    size_t end = mEnd;
+    return position < end ? end - position : 0;
+}
+
+ssize_t StaticAudioTrackServerProxy::pollPosition()
+{
+    size_t position = mPosition;
+    StaticAudioTrackState state;
+    if (mObserver.poll(state)) {
+        bool valid = false;
+        size_t loopStart = state.mLoopStart;
+        size_t loopEnd = state.mLoopEnd;
+        if (state.mLoopCount == 0) {
+            if (loopStart > mFrameCount) {
+                loopStart = mFrameCount;
+            }
+            // ignore loopEnd
+            mPosition = position = loopStart;
+            mEnd = mFrameCount;
+            mState.mLoopCount = 0;
+            valid = true;
+        } else {
+            if (loopStart < loopEnd && loopEnd <= mFrameCount &&
+                    loopEnd - loopStart >= MIN_LOOP) {
+                if (!(loopStart <= position && position < loopEnd)) {
+                    mPosition = position = loopStart;
+                }
+                mEnd = loopEnd;
+                mState = state;
+                valid = true;
+            }
+        }
+        if (!valid) {
+            ALOGE("%s client pushed an invalid state, shutting down", __func__);
+            mIsShutdown = true;
+            return (ssize_t) NO_INIT;
+        }
+        mCblk->u.mStatic.mBufferPosition = position;
+    }
+    return (ssize_t) position;
+}
+
+status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer)
+{
+    if (mIsShutdown) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return NO_INIT;
+    }
+    ssize_t positionOrStatus = pollPosition();
+    if (positionOrStatus < 0) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return (status_t) positionOrStatus;
+    }
+    size_t position = (size_t) positionOrStatus;
+    size_t avail;
+    if (position < mEnd) {
+        avail = mEnd - position;
+        size_t wanted = buffer->mFrameCount;
+        if (avail < wanted) {
+            buffer->mFrameCount = avail;
+        } else {
+            avail = wanted;
+        }
+        buffer->mRaw = &((char *) mBuffers)[position * mFrameSize];
+    } else {
+        avail = 0;
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+    }
+    buffer->mNonContig = 0;     // FIXME should be > 0 for looping
+    mUnreleased = avail;
+    return NO_ERROR;
+}
+
+void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
+{
+    size_t stepCount = buffer->mFrameCount;
+    LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
+    if (stepCount == 0) {
+        // prevent accidental re-use of buffer
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    mUnreleased -= stepCount;
+    audio_track_cblk_t* cblk = mCblk;
+    size_t position = mPosition;
+    size_t newPosition = position + stepCount;
+    int32_t setFlags = 0;
+    if (!(position <= newPosition && newPosition <= mFrameCount)) {
+        ALOGW("%s newPosition %u outside [%u, %u]", __func__, newPosition, position, mFrameCount);
+        newPosition = mFrameCount;
+    } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
+        if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
+            newPosition = mState.mLoopStart;
+            setFlags = CBLK_LOOP_CYCLE;
+        } else {
+            mEnd = mFrameCount;     // this is what allows playback to continue after the loop
+            setFlags = CBLK_LOOP_FINAL;
+        }
+    }
+    if (newPosition == mFrameCount) {
+        setFlags |= CBLK_BUFFER_END;
+    }
+    mPosition = newPosition;
+
+    cblk->mServer += stepCount;
+    cblk->u.mStatic.mBufferPosition = newPosition;
+    if (setFlags != 0) {
+        (void) android_atomic_or(setFlags, &cblk->mFlags);
+        // this would be a good place to wake a futex
+    }
+
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+}
+
+void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
+{
+    // Unlike AudioTrackServerProxy::tallyUnderrunFrames() used for streaming tracks,
+    // we don't have a location to count underrun frames.  The underrun frame counter
+    // only exists in AudioTrackSharedStreaming.  Fortunately, underruns are not
+    // possible for static buffer tracks other than at end of buffer, so this is not a loss.
+
+    // FIXME also wake futex so that underrun is noticed more quickly
+    (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+}
+
+// ---------------------------------------------------------------------------
+
 }   // namespace android
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 2f18680..be818c6 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -73,6 +73,7 @@
     LOAD_HW_MODULE,
     GET_PRIMARY_OUTPUT_SAMPLING_RATE,
     GET_PRIMARY_OUTPUT_FRAME_COUNT,
+    SET_LOW_RAM_DEVICE,
 };
 
 class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -94,6 +95,7 @@
                                 audio_io_handle_t output,
                                 pid_t tid,
                                 int *sessionId,
+                                String8& name,
                                 status_t *status)
     {
         Parcel data, reply;
@@ -126,6 +128,7 @@
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
+            name = reply.readString8();
             lStatus = reply.readInt32();
             track = interface_cast<IAudioTrack>(reply.readStrongBinder());
         }
@@ -141,7 +144,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                track_flags_t flags,
+                                track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
                                 status_t *status)
@@ -154,7 +157,8 @@
         data.writeInt32(format);
         data.writeInt32(channelMask);
         data.writeInt32(frameCount);
-        data.writeInt32(flags);
+        track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
+        data.writeInt32(lFlags);
         data.writeInt32((int32_t) tid);
         int lSessionId = 0;
         if (sessionId != NULL) {
@@ -165,6 +169,10 @@
         if (lStatus != NO_ERROR) {
             ALOGE("openRecord error: %s", strerror(-lStatus));
         } else {
+            lFlags = reply.readInt32();
+            if (flags != NULL) {
+                *flags = lFlags;
+            }
             lSessionId = reply.readInt32();
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
@@ -361,15 +369,16 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags)
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
-        uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
-        uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
-
+        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
+        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
+        audio_channel_mask_t channelMask = pChannelMask != NULL ?
+                *pChannelMask : (audio_channel_mask_t)0;
+        uint32_t latency = pLatencyMs != NULL ? *pLatencyMs : 0;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
         data.writeInt32(devices);
@@ -378,19 +387,25 @@
         data.writeInt32(channelMask);
         data.writeInt32(latency);
         data.writeInt32((int32_t) flags);
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(OPEN_OUTPUT, data, &reply);
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices) *pDevices = devices;
+        if (pDevices != NULL) *pDevices = devices;
         samplingRate = reply.readInt32();
-        if (pSamplingRate) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
         format = (audio_format_t) reply.readInt32();
-        if (pFormat) *pFormat = format;
+        if (pFormat != NULL) *pFormat = format;
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) *pChannelMask = channelMask;
         latency = reply.readInt32();
-        if (pLatencyMs) *pLatencyMs = latency;
+        if (pLatencyMs != NULL) *pLatencyMs = latency;
         return output;
     }
 
@@ -439,10 +454,11 @@
                                         audio_channel_mask_t *pChannelMask)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
-        uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
-        audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
+        audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
+        audio_channel_mask_t channelMask = pChannelMask != NULL ?
+                *pChannelMask : (audio_channel_mask_t)0;
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
@@ -453,13 +469,13 @@
         remote()->transact(OPEN_INPUT, data, &reply);
         audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = (audio_devices_t)reply.readInt32();
-        if (pDevices) *pDevices = devices;
+        if (pDevices != NULL) *pDevices = devices;
         samplingRate = reply.readInt32();
-        if (pSamplingRate) *pSamplingRate = samplingRate;
+        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
         format = (audio_format_t) reply.readInt32();
-        if (pFormat) *pFormat = format;
+        if (pFormat != NULL) *pFormat = format;
         channelMask = (audio_channel_mask_t)reply.readInt32();
-        if (pChannelMask) *pChannelMask = channelMask;
+        if (pChannelMask != NULL) *pChannelMask = channelMask;
         return input;
     }
 
@@ -695,6 +711,15 @@
         return reply.readInt32();
     }
 
+    virtual status_t setLowRamDevice(bool isLowRamDevice)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32((int) isLowRamDevice);
+        remote()->transact(SET_LOW_RAM_DEVICE, data, &reply);
+        return reply.readInt32();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -717,12 +742,14 @@
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
+            String8 name;
             status_t status;
             sp<IAudioTrack> track = createTrack(
                     (audio_stream_type_t) streamType, sampleRate, format,
-                    channelMask, frameCount, &flags, buffer, output, tid, &sessionId, &status);
+                    channelMask, frameCount, &flags, buffer, output, tid, &sessionId, name, &status);
             reply->writeInt32(flags);
             reply->writeInt32(sessionId);
+            reply->writeString8(name);
             reply->writeInt32(status);
             reply->writeStrongBinder(track->asBinder());
             return NO_ERROR;
@@ -739,7 +766,8 @@
             int sessionId = data.readInt32();
             status_t status;
             sp<IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, frameCount, flags, tid, &sessionId, &status);
+                    sampleRate, format, channelMask, frameCount, &flags, tid, &sessionId, &status);
+            reply->writeInt32(flags);
             reply->writeInt32(sessionId);
             reply->writeInt32(status);
             reply->writeStrongBinder(record->asBinder());
@@ -868,13 +896,19 @@
             audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
             uint32_t latency = data.readInt32();
             audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = openOutput(module,
                                                  &devices,
                                                  &samplingRate,
                                                  &format,
                                                  &channelMask,
                                                  &latency,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             ALOGV("OPEN_OUTPUT output, %p", output);
             reply->writeInt32((int32_t) output);
             reply->writeInt32(devices);
@@ -1056,6 +1090,12 @@
             reply->writeInt32(getPrimaryOutputFrameCount());
             return NO_ERROR;
         } break;
+        case SET_LOW_RAM_DEVICE: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            bool isLowRamDevice = data.readInt32() != 0;
+            reply->writeInt32(setLowRamDevice(isLowRamDevice));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 2d1e0f8..3c0d4cf 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -54,7 +54,7 @@
                     (const AudioSystem::OutputDescriptor *)param2;
             data.writeInt32(desc->samplingRate);
             data.writeInt32(desc->format);
-            data.writeInt32(desc->channels);
+            data.writeInt32(desc->channelMask);
             data.writeInt32(desc->frameCount);
             data.writeInt32(desc->latency);
         }
@@ -83,8 +83,8 @@
                 ALOGV("STREAM_CONFIG_CHANGED stream %d", stream);
             } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
                 desc.samplingRate = data.readInt32();
-                desc.format = data.readInt32();
-                desc.channels = data.readInt32();
+                desc.format = (audio_format_t) data.readInt32();
+                desc.channelMask = (audio_channel_mask_t) data.readInt32();
                 desc.frameCount = data.readInt32();
                 desc.latency = data.readInt32();
                 param2 = &desc;
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 386c351..4be3c09 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -56,7 +56,8 @@
     GET_DEVICES_FOR_STREAM,
     QUERY_DEFAULT_PRE_PROCESSING,
     SET_EFFECT_ENABLED,
-    IS_STREAM_ACTIVE_REMOTELY
+    IS_STREAM_ACTIVE_REMOTELY,
+    IS_OFFLOAD_SUPPORTED
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -126,7 +127,8 @@
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
-                                        audio_output_flags_t flags)
+                                        audio_output_flags_t flags,
+                                        const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -135,6 +137,12 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(static_cast <uint32_t>(flags));
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(GET_OUTPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
@@ -374,6 +382,14 @@
         *count = retCount;
         return status;
     }
+
+    virtual bool isOffloadSupported(const audio_offload_info_t& info)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(&info, sizeof(audio_offload_info_t));
+        remote()->transact(IS_OFFLOAD_SUPPORTED, data, &reply);
+        return reply.readInt32(); }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -442,12 +458,17 @@
             audio_channel_mask_t channelMask = data.readInt32();
             audio_output_flags_t flags =
                     static_cast <audio_output_flags_t>(data.readInt32());
-
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = getOutput(stream,
                                                  samplingRate,
                                                  format,
                                                  channelMask,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             reply->writeInt32(static_cast <int>(output));
             return NO_ERROR;
         } break;
@@ -654,6 +675,15 @@
             return status;
         }
 
+        case IS_OFFLOAD_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_offload_info_t info;
+            data.read(&info, sizeof(audio_offload_info_t));
+            bool isSupported = isOffloadSupported(info);
+            reply->writeInt32(isSupported);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 0d06e98..4a7de65 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,6 +42,18 @@
     {
     }
 
+    virtual sp<IMemory> getCblk() const
+    {
+        Parcel data, reply;
+        sp<IMemory> cblk;
+        data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CBLK, data, &reply);
+        if (status == NO_ERROR) {
+            cblk = interface_cast<IMemory>(reply.readStrongBinder());
+        }
+        return cblk;
+    }
+
     virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession)
     {
         Parcel data, reply;
@@ -64,17 +76,6 @@
         remote()->transact(STOP, data, &reply);
     }
 
-    virtual sp<IMemory> getCblk() const
-    {
-        Parcel data, reply;
-        sp<IMemory> cblk;
-        data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_CBLK, data, &reply);
-        if (status == NO_ERROR) {
-            cblk = interface_cast<IMemory>(reply.readStrongBinder());
-        }
-        return cblk;
-    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index e92f8aa..a2b49a3 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -39,6 +39,7 @@
     ALLOCATE_TIMED_BUFFER,
     QUEUE_TIMED_BUFFER,
     SET_MEDIA_TIME_TRANSFORM,
+    SET_PARAMETERS
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -154,6 +155,17 @@
         }
         return status;
     }
+
+    virtual status_t setParameters(const String8& keyValuePairs) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeString8(keyValuePairs);
+        status_t status = remote()->transact(SET_PARAMETERS, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -223,6 +235,12 @@
             reply->writeInt32(setMediaTimeTransform(xform, target));
             return NO_ERROR;
         } break;
+        case SET_PARAMETERS: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            String8 keyValuePairs(data.readString8());
+            reply->writeInt32(setParameters(keyValuePairs));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
index f13addc..a46ff91 100644
--- a/media/libmedia/IHDCP.cpp
+++ b/media/libmedia/IHDCP.cpp
@@ -31,6 +31,7 @@
     HDCP_INIT_ASYNC,
     HDCP_SHUTDOWN_ASYNC,
     HDCP_ENCRYPT,
+    HDCP_ENCRYPT_NATIVE,
     HDCP_DECRYPT,
 };
 
@@ -108,6 +109,31 @@
         return err;
     }
 
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
+        data.write(*graphicBuffer);
+        data.writeInt32(offset);
+        data.writeInt32(size);
+        data.writeInt32(streamCTR);
+        remote()->transact(HDCP_ENCRYPT_NATIVE, data, &reply);
+
+        status_t err = reply.readInt32();
+
+        if (err != OK) {
+            *outInputCTR = 0;
+            return err;
+        }
+
+        *outInputCTR = reply.readInt64();
+        reply.read(outData, size);
+
+        return err;
+    }
+
     virtual status_t decrypt(
             const void *inData, size_t size,
             uint32_t streamCTR, uint64_t inputCTR,
@@ -222,6 +248,34 @@
             return OK;
         }
 
+        case HDCP_ENCRYPT_NATIVE:
+        {
+            CHECK_INTERFACE(IHDCP, data, reply);
+
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            data.read(*graphicBuffer);
+            size_t offset = data.readInt32();
+            size_t size = data.readInt32();
+            uint32_t streamCTR = data.readInt32();
+            void *outData = malloc(size);
+            uint64_t inputCTR;
+
+            status_t err = encryptNative(graphicBuffer, offset, size,
+                                         streamCTR, &inputCTR, outData);
+
+            reply->writeInt32(err);
+
+            if (err == OK) {
+                reply->writeInt64(inputCTR);
+                reply->write(outData, size);
+            }
+
+            free(outData);
+            outData = NULL;
+
+            return OK;
+        }
+
         case HDCP_DECRYPT:
         {
             size_t size = data.readInt32();
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 9199db6..9db5b1b 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -49,10 +49,10 @@
         } while (true);
 
         if (sDeathNotifier == NULL) {
-        sDeathNotifier = new DeathNotifier();
-    }
-    binder->linkToDeath(sDeathNotifier);
-    sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
+            sDeathNotifier = new DeathNotifier();
+        }
+        binder->linkToDeath(sDeathNotifier);
+        sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
     }
     ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
     return sMediaPlayerService;
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index d6cd43a..ef99f4f 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -51,6 +51,8 @@
     GET_EXTENSION_INDEX,
     OBSERVER_ON_MSG,
     GET_GRAPHIC_BUFFER_USAGE,
+    SET_INTERNAL_OPTION,
+    UPDATE_GRAPHIC_BUFFER_IN_META,
 };
 
 class BpOMX : public BpInterface<IOMX> {
@@ -282,6 +284,21 @@
         return err;
     }
 
+    virtual status_t updateGraphicBufferInMeta(
+            node_id node, OMX_U32 port_index,
+            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeIntPtr((intptr_t)node);
+        data.writeInt32(port_index);
+        data.write(*graphicBuffer);
+        data.writeIntPtr((intptr_t)buffer);
+        remote()->transact(UPDATE_GRAPHIC_BUFFER_IN_META, data, &reply);
+
+        status_t err = reply.readInt32();
+        return err;
+    }
+
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer) {
@@ -439,6 +456,24 @@
 
         return err;
     }
+
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *optionData,
+            size_t size) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeIntPtr((intptr_t)node);
+        data.writeInt32(port_index);
+        data.writeInt32(size);
+        data.write(optionData, size);
+        data.writeInt32(type);
+        remote()->transact(SET_INTERNAL_OPTION, data, &reply);
+
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
@@ -537,6 +572,7 @@
         case SET_PARAMETER:
         case GET_CONFIG:
         case SET_CONFIG:
+        case SET_INTERNAL_OPTION:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
@@ -562,6 +598,15 @@
                 case SET_CONFIG:
                     err = setConfig(node, index, params, size);
                     break;
+                case SET_INTERNAL_OPTION:
+                {
+                    InternalOptionType type =
+                        (InternalOptionType)data.readInt32();
+
+                    err = setInternalOption(node, index, type, params, size);
+                    break;
+                }
+
                 default:
                     TRESPASS();
             }
@@ -662,6 +707,23 @@
             return NO_ERROR;
         }
 
+        case UPDATE_GRAPHIC_BUFFER_IN_META:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            node_id node = (void*)data.readIntPtr();
+            OMX_U32 port_index = data.readInt32();
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            data.read(*graphicBuffer);
+            buffer_id buffer = (void*)data.readIntPtr();
+
+            status_t err = updateGraphicBufferInMeta(
+                    node, port_index, graphicBuffer, buffer);
+            reply->writeInt32(err);
+
+            return NO_ERROR;
+        }
+
         case CREATE_INPUT_SURFACE:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 59e538f..e914b34 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -18,8 +18,6 @@
 #define LOG_TAG "JetPlayer-C"
 
 #include <utils/Log.h>
-#include <utils/threads.h>
-
 #include <media/JetPlayer.h>
 
 
@@ -39,7 +37,6 @@
         mMaxTracks(maxTracks),
         mEasData(NULL),
         mEasJetFileLoc(NULL),
-        mAudioTrack(NULL),
         mTrackBufferSize(trackBufferSize)
 {
     ALOGV("JetPlayer constructor");
@@ -140,11 +137,10 @@
         free(mEasJetFileLoc);
         mEasJetFileLoc = NULL;
     }
-    if (mAudioTrack) {
+    if (mAudioTrack != 0) {
         mAudioTrack->stop();
         mAudioTrack->flush();
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
     if (mAudioBuffer) {
         delete mAudioBuffer;
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index e1e3348..93a4a4c 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -16,7 +16,7 @@
 
 #include <media/mediascanner.h>
 
-#include <utils/StringArray.h>
+#include "StringArray.h"
 
 #include "autodetect.h"
 #include "unicode/ucnv.h"
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index ee70ef7..7f10e05 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -20,14 +20,8 @@
 
 //#define USE_SHARED_MEM_BUFFER
 
-// XXX needed for timing latency
-#include <utils/Timers.h>
-
 #include <media/AudioTrack.h>
 #include <media/mediaplayer.h>
-
-#include <system/audio.h>
-
 #include <media/SoundPool.h>
 #include "SoundPoolThread.h"
 
@@ -547,8 +541,8 @@
 void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftVolume,
         float rightVolume, int priority, int loop, float rate)
 {
-    AudioTrack* oldTrack;
-    AudioTrack* newTrack;
+    sp<AudioTrack> oldTrack;
+    sp<AudioTrack> newTrack;
     status_t status;
 
     { // scope for the lock
@@ -620,7 +614,7 @@
             ALOGE("Error creating AudioTrack");
             goto exit;
         }
-        ALOGV("setVolume %p", newTrack);
+        ALOGV("setVolume %p", newTrack.get());
         newTrack->setVolume(leftVolume, rightVolume);
         newTrack->setLoop(0, frameCount, loop);
 
@@ -643,11 +637,9 @@
     }
 
 exit:
-    ALOGV("delete oldTrack %p", oldTrack);
-    delete oldTrack;
+    ALOGV("delete oldTrack %p", oldTrack.get());
     if (status != NO_ERROR) {
-        delete newTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
 }
 
@@ -884,7 +876,7 @@
     }
     // do not call AudioTrack destructor with mLock held as it will wait for the AudioTrack
     // callback thread to exit which may need to execute process() and acquire the mLock.
-    delete mAudioTrack;
+    mAudioTrack.clear();
 }
 
 void SoundChannel::dump()
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
new file mode 100644
index 0000000..5f5b57a
--- /dev/null
+++ b/media/libmedia/StringArray.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Sortable array of strings.  STL-ish, but STL-free.
+//
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "StringArray.h"
+
+namespace android {
+
+//
+// An expanding array of strings.  Add, get, sort, delete.
+//
+StringArray::StringArray()
+    : mMax(0), mCurrent(0), mArray(NULL)
+{
+}
+
+StringArray::~StringArray() {
+    for (int i = 0; i < mCurrent; i++)
+        delete[] mArray[i];
+    delete[] mArray;
+}
+
+//
+// Add a string.  A copy of the string is made.
+//
+bool StringArray::push_back(const char* str) {
+    if (mCurrent >= mMax) {
+        char** tmp;
+
+        if (mMax == 0)
+            mMax = 16;      // initial storage
+        else
+            mMax *= 2;
+
+        tmp = new char*[mMax];
+        if (tmp == NULL)
+            return false;
+
+        memcpy(tmp, mArray, mCurrent * sizeof(char*));
+        delete[] mArray;
+        mArray = tmp;
+    }
+
+    int len = strlen(str);
+    mArray[mCurrent] = new char[len+1];
+    memcpy(mArray[mCurrent], str, len+1);
+    mCurrent++;
+
+    return true;
+}
+
+//
+// Delete an entry.
+//
+void StringArray::erase(int idx) {
+    if (idx < 0 || idx >= mCurrent)
+        return;
+    delete[] mArray[idx];
+    if (idx < mCurrent-1) {
+        memmove(&mArray[idx], &mArray[idx+1],
+                (mCurrent-1 - idx) * sizeof(char*));
+    }
+    mCurrent--;
+}
+
+//
+// Sort the array.
+//
+void StringArray::sort(int (*compare)(const void*, const void*)) {
+    qsort(mArray, mCurrent, sizeof(char*), compare);
+}
+
+//
+// Pass this to the sort routine to do an ascending alphabetical sort.
+//
+int StringArray::cmpAscendingAlpha(const void* pstr1, const void* pstr2) {
+    return strcmp(*(const char**)pstr1, *(const char**)pstr2);
+}
+
+//
+// Set entry N to specified string.
+// [should use operator[] here]
+//
+void StringArray::setEntry(int idx, const char* str) {
+    if (idx < 0 || idx >= mCurrent)
+        return;
+    delete[] mArray[idx];
+    int len = strlen(str);
+    mArray[idx] = new char[len+1];
+    memcpy(mArray[idx], str, len+1);
+}
+
+
+} // namespace android
diff --git a/media/libmedia/StringArray.h b/media/libmedia/StringArray.h
new file mode 100644
index 0000000..ae47085
--- /dev/null
+++ b/media/libmedia/StringArray.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Sortable array of strings.  STL-ish, but STL-free.
+//
+#ifndef _LIBS_MEDIA_STRING_ARRAY_H
+#define _LIBS_MEDIA_STRING_ARRAY_H
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace android {
+
+//
+// An expanding array of strings.  Add, get, sort, delete.
+//
+class StringArray {
+public:
+    StringArray();
+    virtual ~StringArray();
+
+    //
+    // Add a string.  A copy of the string is made.
+    //
+    bool push_back(const char* str);
+
+    //
+    // Delete an entry.
+    //
+    void erase(int idx);
+
+    //
+    // Sort the array.
+    //
+    void sort(int (*compare)(const void*, const void*));
+
+    //
+    // Pass this to the sort routine to do an ascending alphabetical sort.
+    //
+    static int cmpAscendingAlpha(const void* pstr1, const void* pstr2);
+
+    //
+    // Get the #of items in the array.
+    //
+    inline int size(void) const { return mCurrent; }
+
+    //
+    // Return entry N.
+    // [should use operator[] here]
+    //
+    const char* getEntry(int idx) const {
+        return (unsigned(idx) >= unsigned(mCurrent)) ? NULL : mArray[idx];
+    }
+
+    //
+    // Set entry N to specified string.
+    // [should use operator[] here]
+    //
+    void setEntry(int idx, const char* str);
+
+private:
+    int     mMax;
+    int     mCurrent;
+    char**  mArray;
+};
+
+} // namespace android
+
+#endif // _LIBS_MEDIA_STRING_ARRAY_H
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index f55b697..adef3be 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -16,13 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ToneGenerator"
-#include <utils/threads.h>
 
-#include <stdio.h>
 #include <math.h>
 #include <utils/Log.h>
-#include <utils/RefBase.h>
-#include <utils/Timers.h>
 #include <cutils/properties.h>
 #include "media/ToneGenerator.h"
 
@@ -803,7 +799,6 @@
     ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume);
 
     mState = TONE_IDLE;
-    mpAudioTrack = NULL;
 
     if (AudioSystem::getOutputSamplingRate(&mSamplingRate, streamType) != NO_ERROR) {
         ALOGE("Unable to marshal AudioFlinger");
@@ -855,10 +850,10 @@
 ToneGenerator::~ToneGenerator() {
     ALOGV("ToneGenerator destructor");
 
-    if (mpAudioTrack != NULL) {
+    if (mpAudioTrack != 0) {
         stopTone();
-        ALOGV("Delete Track: %p", mpAudioTrack);
-        delete mpAudioTrack;
+        ALOGV("Delete Track: %p", mpAudioTrack.get());
+        mpAudioTrack.clear();
     }
 }
 
@@ -1047,14 +1042,9 @@
 ////////////////////////////////////////////////////////////////////////////////
 bool ToneGenerator::initAudioTrack() {
 
-    if (mpAudioTrack) {
-        delete mpAudioTrack;
-        mpAudioTrack = NULL;
-    }
-
     // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
     mpAudioTrack = new AudioTrack();
-    ALOGV("Create Track: %p", mpAudioTrack);
+    ALOGV("Create Track: %p", mpAudioTrack.get());
 
     mpAudioTrack->set(mStreamType,
                       0,    // sampleRate
@@ -1066,7 +1056,9 @@
                       this, // user
                       0,    // notificationFrames
                       0,    // sharedBuffer
-                      mThreadCanCallJava);
+                      mThreadCanCallJava,
+                      0,    // sessionId
+                      AudioTrack::TRANSFER_CALLBACK);
 
     if (mpAudioTrack->initCheck() != NO_ERROR) {
         ALOGE("AudioTrack->initCheck failed");
@@ -1081,12 +1073,10 @@
 
 initAudioTrack_exit:
 
+    ALOGV("Init failed: %p", mpAudioTrack.get());
+
     // Cleanup
-    if (mpAudioTrack != NULL) {
-        ALOGV("Delete Track I: %p", mpAudioTrack);
-        delete mpAudioTrack;
-        mpAudioTrack = NULL;
-    }
+    mpAudioTrack.clear();
 
     return false;
 }
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 5b4071b..e519f13 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -28,6 +28,7 @@
 
 #include <media/Visualizer.h>
 #include <audio_utils/fixedfft.h>
+#include <utils/Thread.h>
 
 namespace android {
 
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 963b04f..056cc0a 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -811,6 +811,13 @@
     if (mPlayer == NULL) {
         return NO_INIT;
     }
+
+    if (next != NULL && !(next->mCurrentState &
+            (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE))) {
+        ALOGE("next player is not prepared");
+        return INVALID_OPERATION;
+    }
+
     return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
 }
 
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index d87bc7f..8f21632 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -34,6 +34,7 @@
     libsonivox                  \
     libstagefright              \
     libstagefright_foundation   \
+    libstagefright_httplive     \
     libstagefright_omx          \
     libstagefright_wfd          \
     libutils                    \
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index ae4d845..62593b2 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -134,7 +134,6 @@
         return;
     }
 
-    ALOGE("Failed to find crypto plugin");
     mInitCheck = ERROR_UNSUPPORTED;
 }
 
@@ -151,6 +150,7 @@
     if (!mLibrary.get()) {
         mLibrary = new SharedLibrary(path);
         if (!*mLibrary) {
+            ALOGE("loadLibraryForScheme failed:%s", mLibrary->lastError());
             return false;
         }
 
@@ -165,6 +165,7 @@
     if (createCryptoFactory == NULL ||
         (mFactory = createCryptoFactory()) == NULL ||
         !mFactory->isCryptoSchemeSupported(uuid)) {
+        ALOGE("createCryptoFactory failed:%s", mLibrary->lastError());
         closeFactory();
         return false;
     }
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
index 469a02e..8a3188c 100644
--- a/media/libmediaplayerservice/HDCP.cpp
+++ b/media/libmediaplayerservice/HDCP.cpp
@@ -116,6 +116,24 @@
     return mHDCPModule->encrypt(inData, size, streamCTR, outInputCTR, outData);
 }
 
+status_t HDCP::encryptNative(
+        const sp<GraphicBuffer> &graphicBuffer,
+        size_t offset, size_t size, uint32_t streamCTR,
+        uint64_t *outInputCTR, void *outData) {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(mIsEncryptionModule);
+
+    if (mHDCPModule == NULL) {
+        *outInputCTR = 0;
+
+        return NO_INIT;
+    }
+
+    return mHDCPModule->encryptNative(graphicBuffer->handle,
+                    offset, size, streamCTR, outInputCTR, outData);
+}
+
 status_t HDCP::decrypt(
         const void *inData, size_t size,
         uint32_t streamCTR, uint64_t outInputCTR, void *outData) {
diff --git a/media/libmediaplayerservice/HDCP.h b/media/libmediaplayerservice/HDCP.h
index 42e6467..c60c2e0 100644
--- a/media/libmediaplayerservice/HDCP.h
+++ b/media/libmediaplayerservice/HDCP.h
@@ -35,6 +35,11 @@
             const void *inData, size_t size, uint32_t streamCTR,
             uint64_t *outInputCTR, void *outData);
 
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData);
+
     virtual status_t decrypt(
             const void *inData, size_t size,
             uint32_t streamCTR, uint64_t outInputCTR, void *outData);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 57ec7ea..8833bd7 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -53,6 +53,8 @@
 #include <media/AudioTrack.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/AudioPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 #include <system/audio.h>
 
@@ -1295,8 +1297,6 @@
       mSessionId(sessionId),
       mFlags(AUDIO_OUTPUT_FLAG_NONE) {
     ALOGV("AudioOutput(%d)", sessionId);
-    mTrack = 0;
-    mRecycledTrack = 0;
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
@@ -1311,7 +1311,6 @@
 MediaPlayerService::AudioOutput::~AudioOutput()
 {
     close();
-    delete mRecycledTrack;
     delete mCallbackData;
 }
 
@@ -1384,11 +1383,51 @@
     return OK;
 }
 
+status_t MediaPlayerService::AudioOutput::setParameters(const String8& keyValuePairs)
+{
+    if (mTrack == 0) return NO_INIT;
+    return mTrack->setParameters(keyValuePairs);
+}
+
+String8  MediaPlayerService::AudioOutput::getParameters(const String8& keys)
+{
+    if (mTrack == 0) return String8::empty();
+    return mTrack->getParameters(keys);
+}
+
+void MediaPlayerService::AudioOutput::deleteRecycledTrack()
+{
+    ALOGV("deleteRecycledTrack");
+
+    if (mRecycledTrack != 0) {
+
+        if (mCallbackData != NULL) {
+            mCallbackData->setOutput(NULL);
+            mCallbackData->endTrackSwitch();
+        }
+
+        if ((mRecycledTrack->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+            mRecycledTrack->flush();
+        }
+        // An offloaded track isn't flushed because the STREAM_END is reported
+        // slightly prematurely to allow time for the gapless track switch
+        // but this means that if we decide not to recycle the track there
+        // could be a small amount of residual data still playing. We leave
+        // AudioFlinger to drain the track.
+
+        mRecycledTrack.clear();
+        delete mCallbackData;
+        mCallbackData = NULL;
+        close();
+    }
+}
+
 status_t MediaPlayerService::AudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
         AudioCallback cb, void *cookie,
-        audio_output_flags_t flags)
+        audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
 {
     mCallback = cb;
     mCallbackCookie = cookie;
@@ -1399,20 +1438,34 @@
         bufferCount = mMinBufferCount;
 
     }
-    ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask,
-            format, bufferCount, mSessionId);
+    ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d, 0x%x)", sampleRate, channelCount, channelMask,
+                format, bufferCount, mSessionId, flags);
     uint32_t afSampleRate;
     size_t afFrameCount;
     uint32_t frameCount;
 
-    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
-        return NO_INIT;
-    }
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
-        return NO_INIT;
+    // offloading is only supported in callback mode for now.
+    // offloadInfo must be present if offload flag is set
+    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
+            ((cb == NULL) || (offloadInfo == NULL))) {
+        return BAD_VALUE;
     }
 
-    frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        frameCount = 0; // AudioTrack will get frame count from AudioFlinger
+    } else {
+        uint32_t afSampleRate;
+        size_t afFrameCount;
+
+        if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+        if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+
+        frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    }
 
     if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
         channelMask = audio_channel_out_mask_from_count(channelCount);
@@ -1422,90 +1475,127 @@
         }
     }
 
-    AudioTrack *t;
-    CallbackData *newcbd = NULL;
-    if (mCallback != NULL) {
-        newcbd = new CallbackData(this);
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                CallbackWrapper,
-                newcbd,
-                0,  // notification frames
-                mSessionId);
-    } else {
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                NULL,
-                NULL,
-                0,
-                mSessionId);
-    }
+    // Check whether we can recycle the track
+    bool reuse = false;
+    bool bothOffloaded = false;
 
-    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
-        ALOGE("Unable to create audio track");
-        delete t;
-        delete newcbd;
-        return NO_INIT;
-    }
+    if (mRecycledTrack != 0) {
+        // check whether we are switching between two offloaded tracks
+        bothOffloaded = (flags & mRecycledTrack->getFlags()
+                                & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0;
 
-
-    if (mRecycledTrack) {
         // check if the existing track can be reused as-is, or if a new track needs to be created.
+        reuse = true;
 
-        bool reuse = true;
         if ((mCallbackData == NULL && mCallback != NULL) ||
                 (mCallbackData != NULL && mCallback == NULL)) {
             // recycled track uses callbacks but the caller wants to use writes, or vice versa
             ALOGV("can't chain callback and write");
             reuse = false;
         } else if ((mRecycledTrack->getSampleRate() != sampleRate) ||
-                (mRecycledTrack->channelCount() != channelCount) ||
-                (mRecycledTrack->frameCount() != t->frameCount())) {
-            ALOGV("samplerate, channelcount or framecount differ: %d/%d Hz, %d/%d ch, %d/%d frames",
+                (mRecycledTrack->channelCount() != (uint32_t)channelCount) ) {
+            ALOGV("samplerate, channelcount differ: %u/%u Hz, %u/%d ch",
                   mRecycledTrack->getSampleRate(), sampleRate,
-                  mRecycledTrack->channelCount(), channelCount,
-                  mRecycledTrack->frameCount(), t->frameCount());
+                  mRecycledTrack->channelCount(), channelCount);
             reuse = false;
         } else if (flags != mFlags) {
             ALOGV("output flags differ %08x/%08x", flags, mFlags);
             reuse = false;
+        } else if (mRecycledTrack->format() != format) {
+            reuse = false;
         }
+    } else {
+        ALOGV("no track available to recycle");
+    }
+
+    ALOGV_IF(bothOffloaded, "both tracks offloaded");
+
+    // If we can't recycle and both tracks are offloaded
+    // we must close the previous output before opening a new one
+    if (bothOffloaded && !reuse) {
+        ALOGV("both offloaded and not recycling");
+        deleteRecycledTrack();
+    }
+
+    sp<AudioTrack> t;
+    CallbackData *newcbd = NULL;
+
+    // We don't attempt to create a new track if we are recycling an
+    // offloaded track. But, if we are recycling a non-offloaded or we
+    // are switching where one is offloaded and one isn't then we create
+    // the new track in advance so that we can read additional stream info
+
+    if (!(reuse && bothOffloaded)) {
+        ALOGV("creating new AudioTrack");
+
+        if (mCallback != NULL) {
+            newcbd = new CallbackData(this);
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    CallbackWrapper,
+                    newcbd,
+                    0,  // notification frames
+                    mSessionId,
+                    AudioTrack::TRANSFER_CALLBACK,
+                    offloadInfo);
+        } else {
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    NULL,
+                    NULL,
+                    0,
+                    mSessionId);
+        }
+
+        if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+            ALOGE("Unable to create audio track");
+            delete newcbd;
+            return NO_INIT;
+        }
+    }
+
+    if (reuse) {
+        CHECK(mRecycledTrack != NULL);
+
+        if (!bothOffloaded) {
+            if (mRecycledTrack->frameCount() != t->frameCount()) {
+                ALOGV("framecount differs: %u/%u frames",
+                      mRecycledTrack->frameCount(), t->frameCount());
+                reuse = false;
+            }
+        }
+
         if (reuse) {
-            ALOGV("chaining to next output");
+            ALOGV("chaining to next output and recycling track");
             close();
             mTrack = mRecycledTrack;
-            mRecycledTrack = NULL;
+            mRecycledTrack.clear();
             if (mCallbackData != NULL) {
                 mCallbackData->setOutput(this);
             }
-            delete t;
             delete newcbd;
             return OK;
         }
-
-        // if we're not going to reuse the track, unblock and flush it
-        if (mCallbackData != NULL) {
-            mCallbackData->setOutput(NULL);
-            mCallbackData->endTrackSwitch();
-        }
-        mRecycledTrack->flush();
-        delete mRecycledTrack;
-        mRecycledTrack = NULL;
-        delete mCallbackData;
-        mCallbackData = NULL;
-        close();
     }
 
+    // we're not going to reuse the track, unblock and flush it
+    // this was done earlier if both tracks are offloaded
+    if (!bothOffloaded) {
+        deleteRecycledTrack();
+    }
+
+    CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
+
     mCallbackData = newcbd;
     ALOGV("setVolume");
     t->setVolume(mLeftVolume, mRightVolume);
@@ -1519,25 +1609,30 @@
     }
     mTrack = t;
 
-    status_t res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
-    if (res != NO_ERROR) {
-        return res;
+    status_t res = NO_ERROR;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+        res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+        if (res == NO_ERROR) {
+            t->setAuxEffectSendLevel(mSendLevel);
+            res = t->attachAuxEffect(mAuxEffectId);
+        }
     }
-    t->setAuxEffectSendLevel(mSendLevel);
-    return t->attachAuxEffect(mAuxEffectId);;
+    ALOGV("open() DONE status %d", res);
+    return res;
 }
 
-void MediaPlayerService::AudioOutput::start()
+status_t MediaPlayerService::AudioOutput::start()
 {
     ALOGV("start");
     if (mCallbackData != NULL) {
         mCallbackData->endTrackSwitch();
     }
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->setAuxEffectSendLevel(mSendLevel);
-        mTrack->start();
+        return mTrack->start();
     }
+    return NO_INIT;
 }
 
 void MediaPlayerService::AudioOutput::setNextOutput(const sp<AudioOutput>& nextOutput) {
@@ -1555,7 +1650,7 @@
         mNextOutput->mCallbackData = mCallbackData;
         mCallbackData = NULL;
         mNextOutput->mRecycledTrack = mTrack;
-        mTrack = NULL;
+        mTrack.clear();
         mNextOutput->mSampleRateHz = mSampleRateHz;
         mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
         mNextOutput->mBytesWritten = mBytesWritten;
@@ -1568,7 +1663,7 @@
     LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
-    if (mTrack) {
+    if (mTrack != 0) {
         ssize_t ret = mTrack->write(buffer, size);
         mBytesWritten += ret;
         return ret;
@@ -1579,26 +1674,25 @@
 void MediaPlayerService::AudioOutput::stop()
 {
     ALOGV("stop");
-    if (mTrack) mTrack->stop();
+    if (mTrack != 0) mTrack->stop();
 }
 
 void MediaPlayerService::AudioOutput::flush()
 {
     ALOGV("flush");
-    if (mTrack) mTrack->flush();
+    if (mTrack != 0) mTrack->flush();
 }
 
 void MediaPlayerService::AudioOutput::pause()
 {
     ALOGV("pause");
-    if (mTrack) mTrack->pause();
+    if (mTrack != 0) mTrack->pause();
 }
 
 void MediaPlayerService::AudioOutput::close()
 {
     ALOGV("close");
-    delete mTrack;
-    mTrack = 0;
+    mTrack.clear();
 }
 
 void MediaPlayerService::AudioOutput::setVolume(float left, float right)
@@ -1606,7 +1700,7 @@
     ALOGV("setVolume(%f, %f)", left, right);
     mLeftVolume = left;
     mRightVolume = right;
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(left, right);
     }
 }
@@ -1615,7 +1709,7 @@
 {
     ALOGV("setPlaybackRatePermille(%d)", ratePermille);
     status_t res = NO_ERROR;
-    if (mTrack) {
+    if (mTrack != 0) {
         res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000);
     } else {
         res = NO_INIT;
@@ -1631,7 +1725,7 @@
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
     mSendLevel = level;
-    if (mTrack) {
+    if (mTrack != 0) {
         return mTrack->setAuxEffectSendLevel(level);
     }
     return NO_ERROR;
@@ -1641,7 +1735,7 @@
 {
     ALOGV("attachAuxEffect(%d)", effectId);
     mAuxEffectId = effectId;
-    if (mTrack) {
+    if (mTrack != 0) {
         return mTrack->attachAuxEffect(effectId);
     }
     return NO_ERROR;
@@ -1651,10 +1745,6 @@
 void MediaPlayerService::AudioOutput::CallbackWrapper(
         int event, void *cookie, void *info) {
     //ALOGV("callbackwrapper");
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
-    }
-
     CallbackData *data = (CallbackData*)cookie;
     data->lock();
     AudioOutput *me = data->getOutput();
@@ -1663,22 +1753,46 @@
         // no output set, likely because the track was scheduled to be reused
         // by another player, but the format turned out to be incompatible.
         data->unlock();
-        buffer->size = 0;
+        if (buffer != NULL) {
+            buffer->size = 0;
+        }
         return;
     }
 
-    size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie);
+    switch(event) {
+    case AudioTrack::EVENT_MORE_DATA: {
+        size_t actualSize = (*me->mCallback)(
+                me, buffer->raw, buffer->size, me->mCallbackCookie,
+                CB_EVENT_FILL_BUFFER);
 
-    if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
-        // We've reached EOS but the audio track is not stopped yet,
-        // keep playing silence.
+        if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
+            // We've reached EOS but the audio track is not stopped yet,
+            // keep playing silence.
 
-        memset(buffer->raw, 0, buffer->size);
-        actualSize = buffer->size;
+            memset(buffer->raw, 0, buffer->size);
+            actualSize = buffer->size;
+        }
+
+        buffer->size = actualSize;
+        } break;
+
+
+    case AudioTrack::EVENT_STREAM_END:
+        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_STREAM_END);
+        break;
+
+    case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+        break;
+
+    default:
+        ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
     }
 
-    buffer->size = actualSize;
     data->unlock();
 }
 
@@ -1774,7 +1888,8 @@
     }
 
     size_t actualSize =
-        (*mCallback)(sink.get(), mBuffer, mBufferSize, mCookie);
+        (*mCallback)(sink.get(), mBuffer, mBufferSize, mCookie,
+                MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER);
 
     if (actualSize > 0) {
         sink->write(mBuffer, actualSize);
@@ -1788,7 +1903,8 @@
 status_t MediaPlayerService::AudioCache::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags)
+        AudioCallback cb, void *cookie, audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
 {
     ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount);
     if (mHeap->getHeapID() < 0) {
@@ -1806,10 +1922,11 @@
     return NO_ERROR;
 }
 
-void MediaPlayerService::AudioCache::start() {
+status_t MediaPlayerService::AudioCache::start() {
     if (mCallbackThread != NULL) {
         mCallbackThread->run("AudioCache callback");
     }
+    return NO_ERROR;
 }
 
 void MediaPlayerService::AudioCache::stop() {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index b33805d..7d27944 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -20,15 +20,12 @@
 
 #include <arpa/inet.h>
 
-#include <utils/Log.h>
 #include <utils/threads.h>
-#include <utils/List.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
 
-#include <media/IMediaPlayerService.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/Metadata.h>
 #include <media/stagefright/foundation/ABase.h>
@@ -78,7 +75,7 @@
                                 AudioOutput(int sessionId);
         virtual                 ~AudioOutput();
 
-        virtual bool            ready() const { return mTrack != NULL; }
+        virtual bool            ready() const { return mTrack != 0; }
         virtual bool            realtime() const { return true; }
         virtual ssize_t         bufferSize() const;
         virtual ssize_t         frameCount() const;
@@ -94,9 +91,10 @@
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount,
                 AudioCallback cb, void *cookie,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
@@ -114,14 +112,17 @@
                 void            setNextOutput(const sp<AudioOutput>& nextOutput);
                 void            switchToNextOutput();
         virtual bool            needsTrailingPadding() { return mNextOutput == NULL; }
+        virtual status_t        setParameters(const String8& keyValuePairs);
+        virtual String8         getParameters(const String8& keys);
 
     private:
         static void             setMinBufferCount();
         static void             CallbackWrapper(
                 int event, void *me, void *info);
+                void            deleteRecycledTrack();
 
-        AudioTrack*             mTrack;
-        AudioTrack*             mRecycledTrack;
+        sp<AudioTrack>          mTrack;
+        sp<AudioTrack>          mRecycledTrack;
         sp<AudioOutput>         mNextOutput;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
@@ -195,9 +196,10 @@
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
                 audio_format_t format, int bufferCount = 1,
                 AudioCallback cb = NULL, void *cookie = NULL,
-                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE);
+                audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush() {}
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index 8db5b9b..0a6aa90 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -220,6 +220,9 @@
     }
 
     mRender = true;
+    if (mState == EAS_STATE_PLAY) {
+        sendEvent(MEDIA_STARTED);
+    }
 
     // wake up render thread
     ALOGV("  wakeup render thread");
@@ -242,6 +245,7 @@
         }
     }
     mPaused = false;
+    sendEvent(MEDIA_STOPPED);
     return NO_ERROR;
 }
 
@@ -279,6 +283,7 @@
         return ERROR_EAS_FAILURE;
     }
     mPaused = true;
+    sendEvent(MEDIA_PAUSED);
     return NO_ERROR;
 }
 
@@ -382,6 +387,7 @@
 status_t MidiFile::reset_nosync()
 {
     ALOGV("MidiFile::reset_nosync");
+    sendEvent(MEDIA_STOPPED);
     // close file
     if (mEasHandle) {
         EAS_CloseFile(mEasData, mEasHandle);
@@ -422,7 +428,7 @@
 
 status_t MidiFile::createOutputTrack() {
     if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels,
-            CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
+            CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2 /*bufferCount*/) != NO_ERROR) {
         ALOGE("mAudioSink open failed");
         return ERROR_OPEN_FAILED;
     }
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index 20e6513..eb959b4 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -16,19 +16,23 @@
 
 #include "RemoteDisplay.h"
 
-#include "ANetworkSession.h"
 #include "source/WifiDisplaySource.h"
 
 #include <media/IRemoteDisplayClient.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 namespace android {
 
 RemoteDisplay::RemoteDisplay(
-        const sp<IRemoteDisplayClient> &client, const char *iface)
+        const sp<IRemoteDisplayClient> &client,
+        const char *iface)
     : mLooper(new ALooper),
-      mNetSession(new ANetworkSession),
-      mSource(new WifiDisplaySource(mNetSession, client)) {
+      mNetSession(new ANetworkSession) {
     mLooper->setName("wfd_looper");
+
+    mSource = new WifiDisplaySource(mNetSession, client);
     mLooper->registerHandler(mSource);
 
     mNetSession->start();
@@ -50,6 +54,7 @@
 
 status_t RemoteDisplay::dispose() {
     mSource->stop();
+    mSource.clear();
 
     mLooper->stop();
     mNetSession->stop();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index bd8b684..82a0116 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -18,6 +18,7 @@
 
 #define REMOTE_DISPLAY_H_
 
+#include <media/IMediaPlayerService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <utils/Errors.h>
@@ -31,7 +32,9 @@
 struct WifiDisplaySource;
 
 struct RemoteDisplay : public BnRemoteDisplay {
-    RemoteDisplay(const sp<IRemoteDisplayClient> &client, const char *iface);
+    RemoteDisplay(
+            const sp<IRemoteDisplayClient> &client,
+            const char *iface);
 
     virtual status_t pause();
     virtual status_t resume();
diff --git a/media/libmediaplayerservice/SharedLibrary.cpp b/media/libmediaplayerservice/SharedLibrary.cpp
index 178e15d..34db761 100644
--- a/media/libmediaplayerservice/SharedLibrary.cpp
+++ b/media/libmediaplayerservice/SharedLibrary.cpp
@@ -46,4 +46,10 @@
         }
         return dlsym(mLibHandle, symbol);
     }
+
+    const char *SharedLibrary::lastError() const {
+        const char *error = dlerror();
+        return error ? error : "No errors or unknown error";
+    }
+
 };
diff --git a/media/libmediaplayerservice/SharedLibrary.h b/media/libmediaplayerservice/SharedLibrary.h
index 5353642..88451a0 100644
--- a/media/libmediaplayerservice/SharedLibrary.h
+++ b/media/libmediaplayerservice/SharedLibrary.h
@@ -29,6 +29,7 @@
 
         bool operator!() const;
         void *lookup(const char *symbol) const;
+        const char *lastError() const;
 
     private:
         void *mLibHandle;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 655ee55..c8901ce 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -20,7 +20,6 @@
 
 #include "HTTPLiveSource.h"
 
-#include "ATSParser.h"
 #include "AnotherPacketSource.h"
 #include "LiveDataSource.h"
 #include "LiveSession.h"
@@ -62,7 +61,10 @@
 NuPlayer::HTTPLiveSource::~HTTPLiveSource() {
     if (mLiveSession != NULL) {
         mLiveSession->disconnect();
+        mLiveSession.clear();
+
         mLiveLooper->stop();
+        mLiveLooper.clear();
     }
 }
 
@@ -76,112 +78,42 @@
     mLiveSession = new LiveSession(
             notify,
             (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
-            mUIDValid, mUID);
+            mUIDValid,
+            mUID);
 
     mLiveLooper->registerHandler(mLiveSession);
 
-    mLiveSession->connect(
+    mLiveSession->connectAsync(
             mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
-
-    mTSParser = new ATSParser;
 }
 
 void NuPlayer::HTTPLiveSource::start() {
 }
 
-sp<MetaData> NuPlayer::HTTPLiveSource::getFormatMeta(bool audio) {
-    ATSParser::SourceType type =
-        audio ? ATSParser::AUDIO : ATSParser::VIDEO;
+sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
+    sp<AMessage> format;
+    status_t err = mLiveSession->getStreamFormat(
+            audio ? LiveSession::STREAMTYPE_AUDIO
+                  : LiveSession::STREAMTYPE_VIDEO,
+            &format);
 
-    sp<AnotherPacketSource> source =
-        static_cast<AnotherPacketSource *>(mTSParser->getSource(type).get());
-
-    if (source == NULL) {
+    if (err != OK) {
         return NULL;
     }
 
-    return source->getFormat();
+    return format;
 }
 
 status_t NuPlayer::HTTPLiveSource::feedMoreTSData() {
-    if (mFinalResult != OK) {
-        return mFinalResult;
-    }
-
-    sp<LiveDataSource> source =
-        static_cast<LiveDataSource *>(mLiveSession->getDataSource().get());
-
-    for (int32_t i = 0; i < 50; ++i) {
-        char buffer[188];
-        ssize_t n = source->readAtNonBlocking(mOffset, buffer, sizeof(buffer));
-
-        if (n == -EWOULDBLOCK) {
-            break;
-        } else if (n < 0) {
-            if (n != ERROR_END_OF_STREAM) {
-                ALOGI("input data EOS reached, error %ld", n);
-            } else {
-                ALOGI("input data EOS reached.");
-            }
-            mTSParser->signalEOS(n);
-            mFinalResult = n;
-            break;
-        } else {
-            if (buffer[0] == 0x00) {
-                // XXX legacy
-
-                uint8_t type = buffer[1];
-
-                sp<AMessage> extra = new AMessage;
-
-                if (type & 2) {
-                    int64_t mediaTimeUs;
-                    memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
-
-                    extra->setInt64(IStreamListener::kKeyMediaTimeUs, mediaTimeUs);
-                }
-
-                mTSParser->signalDiscontinuity(
-                        ((type & 1) == 0)
-                            ? ATSParser::DISCONTINUITY_SEEK
-                            : ATSParser::DISCONTINUITY_FORMATCHANGE,
-                        extra);
-            } else {
-                status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
-
-                if (err != OK) {
-                    ALOGE("TS Parser returned error %d", err);
-                    mTSParser->signalEOS(err);
-                    mFinalResult = err;
-                    break;
-                }
-            }
-
-            mOffset += n;
-        }
-    }
-
     return OK;
 }
 
 status_t NuPlayer::HTTPLiveSource::dequeueAccessUnit(
         bool audio, sp<ABuffer> *accessUnit) {
-    ATSParser::SourceType type =
-        audio ? ATSParser::AUDIO : ATSParser::VIDEO;
-
-    sp<AnotherPacketSource> source =
-        static_cast<AnotherPacketSource *>(mTSParser->getSource(type).get());
-
-    if (source == NULL) {
-        return -EWOULDBLOCK;
-    }
-
-    status_t finalResult;
-    if (!source->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EWOULDBLOCK : finalResult;
-    }
-
-    return source->dequeueAccessUnit(accessUnit);
+    return mLiveSession->dequeueAccessUnit(
+            audio ? LiveSession::STREAMTYPE_AUDIO
+                  : LiveSession::STREAMTYPE_VIDEO,
+            accessUnit);
 }
 
 status_t NuPlayer::HTTPLiveSource::getDuration(int64_t *durationUs) {
@@ -189,15 +121,7 @@
 }
 
 status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
-    // We need to make sure we're not seeking until we have seen the very first
-    // PTS timestamp in the whole stream (from the beginning of the stream).
-    while (!mTSParser->PTSTimeDeltaEstablished() && feedMoreTSData() == OK) {
-        usleep(100000);
-    }
-
-    mLiveSession->seekTo(seekTimeUs);
-
-    return OK;
+    return mLiveSession->seekTo(seekTimeUs);
 }
 
 void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) {
@@ -249,6 +173,32 @@
             break;
         }
 
+        case LiveSession::kWhatStreamsChanged:
+        {
+            uint32_t changedMask;
+            CHECK(msg->findInt32(
+                        "changedMask", (int32_t *)&changedMask));
+
+            bool audio = changedMask & LiveSession::STREAMTYPE_AUDIO;
+            bool video = changedMask & LiveSession::STREAMTYPE_VIDEO;
+
+            sp<AMessage> reply;
+            CHECK(msg->findMessage("reply", &reply));
+
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatQueueDecoderShutdown);
+            notify->setInt32("audio", audio);
+            notify->setInt32("video", video);
+            notify->setMessage("reply", reply);
+            notify->post();
+            break;
+        }
+
+        case LiveSession::kWhatError:
+        {
+            break;
+        }
+
         default:
             TRESPASS();
     }
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 067d1da..aa9434b 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -23,7 +23,6 @@
 
 namespace android {
 
-struct ATSParser;
 struct LiveSession;
 
 struct NuPlayer::HTTPLiveSource : public NuPlayer::Source {
@@ -37,18 +36,16 @@
     virtual void prepareAsync();
     virtual void start();
 
-    virtual status_t feedMoreTSData();
-
     virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+    virtual sp<AMessage> getFormat(bool audio);
 
+    virtual status_t feedMoreTSData();
     virtual status_t getDuration(int64_t *durationUs);
     virtual status_t seekTo(int64_t seekTimeUs);
 
 protected:
     virtual ~HTTPLiveSource();
 
-    virtual sp<MetaData> getFormatMeta(bool audio);
-
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
@@ -70,7 +67,6 @@
     off64_t mOffset;
     sp<ALooper> mLiveLooper;
     sp<LiveSession> mLiveSession;
-    sp<ATSParser> mTSParser;
 
     void onSessionNotify(const sp<AMessage> &msg);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index b89b1c8..b411f34 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -89,6 +89,38 @@
     DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
 };
 
+struct NuPlayer::ShutdownDecoderAction : public Action {
+    ShutdownDecoderAction(bool audio, bool video)
+        : mAudio(audio),
+          mVideo(video) {
+    }
+
+    virtual void execute(NuPlayer *player) {
+        player->performDecoderShutdown(mAudio, mVideo);
+    }
+
+private:
+    bool mAudio;
+    bool mVideo;
+
+    DISALLOW_EVIL_CONSTRUCTORS(ShutdownDecoderAction);
+};
+
+struct NuPlayer::PostMessageAction : public Action {
+    PostMessageAction(const sp<AMessage> &msg)
+        : mMessage(msg) {
+    }
+
+    virtual void execute(NuPlayer *) {
+        mMessage->post();
+    }
+
+private:
+    sp<AMessage> mMessage;
+
+    DISALLOW_EVIL_CONSTRUCTORS(PostMessageAction);
+};
+
 // Use this if there's no state necessary to save in order to execute
 // the action.
 struct NuPlayer::SimpleAction : public Action {
@@ -335,7 +367,8 @@
             ALOGV("kWhatSetVideoNativeWindow");
 
             mDeferredActions.push_back(
-                    new SimpleAction(&NuPlayer::performDecoderShutdown));
+                    new ShutdownDecoderAction(
+                        false /* audio */, true /* video */));
 
             sp<RefBase> obj;
             CHECK(msg->findObject("native-window", &obj));
@@ -698,6 +731,9 @@
                 ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
             } else if (what == Renderer::kWhatVideoRenderingStart) {
                 notifyListener(MEDIA_INFO, MEDIA_INFO_RENDERING_START, 0);
+            } else if (what == Renderer::kWhatMediaRenderingStart) {
+                ALOGV("media rendering started");
+                notifyListener(MEDIA_STARTED, 0, 0);
             }
             break;
         }
@@ -712,7 +748,8 @@
             ALOGV("kWhatReset");
 
             mDeferredActions.push_back(
-                    new SimpleAction(&NuPlayer::performDecoderShutdown));
+                    new ShutdownDecoderAction(
+                        true /* audio */, true /* video */));
 
             mDeferredActions.push_back(
                     new SimpleAction(&NuPlayer::performReset));
@@ -1023,6 +1060,9 @@
 }
 
 void NuPlayer::flushDecoder(bool audio, bool needShutdown) {
+    ALOGV("[%s] flushDecoder needShutdown=%d",
+          audio ? "audio" : "video", needShutdown);
+
     if ((audio && mAudioDecoder == NULL) || (!audio && mVideoDecoder == NULL)) {
         ALOGI("flushDecoder %s without decoder present",
              audio ? "audio" : "video");
@@ -1173,20 +1213,29 @@
     }
 }
 
-void NuPlayer::performDecoderShutdown() {
-    ALOGV("performDecoderShutdown");
+void NuPlayer::performDecoderShutdown(bool audio, bool video) {
+    ALOGV("performDecoderShutdown audio=%d, video=%d", audio, video);
 
-    if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
+    if ((!audio || mAudioDecoder == NULL)
+            && (!video || mVideoDecoder == NULL)) {
         return;
     }
 
     mTimeDiscontinuityPending = true;
 
-    if (mAudioDecoder != NULL) {
+    if (mFlushingAudio == NONE && (!audio || mAudioDecoder == NULL)) {
+        mFlushingAudio = FLUSHED;
+    }
+
+    if (mFlushingVideo == NONE && (!video || mVideoDecoder == NULL)) {
+        mFlushingVideo = FLUSHED;
+    }
+
+    if (audio && mAudioDecoder != NULL) {
         flushDecoder(true /* audio */, true /* needShutdown */);
     }
 
-    if (mVideoDecoder != NULL) {
+    if (video && mVideoDecoder != NULL) {
         flushDecoder(false /* audio */, true /* needShutdown */);
     }
 }
@@ -1322,6 +1371,19 @@
             break;
         }
 
+        case Source::kWhatQueueDecoderShutdown:
+        {
+            int32_t audio, video;
+            CHECK(msg->findInt32("audio", &audio));
+            CHECK(msg->findInt32("video", &video));
+
+            sp<AMessage> reply;
+            CHECK(msg->findMessage("reply", &reply));
+
+            queueDecoderShutdown(audio, video, reply);
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -1355,4 +1417,19 @@
     TRESPASS();
 }
 
+void NuPlayer::queueDecoderShutdown(
+        bool audio, bool video, const sp<AMessage> &reply) {
+    ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
+
+    mDeferredActions.push_back(
+            new ShutdownDecoderAction(audio, video));
+
+    mDeferredActions.push_back(
+            new SimpleAction(&NuPlayer::performScanSources));
+
+    mDeferredActions.push_back(new PostMessageAction(reply));
+
+    processDeferredActions();
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 50d0462..8b6c8c1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -80,6 +80,8 @@
     struct Action;
     struct SeekAction;
     struct SetSurfaceAction;
+    struct ShutdownDecoderAction;
+    struct PostMessageAction;
     struct SimpleAction;
 
     enum {
@@ -172,13 +174,16 @@
 
     void performSeek(int64_t seekTimeUs);
     void performDecoderFlush();
-    void performDecoderShutdown();
+    void performDecoderShutdown(bool audio, bool video);
     void performReset();
     void performScanSources();
     void performSetSurface(const sp<NativeWindowWrapper> &wrapper);
 
     void onSourceNotify(const sp<AMessage> &msg);
 
+    void queueDecoderShutdown(
+            bool audio, bool video, const sp<AMessage> &reply);
+
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayer);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 68b9623..cf0373c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -255,6 +255,7 @@
             return OK;
 
         case STATE_RUNNING:
+            notifyListener(MEDIA_PAUSED);
             mPlayer->pause();
             break;
 
@@ -287,6 +288,8 @@
         case STATE_PAUSED:
         {
             mAtEOS = false;
+            // seeks can take a while, so we essentially paused
+            notifyListener(MEDIA_PAUSED);
             mPlayer->seekToAsync(seekTimeUs);
             break;
         }
@@ -345,6 +348,8 @@
             break;
     }
 
+    notifyListener(MEDIA_STOPPED);
+
     mState = STATE_RESET_IN_PROGRESS;
     mPlayer->resetAsync();
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 404b56f..3b2784b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -50,6 +50,8 @@
       mSyncQueues(false),
       mPaused(false),
       mVideoRenderingStarted(false),
+      mVideoRenderingStartGeneration(0),
+      mAudioRenderingStartGeneration(0),
       mLastPositionUpdateUs(-1ll),
       mVideoLateByUs(0ll) {
 }
@@ -95,11 +97,11 @@
 }
 
 void NuPlayer::Renderer::signalTimeDiscontinuity() {
-    CHECK(mAudioQueue.empty());
-    CHECK(mVideoQueue.empty());
+    // CHECK(mAudioQueue.empty());
+    // CHECK(mVideoQueue.empty());
     mAnchorTimeMediaUs = -1;
     mAnchorTimeRealUs = -1;
-    mSyncQueues = mHasAudio && mHasVideo;
+    mSyncQueues = false;
 }
 
 void NuPlayer::Renderer::pause() {
@@ -220,6 +222,23 @@
     (new AMessage(kWhatAudioSinkChanged, id()))->post();
 }
 
+void NuPlayer::Renderer::prepareForMediaRenderingStart() {
+    mAudioRenderingStartGeneration = mAudioQueueGeneration;
+    mVideoRenderingStartGeneration = mVideoQueueGeneration;
+}
+
+void NuPlayer::Renderer::notifyIfMediaRenderingStarted() {
+    if (mVideoRenderingStartGeneration == mVideoQueueGeneration &&
+        mAudioRenderingStartGeneration == mAudioQueueGeneration) {
+        mVideoRenderingStartGeneration = -1;
+        mAudioRenderingStartGeneration = -1;
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatMediaRenderingStart);
+        notify->post();
+    }
+}
+
 bool NuPlayer::Renderer::onDrainAudioQueue() {
     uint32_t numFramesPlayed;
     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
@@ -299,6 +318,8 @@
         numBytesAvailableToWrite -= copy;
         size_t copiedFrames = copy / mAudioSink->frameSize();
         mNumFramesWritten += copiedFrames;
+
+        notifyIfMediaRenderingStarted();
     }
 
     notifyPosition();
@@ -405,6 +426,8 @@
         notifyVideoRenderingStart();
     }
 
+    notifyIfMediaRenderingStarted();
+
     notifyPosition();
 }
 
@@ -552,6 +575,7 @@
     // is flushed.
     syncQueuesDone();
 
+    ALOGV("flushing %s", audio ? "audio" : "video");
     if (audio) {
         flushQueue(&mAudioQueue);
 
@@ -560,6 +584,8 @@
 
         mDrainAudioQueuePending = false;
         ++mAudioQueueGeneration;
+
+        prepareForMediaRenderingStart();
     } else {
         flushQueue(&mVideoQueue);
 
@@ -568,6 +594,8 @@
 
         mDrainVideoQueuePending = false;
         ++mVideoQueueGeneration;
+
+        prepareForMediaRenderingStart();
     }
 
     notifyFlushComplete(audio);
@@ -658,6 +686,8 @@
     mDrainVideoQueuePending = false;
     ++mVideoQueueGeneration;
 
+    prepareForMediaRenderingStart();
+
     if (mHasAudio) {
         mAudioSink->pause();
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index c9796e2..94a05ea 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -53,6 +53,7 @@
         kWhatFlushComplete       = 'fluC',
         kWhatPosition            = 'posi',
         kWhatVideoRenderingStart = 'vdrd',
+        kWhatMediaRenderingStart = 'mdrd',
     };
 
 protected:
@@ -106,6 +107,8 @@
 
     bool mPaused;
     bool mVideoRenderingStarted;
+    int32_t mVideoRenderingStartGeneration;
+    int32_t mAudioRenderingStartGeneration;
 
     int64_t mLastPositionUpdateUs;
     int64_t mVideoLateByUs;
@@ -116,6 +119,9 @@
     void onDrainVideoQueue();
     void postDrainVideoQueue();
 
+    void prepareForMediaRenderingStart();
+    void notifyIfMediaRenderingStarted();
+
     void onQueueBuffer(const sp<AMessage> &msg);
     void onQueueEOS(const sp<AMessage> &msg);
     void onFlush(const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 1cbf575..81ffd21 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -42,6 +42,7 @@
         kWhatVideoSizeChanged,
         kWhatBufferingStart,
         kWhatBufferingEnd,
+        kWhatQueueDecoderShutdown,
     };
 
     // The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index cf41cf2..2e55c4f 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -255,6 +255,8 @@
 struct ACodec::ExecutingState : public ACodec::BaseState {
     ExecutingState(ACodec *codec);
 
+    void submitRegularOutputBuffers();
+    void submitOutputMetaBuffers();
     void submitOutputBuffers();
 
     // Submit output buffers to the decoder, submit input buffers to client
@@ -359,11 +361,16 @@
       mNode(NULL),
       mSentFormat(false),
       mIsEncoder(false),
+      mUseMetadataOnEncoderOutput(false),
       mShutdownInProgress(false),
       mEncoderDelay(0),
       mEncoderPadding(0),
       mChannelMaskPresent(false),
-      mChannelMask(0) {
+      mChannelMask(0),
+      mDequeueCounter(0),
+      mStoreMetaDataInOutputBuffers(false),
+      mMetaDataBuffersToSubmit(0),
+      mRepeatFrameDelayUs(-1ll) {
     mUninitializedState = new UninitializedState(this);
     mLoadedState = new LoadedState(this);
     mLoadedToIdleState = new LoadedToIdleState(this);
@@ -453,7 +460,11 @@
 
     status_t err;
     if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
-        err = allocateOutputBuffersFromNativeWindow();
+        if (mStoreMetaDataInOutputBuffers) {
+            err = allocateOutputMetaDataBuffers();
+        } else {
+            err = allocateOutputBuffersFromNativeWindow();
+        }
     } else {
         OMX_PARAM_PORTDEFINITIONTYPE def;
         InitOMXParams(&def);
@@ -483,7 +494,8 @@
                         ? OMXCodec::kRequiresAllocateBufferOnInputPorts
                         : OMXCodec::kRequiresAllocateBufferOnOutputPorts;
 
-                if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
+                if ((portIndex == kPortIndexInput && (mFlags & kFlagIsSecure))
+                        || mUseMetadataOnEncoderOutput) {
                     mem.clear();
 
                     void *ptr;
@@ -491,7 +503,10 @@
                             mNode, portIndex, def.nBufferSize, &info.mBufferID,
                             &ptr);
 
-                    info.mData = new ABuffer(ptr, def.nBufferSize);
+                    int32_t bufSize = mUseMetadataOnEncoderOutput ?
+                            (4 + sizeof(buffer_handle_t)) : def.nBufferSize;
+
+                    info.mData = new ABuffer(ptr, bufSize);
                 } else if (mQuirks & requiresAllocateBufferBit) {
                     err = mOMX->allocateBufferWithBackup(
                             mNode, portIndex, mem, &info.mBufferID);
@@ -531,7 +546,9 @@
     return OK;
 }
 
-status_t ACodec::allocateOutputBuffersFromNativeWindow() {
+status_t ACodec::configureOutputBuffersFromNativeWindow(
+        OMX_U32 *bufferCount, OMX_U32 *bufferSize,
+        OMX_U32 *minUndequeuedBuffers) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
@@ -596,10 +613,10 @@
         return err;
     }
 
-    int minUndequeuedBufs = 0;
+    *minUndequeuedBuffers = 0;
     err = mNativeWindow->query(
             mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
-            &minUndequeuedBufs);
+            (int *)minUndequeuedBuffers);
 
     if (err != 0) {
         ALOGE("NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
@@ -610,8 +627,8 @@
     // XXX: Is this the right logic to use?  It's not clear to me what the OMX
     // buffer counts refer to - how do they account for the renderer holding on
     // to buffers?
-    if (def.nBufferCountActual < def.nBufferCountMin + minUndequeuedBufs) {
-        OMX_U32 newBufferCount = def.nBufferCountMin + minUndequeuedBufs;
+    if (def.nBufferCountActual < def.nBufferCountMin + *minUndequeuedBuffers) {
+        OMX_U32 newBufferCount = def.nBufferCountMin + *minUndequeuedBuffers;
         def.nBufferCountActual = newBufferCount;
         err = mOMX->setParameter(
                 mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
@@ -632,12 +649,24 @@
         return err;
     }
 
+    *bufferCount = def.nBufferCountActual;
+    *bufferSize =  def.nBufferSize;
+    return err;
+}
+
+status_t ACodec::allocateOutputBuffersFromNativeWindow() {
+    OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
+    status_t err = configureOutputBuffersFromNativeWindow(
+            &bufferCount, &bufferSize, &minUndequeuedBuffers);
+    if (err != 0)
+        return err;
+
     ALOGV("[%s] Allocating %lu buffers from a native window of size %lu on "
          "output port",
-         mComponentName.c_str(), def.nBufferCountActual, def.nBufferSize);
+         mComponentName.c_str(), bufferCount, bufferSize);
 
     // Dequeue buffers and send them to OMX
-    for (OMX_U32 i = 0; i < def.nBufferCountActual; i++) {
+    for (OMX_U32 i = 0; i < bufferCount; i++) {
         ANativeWindowBuffer *buf;
         err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf);
         if (err != 0) {
@@ -648,7 +677,7 @@
         sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_US;
-        info.mData = new ABuffer(NULL /* data */, def.nBufferSize /* capacity */);
+        info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
         info.mGraphicBuffer = graphicBuffer;
         mBuffers[kPortIndexOutput].push(info);
 
@@ -677,9 +706,9 @@
         cancelStart = 0;
         cancelEnd = mBuffers[kPortIndexOutput].size();
     } else {
-        // Return the last two buffers to the native window.
-        cancelStart = def.nBufferCountActual - minUndequeuedBufs;
-        cancelEnd = def.nBufferCountActual;
+        // Return the required minimum undequeued buffers to the native window.
+        cancelStart = bufferCount - minUndequeuedBuffers;
+        cancelEnd = bufferCount;
     }
 
     for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
@@ -690,6 +719,65 @@
     return err;
 }
 
+status_t ACodec::allocateOutputMetaDataBuffers() {
+    OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
+    status_t err = configureOutputBuffersFromNativeWindow(
+            &bufferCount, &bufferSize, &minUndequeuedBuffers);
+    if (err != 0)
+        return err;
+
+    ALOGV("[%s] Allocating %lu meta buffers on output port",
+         mComponentName.c_str(), bufferCount);
+
+    size_t totalSize = bufferCount * 8;
+    mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
+
+    // Dequeue buffers and send them to OMX
+    for (OMX_U32 i = 0; i < bufferCount; i++) {
+        BufferInfo info;
+        info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
+        info.mGraphicBuffer = NULL;
+        info.mDequeuedAt = mDequeueCounter;
+
+        sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(
+                sizeof(struct VideoDecoderOutputMetaData));
+        CHECK(mem.get() != NULL);
+        info.mData = new ABuffer(mem->pointer(), mem->size());
+
+        // we use useBuffer for metadata regardless of quirks
+        err = mOMX->useBuffer(
+                mNode, kPortIndexOutput, mem, &info.mBufferID);
+
+        mBuffers[kPortIndexOutput].push(info);
+
+        ALOGV("[%s] allocated meta buffer with ID %p (pointer = %p)",
+             mComponentName.c_str(), info.mBufferID, mem->pointer());
+    }
+
+    mMetaDataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
+    return err;
+}
+
+status_t ACodec::submitOutputMetaDataBuffer() {
+    CHECK(mStoreMetaDataInOutputBuffers);
+    if (mMetaDataBuffersToSubmit == 0)
+        return OK;
+
+    BufferInfo *info = dequeueBufferFromNativeWindow();
+    if (info == NULL)
+        return ERROR_IO;
+
+    ALOGV("[%s] submitting output meta buffer ID %p for graphic buffer %p",
+          mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer.get());
+
+    --mMetaDataBuffersToSubmit;
+    CHECK_EQ(mOMX->fillBuffer(mNode, info->mBufferID),
+             (status_t)OK);
+
+    info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+    return OK;
+}
+
 status_t ACodec::cancelBufferToNativeWindow(BufferInfo *info) {
     CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
 
@@ -709,16 +797,19 @@
 ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
     ANativeWindowBuffer *buf;
     int fenceFd = -1;
+    CHECK(mNativeWindow.get() != NULL);
     if (native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf) != 0) {
         ALOGE("dequeueBuffer failed.");
         return NULL;
     }
 
+    BufferInfo *oldest = NULL;
     for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
         BufferInfo *info =
             &mBuffers[kPortIndexOutput].editItemAt(i);
 
-        if (info->mGraphicBuffer->handle == buf->handle) {
+        if (info->mGraphicBuffer != NULL &&
+            info->mGraphicBuffer->handle == buf->handle) {
             CHECK_EQ((int)info->mStatus,
                      (int)BufferInfo::OWNED_BY_NATIVE_WINDOW);
 
@@ -726,6 +817,39 @@
 
             return info;
         }
+
+        if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
+            (oldest == NULL ||
+             // avoid potential issues from counter rolling over
+             mDequeueCounter - info->mDequeuedAt >
+                    mDequeueCounter - oldest->mDequeuedAt)) {
+            oldest = info;
+        }
+    }
+
+    if (oldest) {
+        CHECK(mStoreMetaDataInOutputBuffers);
+
+        // discard buffer in LRU info and replace with new buffer
+        oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+        oldest->mStatus = BufferInfo::OWNED_BY_US;
+
+        mOMX->updateGraphicBufferInMeta(
+                mNode, kPortIndexOutput, oldest->mGraphicBuffer,
+                oldest->mBufferID);
+
+        VideoDecoderOutputMetaData *metaData =
+            reinterpret_cast<VideoDecoderOutputMetaData *>(
+                    oldest->mData->base());
+        CHECK_EQ(metaData->eType, kMetadataBufferTypeGrallocSource);
+
+        ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
+                oldest - &mBuffers[kPortIndexOutput][0],
+                mDequeueCounter - oldest->mDequeuedAt,
+                metaData->pHandle,
+                oldest->mGraphicBuffer->handle, oldest->mData->base());
+
+        return oldest;
     }
 
     TRESPASS();
@@ -831,8 +955,10 @@
             "video_decoder.mpeg4", "video_encoder.mpeg4" },
         { MEDIA_MIMETYPE_VIDEO_H263,
             "video_decoder.h263", "video_encoder.h263" },
-        { MEDIA_MIMETYPE_VIDEO_VPX,
-            "video_decoder.vpx", "video_encoder.vpx" },
+        { MEDIA_MIMETYPE_VIDEO_VP8,
+            "video_decoder.vp8", "video_encoder.vp8" },
+        { MEDIA_MIMETYPE_VIDEO_VP9,
+            "video_decoder.vp9", "video_encoder.vp9" },
         { MEDIA_MIMETYPE_AUDIO_RAW,
             "audio_decoder.raw", "audio_encoder.raw" },
         { MEDIA_MIMETYPE_AUDIO_FLAC,
@@ -912,14 +1038,14 @@
         err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
 
         if (err != OK) {
-            ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
-                  mComponentName.c_str(), err);
+              ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
+                    mComponentName.c_str(), err);
 
-            return err;
-        }
-    }
+              return err;
+          }
+      }
 
-    int32_t prependSPSPPS;
+    int32_t prependSPSPPS = 0;
     if (encoder
             && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
             && prependSPSPPS != 0) {
@@ -946,7 +1072,57 @@
         }
     }
 
-    if (!strncasecmp(mime, "video/", 6)) {
+    // Only enable metadata mode on encoder output if encoder can prepend
+    // sps/pps to idr frames, since in metadata mode the bitstream is in an
+    // opaque handle, to which we don't have access.
+    int32_t video = !strncasecmp(mime, "video/", 6);
+    if (encoder && video) {
+        OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
+            && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
+            && storeMeta != 0);
+
+        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable);
+
+        if (err != OK) {
+            ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
+                mComponentName.c_str(), err);
+            mUseMetadataOnEncoderOutput = 0;
+        } else {
+            mUseMetadataOnEncoderOutput = enable;
+        }
+
+        if (!msg->findInt64(
+                    "repeat-previous-frame-after",
+                    &mRepeatFrameDelayUs)) {
+            mRepeatFrameDelayUs = -1ll;
+        }
+    }
+
+    // Always try to enable dynamic output buffers on native surface
+    sp<RefBase> obj;
+    int32_t haveNativeWindow = msg->findObject("native-window", &obj) &&
+            obj != NULL;
+    mStoreMetaDataInOutputBuffers = false;
+    if (!encoder && video && haveNativeWindow) {
+        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, OMX_TRUE);
+        if (err != OK) {
+            // allow failure
+            ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
+                  mComponentName.c_str(), err);
+            err = OK;
+        } else {
+            ALOGV("[%s] storeMetaDataInBuffers succeeded", mComponentName.c_str());
+            mStoreMetaDataInOutputBuffers = true;
+        }
+
+        int32_t push;
+        if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
+                && push != 0) {
+            mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
+        }
+    }
+
+    if (video) {
         if (encoder) {
             err = setupVideoEncoder(mime, msg);
         } else {
@@ -1476,7 +1652,8 @@
     { MEDIA_MIMETYPE_VIDEO_MPEG4, OMX_VIDEO_CodingMPEG4 },
     { MEDIA_MIMETYPE_VIDEO_H263, OMX_VIDEO_CodingH263 },
     { MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 },
-    { MEDIA_MIMETYPE_VIDEO_VPX, OMX_VIDEO_CodingVPX },
+    { MEDIA_MIMETYPE_VIDEO_VP8, OMX_VIDEO_CodingVP8 },
+    { MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
 };
 
 static status_t GetVideoCodingTypeFromMime(
@@ -2321,10 +2498,15 @@
                                 &params, sizeof(params)),
                              (status_t)OK);
 
+                    CHECK_GT(params.nChannels, 0);
                     CHECK(params.nChannels == 1 || params.bInterleaved);
                     CHECK_EQ(params.nBitPerSample, 16u);
-                    CHECK_EQ((int)params.eNumData, (int)OMX_NumericalDataSigned);
-                    CHECK_EQ((int)params.ePCMMode, (int)OMX_AUDIO_PCMModeLinear);
+
+                    CHECK_EQ((int)params.eNumData,
+                             (int)OMX_NumericalDataSigned);
+
+                    CHECK_EQ((int)params.ePCMMode,
+                             (int)OMX_AUDIO_PCMModeLinear);
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
                     notify->setInt32("channel-count", params.nChannels);
@@ -2334,11 +2516,14 @@
                         if (mSkipCutBuffer != NULL) {
                             size_t prevbufsize = mSkipCutBuffer->size();
                             if (prevbufsize != 0) {
-                                ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize);
+                                ALOGW("Replacing SkipCutBuffer holding %d "
+                                      "bytes",
+                                      prevbufsize);
                             }
                         }
-                        mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize,
-                                                           mEncoderPadding * frameSize);
+                        mSkipCutBuffer = new SkipCutBuffer(
+                                mEncoderDelay * frameSize,
+                                mEncoderPadding * frameSize);
                     }
 
                     if (mChannelMaskPresent) {
@@ -2463,6 +2648,14 @@
         goto error;
     }
 
+    err = native_window_set_scaling_mode(mNativeWindow.get(),
+                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank_frames: set_scaling_mode failed: %s (%d)",
+              strerror(-err), -err);
+        goto error;
+    }
+
     err = native_window_set_usage(mNativeWindow.get(),
             GRALLOC_USAGE_SW_WRITE_OFTEN);
     if (err != NO_ERROR) {
@@ -2916,6 +3109,20 @@
                 mCodec->mBufferStats.add(timeUs, stats);
 #endif
 
+                if (mCodec->mStoreMetaDataInOutputBuffers) {
+                    // try to submit an output buffer for each input buffer
+                    PortMode outputMode = getPortMode(kPortIndexOutput);
+
+                    ALOGV("MetaDataBuffersToSubmit=%u portMode=%s",
+                            mCodec->mMetaDataBuffersToSubmit,
+                            (outputMode == FREE_BUFFERS ? "FREE" :
+                             outputMode == KEEP_BUFFERS ? "KEEP" : "RESUBMIT"));
+                    if (outputMode == RESUBMIT_BUFFERS) {
+                        CHECK_EQ(mCodec->submitOutputMetaDataBuffer(),
+                                (status_t)OK);
+                    }
+                }
+
                 CHECK_EQ(mCodec->mOMX->emptyBuffer(
                             mCodec->mNode,
                             bufferID,
@@ -3033,6 +3240,7 @@
 
     CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_COMPONENT);
 
+    info->mDequeuedAt = ++mCodec->mDequeueCounter;
     info->mStatus = BufferInfo::OWNED_BY_US;
 
     PortMode mode = getPortMode(kPortIndexOutput);
@@ -3062,7 +3270,15 @@
                 mCodec->sendFormatChange(reply);
             }
 
-            info->mData->setRange(rangeOffset, rangeLength);
+            if (mCodec->mUseMetadataOnEncoderOutput) {
+                native_handle_t* handle =
+                        *(native_handle_t**)(info->mData->data() + 4);
+                info->mData->meta()->setPointer("handle", handle);
+                info->mData->meta()->setInt32("rangeOffset", rangeOffset);
+                info->mData->meta()->setInt32("rangeLength", rangeLength);
+            } else {
+                info->mData->setRange(rangeOffset, rangeLength);
+            }
 #if 0
             if (mCodec->mNativeWindow == NULL) {
                 if (IsIDR(info->mData)) {
@@ -3220,6 +3436,7 @@
     mCodec->mOMX.clear();
     mCodec->mQuirks = 0;
     mCodec->mFlags = 0;
+    mCodec->mUseMetadataOnEncoderOutput = 0;
     mCodec->mComponentName.clear();
 }
 
@@ -3373,6 +3590,7 @@
 
     if (componentName.endsWith(".secure")) {
         mCodec->mFlags |= kFlagIsSecure;
+        mCodec->mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
     }
 
     mCodec->mQuirks = quirks;
@@ -3405,6 +3623,10 @@
 
     mCodec->mInputEOSResult = OK;
 
+    mCodec->mDequeueCounter = 0;
+    mCodec->mMetaDataBuffersToSubmit = 0;
+    mCodec->mRepeatFrameDelayUs = -1ll;
+
     if (mCodec->mShutdownInProgress) {
         bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
 
@@ -3535,6 +3757,23 @@
 
     err = mCodec->mOMX->createInputSurface(mCodec->mNode, kPortIndexInput,
             &bufferProducer);
+
+    if (err == OK && mCodec->mRepeatFrameDelayUs > 0ll) {
+        err = mCodec->mOMX->setInternalOption(
+                mCodec->mNode,
+                kPortIndexInput,
+                IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,
+                &mCodec->mRepeatFrameDelayUs,
+                sizeof(mCodec->mRepeatFrameDelayUs));
+
+        if (err != OK) {
+            ALOGE("[%s] Unable to configure option to repeat previous "
+                  "frames (err %d)",
+                  mCodec->mComponentName.c_str(),
+                  err);
+        }
+    }
+
     if (err == OK) {
         notify->setObject("input-surface",
                 new BufferProducerWrapper(bufferProducer));
@@ -3722,7 +3961,20 @@
     return RESUBMIT_BUFFERS;
 }
 
-void ACodec::ExecutingState::submitOutputBuffers() {
+void ACodec::ExecutingState::submitOutputMetaBuffers() {
+    // submit as many output buffers as there are input buffers held by the
+    // codec, in case we are in the middle of a port reconfiguration
+    for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); ++i) {
+        BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+
+        if (info->mStatus == BufferInfo::OWNED_BY_COMPONENT) {
+            if (mCodec->submitOutputMetaDataBuffer() != OK)
+                break;
+        }
+    }
+}
+
+void ACodec::ExecutingState::submitRegularOutputBuffers() {
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexOutput].size(); ++i) {
         BufferInfo *info = &mCodec->mBuffers[kPortIndexOutput].editItemAt(i);
 
@@ -3747,6 +3999,14 @@
     }
 }
 
+void ACodec::ExecutingState::submitOutputBuffers() {
+    if (mCodec->mStoreMetaDataInOutputBuffers) {
+        submitOutputMetaBuffers();
+    } else {
+        submitRegularOutputBuffers();
+    }
+}
+
 void ACodec::ExecutingState::resume() {
     if (mActive) {
         ALOGV("[%s] We're already active, no need to resume.",
@@ -3891,6 +4151,34 @@
         }
     }
 
+    int32_t dropInputFrames;
+    if (params->findInt32("drop-input-frames", &dropInputFrames)) {
+        bool suspend = dropInputFrames != 0;
+
+        status_t err =
+            mOMX->setInternalOption(
+                     mNode,
+                     kPortIndexInput,
+                     IOMX::INTERNAL_OPTION_SUSPEND,
+                     &suspend,
+                     sizeof(suspend));
+
+        if (err != OK) {
+            ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
+            return err;
+        }
+    }
+
+    int32_t dummy;
+    if (params->findInt32("request-sync", &dummy)) {
+        status_t err = requestIDRFrame();
+
+        if (err != OK) {
+            ALOGE("Requesting a sync frame failed w/ err %d", err);
+            return err;
+        }
+    }
+
     return OK;
 }
 
@@ -3913,6 +4201,7 @@
             CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
 
             if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
+                mCodec->mMetaDataBuffersToSubmit = 0;
                 CHECK_EQ(mCodec->mOMX->sendCommand(
                             mCodec->mNode,
                             OMX_CommandPortDisable, kPortIndexOutput),
@@ -4131,7 +4420,8 @@
         CHECK_EQ(mCodec->freeBuffersOnPort(kPortIndexInput), (status_t)OK);
         CHECK_EQ(mCodec->freeBuffersOnPort(kPortIndexOutput), (status_t)OK);
 
-        if (mCodec->mFlags & kFlagIsSecure && mCodec->mNativeWindow != NULL) {
+        if ((mCodec->mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown)
+                && mCodec->mNativeWindow != NULL) {
             // We push enough 1x1 blank buffers to ensure that one of
             // them has made it to the display.  This allows the OMX
             // component teardown to zero out any protected buffers
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index acc3abf..1f68b51 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -62,6 +62,7 @@
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
         $(TOP)/frameworks/native/include/media/hardware \
         $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/services/connectivitymanager \
         $(TOP)/external/flac/include \
         $(TOP)/external/tremolo \
         $(TOP)/external/openssl/include \
@@ -69,7 +70,7 @@
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
         libcamera_client \
-        libcrypto \
+        libconnectivitymanager \
         libcutils \
         libdl \
         libdrmframework \
@@ -97,9 +98,9 @@
         libvpx \
         libwebm \
         libstagefright_mpeg2ts \
-        libstagefright_httplive \
         libstagefright_id3 \
         libFLAC \
+        libmedia_helper
 
 LOCAL_SRC_FILES += \
         chromium_http_stub.cpp
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 4208019..2418aab 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioPlayer"
 #include <utils/Log.h>
+#include <cutils/compiler.h>
 
 #include <binder/IPCThreadState.h>
 #include <media/AudioTrack.h>
@@ -27,6 +28,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 #include "include/AwesomePlayer.h"
 
@@ -34,10 +36,9 @@
 
 AudioPlayer::AudioPlayer(
         const sp<MediaPlayerBase::AudioSink> &audioSink,
-        bool allowDeepBuffering,
+        uint32_t flags,
         AwesomePlayer *observer)
-    : mAudioTrack(NULL),
-      mInputBuffer(NULL),
+    : mInputBuffer(NULL),
       mSampleRate(0),
       mLatencyUs(0),
       mFrameSize(0),
@@ -48,14 +49,17 @@
       mSeeking(false),
       mReachedEOS(false),
       mFinalStatus(OK),
+      mSeekTimeUs(0),
       mStarted(false),
       mIsFirstBuffer(false),
       mFirstBufferResult(OK),
       mFirstBuffer(NULL),
       mAudioSink(audioSink),
-      mAllowDeepBuffering(allowDeepBuffering),
       mObserver(observer),
-      mPinnedTimeUs(-1ll) {
+      mPinnedTimeUs(-1ll),
+      mPlaying(false),
+      mStartPosUs(0),
+      mCreateFlags(flags) {
 }
 
 AudioPlayer::~AudioPlayer() {
@@ -110,7 +114,7 @@
     const char *mime;
     bool success = format->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
 
     success = format->findInt32(kKeySampleRate, &mSampleRate);
     CHECK(success);
@@ -126,16 +130,74 @@
         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
     }
 
+    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+    if (useOffload()) {
+        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
+            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
+            audioFormat = AUDIO_FORMAT_INVALID;
+        } else {
+            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
+        }
+    }
+
+    int avgBitRate = -1;
+    format->findInt32(kKeyBitRate, &avgBitRate);
+
     if (mAudioSink.get() != NULL) {
 
+        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
+        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+
+        if (allowDeepBuffering()) {
+            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        }
+        if (useOffload()) {
+            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+
+            int64_t durationUs;
+            if (format->findInt64(kKeyDuration, &durationUs)) {
+                offloadInfo.duration_us = durationUs;
+            } else {
+                offloadInfo.duration_us = -1;
+            }
+
+            offloadInfo.sample_rate = mSampleRate;
+            offloadInfo.channel_mask = channelMask;
+            offloadInfo.format = audioFormat;
+            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
+            offloadInfo.bit_rate = avgBitRate;
+            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
+            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
+        }
+
         status_t err = mAudioSink->open(
-                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
+                mSampleRate, numChannels, channelMask, audioFormat,
                 DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 &AudioPlayer::AudioSinkCallback,
                 this,
-                (mAllowDeepBuffering ?
-                            AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
-                            AUDIO_OUTPUT_FLAG_NONE));
+                (audio_output_flags_t)flags,
+                useOffload() ? &offloadInfo : NULL);
+
+        if (err == OK) {
+            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
+            mFrameSize = mAudioSink->frameSize();
+
+            if (useOffload()) {
+                // If the playback is offloaded to h/w we pass the
+                // HAL some metadata information
+                // We don't want to do this for PCM because it will be going
+                // through the AudioFlinger mixer before reaching the hardware
+                sendMetaDataToHal(mAudioSink, format);
+            }
+
+            err = mAudioSink->start();
+            // do not alter behavior for non-offloaded tracks: ignore start status.
+            if (!useOffload()) {
+                err = OK;
+            }
+        }
+
         if (err != OK) {
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -149,10 +211,6 @@
             return err;
         }
 
-        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
-        mFrameSize = mAudioSink->frameSize();
-
-        mAudioSink->start();
     } else {
         // playing to an AudioTrack, set up mask if necessary
         audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
@@ -166,8 +224,7 @@
                 0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
-            delete mAudioTrack;
-            mAudioTrack = NULL;
+            mAudioTrack.clear();
 
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -188,6 +245,7 @@
     }
 
     mStarted = true;
+    mPlaying = true;
     mPinnedTimeUs = -1ll;
 
     return OK;
@@ -214,29 +272,57 @@
 
         mPinnedTimeUs = ALooper::GetNowUs();
     }
+
+    mPlaying = false;
 }
 
-void AudioPlayer::resume() {
+status_t AudioPlayer::resume() {
     CHECK(mStarted);
+    status_t err;
 
     if (mAudioSink.get() != NULL) {
-        mAudioSink->start();
+        err = mAudioSink->start();
     } else {
-        mAudioTrack->start();
+        err = mAudioTrack->start();
     }
+
+    if (err == OK) {
+        mPlaying = true;
+    }
+
+    return err;
 }
 
 void AudioPlayer::reset() {
     CHECK(mStarted);
 
+    ALOGV("reset: mPlaying=%d mReachedEOS=%d useOffload=%d",
+                                mPlaying, mReachedEOS, useOffload() );
+
     if (mAudioSink.get() != NULL) {
         mAudioSink->stop();
+        // If we're closing and have reached EOS, we don't want to flush
+        // the track because if it is offloaded there could be a small
+        // amount of residual data in the hardware buffer which we must
+        // play to give gapless playback.
+        // But if we're resetting when paused or before we've reached EOS
+        // we can't be doing a gapless playback and there could be a large
+        // amount of data queued in the hardware if the track is offloaded,
+        // so we must flush to prevent a track switch being delayed playing
+        // the buffered data that we don't want now
+        if (!mPlaying || !mReachedEOS) {
+            mAudioSink->flush();
+        }
+
         mAudioSink->close();
     } else {
         mAudioTrack->stop();
 
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        if (!mPlaying || !mReachedEOS) {
+            mAudioTrack->flush();
+        }
+
+        mAudioTrack.clear();
     }
 
     // Make sure to release any buffer we hold onto so that the
@@ -259,10 +345,16 @@
     // The following hack is necessary to ensure that the OMX
     // component is completely released by the time we may try
     // to instantiate it again.
-    wp<MediaSource> tmp = mSource;
-    mSource.clear();
-    while (tmp.promote() != NULL) {
-        usleep(1000);
+    // When offloading, the OMX component is not used so this hack
+    // is not needed
+    if (!useOffload()) {
+        wp<MediaSource> tmp = mSource;
+        mSource.clear();
+        while (tmp.promote() != NULL) {
+            usleep(1000);
+        }
+    } else {
+        mSource.clear();
     }
     IPCThreadState::self()->flushCommands();
 
@@ -274,6 +366,8 @@
     mReachedEOS = false;
     mFinalStatus = OK;
     mStarted = false;
+    mPlaying = false;
+    mStartPosUs = 0;
 }
 
 // static
@@ -294,10 +388,19 @@
     return mReachedEOS;
 }
 
+void AudioPlayer::notifyAudioEOS() {
+    ALOGV("AudioPlayer@0x%p notifyAudioEOS", this);
+
+    if (mObserver != NULL) {
+        mObserver->postAudioEOS(0);
+        ALOGV("Notified observer of EOS!");
+    }
+}
+
 status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
     if (mAudioSink.get() != NULL) {
         return mAudioSink->setPlaybackRatePermille(ratePermille);
-    } else if (mAudioTrack != NULL){
+    } else if (mAudioTrack != 0){
         return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000);
     } else {
         return NO_INIT;
@@ -307,21 +410,44 @@
 // static
 size_t AudioPlayer::AudioSinkCallback(
         MediaPlayerBase::AudioSink *audioSink,
-        void *buffer, size_t size, void *cookie) {
+        void *buffer, size_t size, void *cookie,
+        MediaPlayerBase::AudioSink::cb_event_t event) {
     AudioPlayer *me = (AudioPlayer *)cookie;
 
-    return me->fillBuffer(buffer, size);
+    switch(event) {
+    case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
+        return me->fillBuffer(buffer, size);
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
+        ALOGV("AudioSinkCallback: stream end");
+        me->mReachedEOS = true;
+        me->notifyAudioEOS();
+        break;
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
+        ALOGV("AudioSinkCallback: Tear down event");
+        me->mObserver->postAudioTearDown();
+        break;
+    }
+
+    return 0;
 }
 
 void AudioPlayer::AudioCallback(int event, void *info) {
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
+    switch (event) {
+    case AudioTrack::EVENT_MORE_DATA:
+        {
+        AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+        size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
+        buffer->size = numBytesWritten;
+        }
+        break;
+
+    case AudioTrack::EVENT_STREAM_END:
+        mReachedEOS = true;
+        notifyAudioEOS();
+        break;
     }
-
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-
-    buffer->size = numBytesWritten;
 }
 
 uint32_t AudioPlayer::getNumFramesPendingPlayout() const {
@@ -361,6 +487,7 @@
     size_t size_remaining = size;
     while (size_remaining > 0) {
         MediaSource::ReadOptions options;
+        bool refreshSeekTime = false;
 
         {
             Mutex::Autolock autoLock(mLock);
@@ -375,6 +502,7 @@
                 }
 
                 options.setSeekTo(mSeekTimeUs);
+                refreshSeekTime = true;
 
                 if (mInputBuffer != NULL) {
                     mInputBuffer->release();
@@ -407,43 +535,56 @@
             Mutex::Autolock autoLock(mLock);
 
             if (err != OK) {
-                if (mObserver && !mReachedEOS) {
-                    // We don't want to post EOS right away but only
-                    // after all frames have actually been played out.
-
-                    // These are the number of frames submitted to the
-                    // AudioTrack that you haven't heard yet.
-                    uint32_t numFramesPendingPlayout =
-                        getNumFramesPendingPlayout();
-
-                    // These are the number of frames we're going to
-                    // submit to the AudioTrack by returning from this
-                    // callback.
-                    uint32_t numAdditionalFrames = size_done / mFrameSize;
-
-                    numFramesPendingPlayout += numAdditionalFrames;
-
-                    int64_t timeToCompletionUs =
-                        (1000000ll * numFramesPendingPlayout) / mSampleRate;
-
-                    ALOGV("total number of frames played: %lld (%lld us)",
-                            (mNumFramesPlayed + numAdditionalFrames),
-                            1000000ll * (mNumFramesPlayed + numAdditionalFrames)
-                                / mSampleRate);
-
-                    ALOGV("%d frames left to play, %lld us (%.2f secs)",
-                         numFramesPendingPlayout,
-                         timeToCompletionUs, timeToCompletionUs / 1E6);
-
-                    postEOS = true;
-                    if (mAudioSink->needsTrailingPadding()) {
-                        postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                if (!mReachedEOS) {
+                    if (useOffload()) {
+                        // no more buffers to push - stop() and wait for STREAM_END
+                        // don't set mReachedEOS until stream end received
+                        if (mAudioSink != NULL) {
+                            mAudioSink->stop();
+                        } else {
+                            mAudioTrack->stop();
+                        }
                     } else {
-                        postEOSDelayUs = 0;
+                        if (mObserver) {
+                            // We don't want to post EOS right away but only
+                            // after all frames have actually been played out.
+
+                            // These are the number of frames submitted to the
+                            // AudioTrack that you haven't heard yet.
+                            uint32_t numFramesPendingPlayout =
+                                getNumFramesPendingPlayout();
+
+                            // These are the number of frames we're going to
+                            // submit to the AudioTrack by returning from this
+                            // callback.
+                            uint32_t numAdditionalFrames = size_done / mFrameSize;
+
+                            numFramesPendingPlayout += numAdditionalFrames;
+
+                            int64_t timeToCompletionUs =
+                                (1000000ll * numFramesPendingPlayout) / mSampleRate;
+
+                            ALOGV("total number of frames played: %lld (%lld us)",
+                                    (mNumFramesPlayed + numAdditionalFrames),
+                                    1000000ll * (mNumFramesPlayed + numAdditionalFrames)
+                                        / mSampleRate);
+
+                            ALOGV("%d frames left to play, %lld us (%.2f secs)",
+                                 numFramesPendingPlayout,
+                                 timeToCompletionUs, timeToCompletionUs / 1E6);
+
+                            postEOS = true;
+                            if (mAudioSink->needsTrailingPadding()) {
+                                postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                            } else {
+                                postEOSDelayUs = 0;
+                            }
+                        }
+
+                        mReachedEOS = true;
                     }
                 }
 
-                mReachedEOS = true;
                 mFinalStatus = err;
                 break;
             }
@@ -454,17 +595,34 @@
                 mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
             }
 
-            CHECK(mInputBuffer->meta_data()->findInt64(
+            if(mInputBuffer->range_length() != 0) {
+                CHECK(mInputBuffer->meta_data()->findInt64(
                         kKeyTime, &mPositionTimeMediaUs));
+            }
 
-            mPositionTimeRealUs =
-                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
-                    / mSampleRate;
+            // need to adjust the mStartPosUs for offload decoding since parser
+            // might not be able to get the exact seek time requested.
+            if (refreshSeekTime && useOffload()) {
+                if (postSeekComplete) {
+                    ALOGV("fillBuffer is going to post SEEK_COMPLETE");
+                    mObserver->postAudioSeekComplete();
+                    postSeekComplete = false;
+                }
 
-            ALOGV("buffer->size() = %d, "
-                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
-                 mInputBuffer->range_length(),
-                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+                mStartPosUs = mPositionTimeMediaUs;
+                ALOGV("adjust seek time to: %.2f", mStartPosUs/ 1E6);
+            }
+
+            if (!useOffload()) {
+                mPositionTimeRealUs =
+                    ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                        / mSampleRate;
+                ALOGV("buffer->size() = %d, "
+                     "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+                     mInputBuffer->range_length(),
+                     mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+            }
+
         }
 
         if (mInputBuffer->range_length() == 0) {
@@ -490,6 +648,13 @@
         size_remaining -= copy;
     }
 
+    if (useOffload()) {
+        // We must ask the hardware what it has played
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+             mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+    }
+
     {
         Mutex::Autolock autoLock(mLock);
         mNumFramesPlayed += size_done / mFrameSize;
@@ -538,9 +703,36 @@
     return result + diffUs;
 }
 
+int64_t AudioPlayer::getOutputPlayPositionUs_l() const
+{
+    uint32_t playedSamples = 0;
+    if (mAudioSink != NULL) {
+        mAudioSink->getPosition(&playedSamples);
+    } else {
+        mAudioTrack->getPosition(&playedSamples);
+    }
+
+    const int64_t playedUs = (static_cast<int64_t>(playedSamples) * 1000000 ) / mSampleRate;
+
+    // HAL position is relative to the first buffer we sent at mStartPosUs
+    const int64_t renderedDuration = mStartPosUs + playedUs;
+    ALOGV("getOutputPlayPositionUs_l %lld", renderedDuration);
+    return renderedDuration;
+}
+
 int64_t AudioPlayer::getMediaTimeUs() {
     Mutex::Autolock autoLock(mLock);
 
+    if (useOffload()) {
+        if (mSeeking) {
+            return mSeekTimeUs;
+        }
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %lld",
+              mPositionTimeRealUs);
+        return mPositionTimeRealUs;
+    }
+
     if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
         if (mSeeking) {
             return mSeekTimeUs;
@@ -549,6 +741,11 @@
         return 0;
     }
 
+    if (useOffload()) {
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        return mPositionTimeRealUs;
+    }
+
     int64_t realTimeOffset = getRealTimeUsLocked() - mPositionTimeRealUs;
     if (realTimeOffset < 0) {
         realTimeOffset = 0;
@@ -570,19 +767,34 @@
 status_t AudioPlayer::seekTo(int64_t time_us) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("seekTo( %lld )", time_us);
+
     mSeeking = true;
     mPositionTimeRealUs = mPositionTimeMediaUs = -1;
     mReachedEOS = false;
     mSeekTimeUs = time_us;
+    mStartPosUs = time_us;
 
     // Flush resets the number of played frames
     mNumFramesPlayed = 0;
     mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
 
     if (mAudioSink != NULL) {
+        if (mPlaying) {
+            mAudioSink->pause();
+        }
         mAudioSink->flush();
+        if (mPlaying) {
+            mAudioSink->start();
+        }
     } else {
+        if (mPlaying) {
+            mAudioTrack->pause();
+        }
         mAudioTrack->flush();
+        if (mPlaying) {
+            mAudioTrack->start();
+        }
     }
 
     return OK;
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 3cf4d5c..bdd842f 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -49,8 +49,7 @@
 
 AudioSource::AudioSource(
         audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
-    : mRecord(NULL),
-      mStarted(false),
+    : mStarted(false),
       mSampleRate(sampleRate),
       mPrevSampleTimeUs(0),
       mNumFramesReceived(0),
@@ -91,9 +90,6 @@
     if (mStarted) {
         reset();
     }
-
-    delete mRecord;
-    mRecord = NULL;
 }
 
 status_t AudioSource::initCheck() const {
@@ -122,8 +118,7 @@
     if (err == OK) {
         mStarted = true;
     } else {
-        delete mRecord;
-        mRecord = NULL;
+        mRecord.clear();
     }
 
 
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index d53f442..52e178e 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -47,6 +47,7 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
 
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
@@ -65,6 +66,11 @@
 static const size_t kLowWaterMarkBytes = 40000;
 static const size_t kHighWaterMarkBytes = 200000;
 
+// maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
+// is destroyed to allow the audio DSP to power down.
+static int64_t kOffloadPauseMaxUs = 60000000ll;
+
+
 struct AwesomeEvent : public TimedEventQueue::Event {
     AwesomeEvent(
             AwesomePlayer *player,
@@ -185,6 +191,8 @@
       mTimeSource(NULL),
       mVideoRenderingStarted(false),
       mVideoRendererIsPreview(false),
+      mMediaRenderingStartGeneration(0),
+      mStartGeneration(0),
       mAudioPlayer(NULL),
       mDisplayWidth(0),
       mDisplayHeight(0),
@@ -194,7 +202,9 @@
       mVideoBuffer(NULL),
       mDecryptHandle(NULL),
       mLastVideoTimeUs(-1),
-      mTextDriver(NULL) {
+      mTextDriver(NULL),
+      mOffloadAudio(false),
+      mAudioTearDown(false) {
     CHECK_EQ(mClient.connect(), (status_t)OK);
 
     DataSource::RegisterDefaultSniffers();
@@ -206,13 +216,17 @@
     mBufferingEvent = new AwesomeEvent(this, &AwesomePlayer::onBufferingUpdate);
     mBufferingEventPending = false;
     mVideoLagEvent = new AwesomeEvent(this, &AwesomePlayer::onVideoLagUpdate);
-    mVideoEventPending = false;
+    mVideoLagEventPending = false;
 
     mCheckAudioStatusEvent = new AwesomeEvent(
             this, &AwesomePlayer::onCheckAudioStatus);
 
     mAudioStatusEventPending = false;
 
+    mAudioTearDownEvent = new AwesomeEvent(this,
+                              &AwesomePlayer::onAudioTearDownEvent);
+    mAudioTearDownEventPending = false;
+
     reset();
 }
 
@@ -232,6 +246,11 @@
     mQueue.cancelEvent(mVideoLagEvent->eventID());
     mVideoLagEventPending = false;
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!keepNotifications) {
         mQueue.cancelEvent(mStreamDoneEvent->eventID());
         mStreamDoneEventPending = false;
@@ -474,6 +493,8 @@
     mDisplayWidth = 0;
     mDisplayHeight = 0;
 
+    notifyListener_l(MEDIA_STOPPED);
+
     if (mDecryptHandle != NULL) {
             mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
                     Playback::STOP, 0);
@@ -518,7 +539,7 @@
     mVideoTrack.clear();
     mExtractor.clear();
 
-    // Shutdown audio first, so that the respone to the reset request
+    // Shutdown audio first, so that the response to the reset request
     // appears to happen instantaneously as far as the user is concerned
     // If we did this later, audio would continue playing while we
     // shutdown the video-related resources and the player appear to
@@ -531,6 +552,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -586,7 +608,7 @@
 }
 
 void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
-    if (mListener != NULL) {
+    if ((mListener != NULL) && !mAudioTearDown) {
         sp<MediaPlayerBase> listener = mListener.promote();
 
         if (listener != NULL) {
@@ -597,7 +619,7 @@
 
 bool AwesomePlayer::getBitrate(int64_t *bitrate) {
     off64_t size;
-    if (mDurationUs >= 0 && mCachedSource != NULL
+    if (mDurationUs > 0 && mCachedSource != NULL
             && mCachedSource->getSize(&size) == OK) {
         *bitrate = size * 8000000ll / mDurationUs;  // in bits/sec
         return true;
@@ -842,6 +864,13 @@
 
         pause_l(true /* at eos */);
 
+        // If audio hasn't completed MEDIA_SEEK_COMPLETE yet,
+        // notify MEDIA_SEEK_COMPLETE to observer immediately for state persistence.
+        if (mWatchForAudioSeekComplete) {
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+            mWatchForAudioSeekComplete = false;
+        }
+
         modifyFlags(AT_EOS, SET);
     }
 }
@@ -883,41 +912,42 @@
 
     if (mAudioSource != NULL) {
         if (mAudioPlayer == NULL) {
-            if (mAudioSink != NULL) {
-                bool allowDeepBuffering;
-                int64_t cachedDurationUs;
-                bool eos;
-                if (mVideoSource == NULL
-                        && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
-                        (getCachedDuration_l(&cachedDurationUs, &eos) &&
-                        cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
-                    allowDeepBuffering = true;
-                } else {
-                    allowDeepBuffering = false;
-                }
-
-                mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
-                mAudioPlayer->setSource(mAudioSource);
-
-                mTimeSource = mAudioPlayer;
-
-                // If there was a seek request before we ever started,
-                // honor the request now.
-                // Make sure to do this before starting the audio player
-                // to avoid a race condition.
-                seekAudioIfNecessary_l();
-            }
+            createAudioPlayer_l();
         }
 
         CHECK(!(mFlags & AUDIO_RUNNING));
 
         if (mVideoSource == NULL) {
+
             // We don't want to post an error notification at this point,
             // the error returned from MediaPlayer::start() will suffice.
 
             status_t err = startAudioPlayer_l(
                     false /* sendErrorNotification */);
 
+            if ((err != OK) && mOffloadAudio) {
+                ALOGI("play_l() cannot create offload output, fallback to sw decode");
+                delete mAudioPlayer;
+                mAudioPlayer = NULL;
+                // if the player was started, it will stop the source when destroyed
+                if (!(mFlags & AUDIOPLAYER_STARTED)) {
+                    mAudioSource->stop();
+                }
+                modifyFlags((AUDIO_RUNNING | AUDIOPLAYER_STARTED), CLEAR);
+                mOffloadAudio = false;
+                mAudioSource = mOmxSource;
+                if (mAudioSource != NULL) {
+                    err = mAudioSource->start();
+
+                    if (err != OK) {
+                        mAudioSource.clear();
+                    } else {
+                        createAudioPlayer_l();
+                        err = startAudioPlayer_l(false);
+                    }
+                }
+            }
+
             if (err != OK) {
                 delete mAudioPlayer;
                 mAudioPlayer = NULL;
@@ -966,19 +996,65 @@
     return OK;
 }
 
+void AwesomePlayer::createAudioPlayer_l()
+{
+    uint32_t flags = 0;
+    int64_t cachedDurationUs;
+    bool eos;
+
+    if (mOffloadAudio) {
+        flags |= AudioPlayer::USE_OFFLOAD;
+    } else if (mVideoSource == NULL
+            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
+            (getCachedDuration_l(&cachedDurationUs, &eos) &&
+            cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
+        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
+    }
+    if (isStreamingHTTP()) {
+        flags |= AudioPlayer::IS_STREAMING;
+    }
+    if (mVideoSource != NULL) {
+        flags |= AudioPlayer::HAS_VIDEO;
+    }
+
+    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
+    mAudioPlayer->setSource(mAudioSource);
+
+    mTimeSource = mAudioPlayer;
+
+    // If there was a seek request before we ever started,
+    // honor the request now.
+    // Make sure to do this before starting the audio player
+    // to avoid a race condition.
+    seekAudioIfNecessary_l();
+}
+
+void AwesomePlayer::notifyIfMediaStarted_l() {
+    if (mMediaRenderingStartGeneration == mStartGeneration) {
+        mMediaRenderingStartGeneration = -1;
+        notifyListener_l(MEDIA_STARTED);
+    }
+}
+
 status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
     CHECK(!(mFlags & AUDIO_RUNNING));
+    status_t err = OK;
 
     if (mAudioSource == NULL || mAudioPlayer == NULL) {
         return OK;
     }
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!(mFlags & AUDIOPLAYER_STARTED)) {
         bool wasSeeking = mAudioPlayer->isSeeking();
 
         // We've already started the MediaSource in order to enable
         // the prefetcher to read its data.
-        status_t err = mAudioPlayer->start(
+        err = mAudioPlayer->start(
                 true /* sourceAlreadyStarted */);
 
         if (err != OK) {
@@ -996,16 +1072,20 @@
 
             // We will have finished the seek while starting the audio player.
             postAudioSeekComplete();
+        } else {
+            notifyIfMediaStarted_l();
         }
     } else {
-        mAudioPlayer->resume();
+        err = mAudioPlayer->resume();
     }
 
-    modifyFlags(AUDIO_RUNNING, SET);
+    if (err == OK) {
+        modifyFlags(AUDIO_RUNNING, SET);
 
-    mWatchForAudioEOS = true;
+        mWatchForAudioEOS = true;
+    }
 
-    return OK;
+    return err;
 }
 
 void AwesomePlayer::notifyVideoSize_l() {
@@ -1134,18 +1214,20 @@
         return OK;
     }
 
+    notifyListener_l(MEDIA_PAUSED);
+    mMediaRenderingStartGeneration = ++mStartGeneration;
+
     cancelPlayerEvents(true /* keepNotifications */);
 
     if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-        if (at_eos) {
-            // If we played the audio stream to completion we
-            // want to make sure that all samples remaining in the audio
-            // track's queue are played out.
-            mAudioPlayer->pause(true /* playPendingSamples */);
-        } else {
-            mAudioPlayer->pause();
+        // If we played the audio stream to completion we
+        // want to make sure that all samples remaining in the audio
+        // track's queue are played out.
+        mAudioPlayer->pause(at_eos /* playPendingSamples */);
+        // send us a reminder to tear down the AudioPlayer if paused for too long.
+        if (mOffloadAudio) {
+            postAudioTearDownEvent(kOffloadPauseMaxUs);
         }
-
         modifyFlags(AUDIO_RUNNING, CLEAR);
     }
 
@@ -1290,7 +1372,6 @@
     } else {
         *positionUs = 0;
     }
-
     return OK;
 }
 
@@ -1324,6 +1405,9 @@
     mSeekTimeUs = timeUs;
     modifyFlags((AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS), CLEAR);
 
+    notifyListener_l(MEDIA_PAUSED);
+    mMediaRenderingStartGeneration = ++mStartGeneration;
+
     seekAudioIfNecessary_l();
 
     if (mFlags & TEXTPLAYER_INITIALIZED) {
@@ -1385,14 +1469,29 @@
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
+    // Check whether there is a hardware codec for this stream
+    // This doesn't guarantee that the hardware has a free stream
+    // but it avoids us attempting to open (and re-open) an offload
+    // stream to hardware that doesn't have the necessary codec
+    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP());
 
     if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+        ALOGV("createAudioPlayer: bypass OMX (raw)");
         mAudioSource = mAudioTrack;
     } else {
-        mAudioSource = OMXCodec::Create(
+        // If offloading we still create an OMX decoder as a fall-back
+        // but we don't start it
+        mOmxSource = OMXCodec::Create(
                 mClient.interface(), mAudioTrack->getFormat(),
                 false, // createEncoder
                 mAudioTrack);
+
+        if (mOffloadAudio) {
+            ALOGV("createAudioPlayer: bypass OMX (offload)");
+            mAudioSource = mAudioTrack;
+        } else {
+            mAudioSource = mOmxSource;
+        }
     }
 
     if (mAudioSource != NULL) {
@@ -1408,6 +1507,7 @@
 
         if (err != OK) {
             mAudioSource.clear();
+            mOmxSource.clear();
             return err;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
@@ -1822,6 +1922,7 @@
             notifyListener_l(MEDIA_INFO, MEDIA_INFO_RENDERING_START);
         }
 
+        notifyIfMediaStarted_l();
     }
 
     mVideoBuffer->release();
@@ -1885,6 +1986,15 @@
     mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
 }
 
+void AwesomePlayer::postAudioTearDownEvent(int64_t delayUs) {
+    Mutex::Autolock autoLock(mAudioLock);
+    if (mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = true;
+    mQueue.postEventWithDelay(mAudioTearDownEvent, delayUs);
+}
+
 void AwesomePlayer::onCheckAudioStatus() {
     {
         Mutex::Autolock autoLock(mAudioLock);
@@ -1908,6 +2018,8 @@
         }
 
         mSeeking = NO_SEEK;
+
+        notifyIfMediaStarted_l();
     }
 
     status_t finalStatus;
@@ -2200,7 +2312,10 @@
 
 void AwesomePlayer::onPrepareAsyncEvent() {
     Mutex::Autolock autoLock(mLock);
+    beginPrepareAsync_l();
+}
 
+void AwesomePlayer::beginPrepareAsync_l() {
     if (mFlags & PREPARE_CANCELLED) {
         ALOGI("prepare was cancelled before doing anything");
         abortPrepare(UNKNOWN_ERROR);
@@ -2273,6 +2388,10 @@
     postCheckAudioStatusEvent(0);
 }
 
+void AwesomePlayer::postAudioTearDown() {
+    postAudioTearDownEvent(0);
+}
+
 status_t AwesomePlayer::setParameter(int key, const Parcel &request) {
     switch (key) {
         case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
@@ -2404,6 +2523,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -2660,4 +2780,66 @@
     }
 }
 
+void AwesomePlayer::onAudioTearDownEvent() {
+
+    Mutex::Autolock autoLock(mLock);
+    if (!mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = false;
+
+    ALOGV("onAudioTearDownEvent");
+
+    // stream info is cleared by reset_l() so copy what we need
+    const bool wasPlaying = (mFlags & PLAYING);
+    KeyedVector<String8, String8> uriHeaders(mUriHeaders);
+    sp<DataSource> fileSource(mFileSource);
+
+    mStatsLock.lock();
+    String8 uri(mStats.mURI);
+    mStatsLock.unlock();
+
+    // get current position so we can start recreated stream from here
+    int64_t position = 0;
+    getPosition(&position);
+
+    // Reset and recreate
+    reset_l();
+    mFlags |= PREPARING;
+
+    status_t err;
+
+    if (fileSource != NULL) {
+        mFileSource = fileSource;
+        err = setDataSource_l(fileSource);
+    } else {
+        err = setDataSource_l(uri, &uriHeaders);
+    }
+
+    if ( err != OK ) {
+        // This will force beginPrepareAsync_l() to notify
+        // a MEDIA_ERROR to the client and abort the prepare
+        mFlags |= PREPARE_CANCELLED;
+    }
+
+    mAudioTearDown = true;
+    mIsAsyncPrepare = true;
+
+    // Call prepare for the host decoding
+    beginPrepareAsync_l();
+
+    if (mPrepareResult == OK) {
+        if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+            seekTo_l(position);
+        }
+
+        if (wasPlaying) {
+            modifyFlags(CACHE_UNDERRUN, CLEAR);
+            play_l();
+        }
+    }
+
+    mAudioTearDown = false;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index d2cc6c2..5fa4b6f 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -30,6 +30,8 @@
 #include <cutils/properties.h>
 #include <cutils/qtaguid.h>
 
+#include <ConnectivityManager.h>
+
 namespace android {
 
 HTTPBase::HTTPBase()
@@ -164,4 +166,14 @@
     }
 }
 
+// static
+void HTTPBase::RegisterSocketUserMark(int sockfd, uid_t uid) {
+    ConnectivityManager::markSocketAsUser(sockfd, uid);
+}
+
+// static
+void HTTPBase::UnRegisterSocketUserMark(int sockfd) {
+    RegisterSocketUserMark(sockfd, geteuid());
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 145869e..ad985ee 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -341,6 +341,7 @@
       mDataSource(source),
       mInitCheck(NO_INIT),
       mHasVideo(false),
+      mHeaderTimescale(0),
       mFirstTrack(NULL),
       mLastTrack(NULL),
       mFileMetaData(new MetaData),
@@ -817,6 +818,7 @@
         case FOURCC('i', 'l', 's', 't'):
         case FOURCC('s', 'i', 'n', 'f'):
         case FOURCC('s', 'c', 'h', 'i'):
+        case FOURCC('e', 'd', 't', 's'):
         {
             if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
                 ALOGV("sampleTable chunk is %d bytes long.", (size_t)chunk_size);
@@ -904,6 +906,68 @@
             break;
         }
 
+        case FOURCC('e', 'l', 's', 't'):
+        {
+            // See 14496-12 8.6.6
+            uint8_t version;
+            if (mDataSource->readAt(data_offset, &version, 1) < 1) {
+                return ERROR_IO;
+            }
+
+            uint32_t entry_count;
+            if (!mDataSource->getUInt32(data_offset + 4, &entry_count)) {
+                return ERROR_IO;
+            }
+
+            if (entry_count != 1) {
+                // we only support a single entry at the moment, for gapless playback
+                ALOGW("ignoring edit list with %d entries", entry_count);
+            } else if (mHeaderTimescale == 0) {
+                ALOGW("ignoring edit list because timescale is 0");
+            } else {
+                off64_t entriesoffset = data_offset + 8;
+                uint64_t segment_duration;
+                int64_t media_time;
+
+                if (version == 1) {
+                    if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
+                            !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
+                        return ERROR_IO;
+                    }
+                } else if (version == 0) {
+                    uint32_t sd;
+                    int32_t mt;
+                    if (!mDataSource->getUInt32(entriesoffset, &sd) ||
+                            !mDataSource->getUInt32(entriesoffset + 4, (uint32_t*)&mt)) {
+                        return ERROR_IO;
+                    }
+                    segment_duration = sd;
+                    media_time = mt;
+                } else {
+                    return ERROR_IO;
+                }
+
+                uint64_t halfscale = mHeaderTimescale / 2;
+                segment_duration = (segment_duration * 1000000 + halfscale)/ mHeaderTimescale;
+                media_time = (media_time * 1000000 + halfscale) / mHeaderTimescale;
+
+                int64_t duration;
+                int32_t samplerate;
+                if (mLastTrack->meta->findInt64(kKeyDuration, &duration) &&
+                        mLastTrack->meta->findInt32(kKeySampleRate, &samplerate)) {
+
+                    int64_t delay = (media_time  * samplerate + 500000) / 1000000;
+                    mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
+
+                    int64_t paddingus = duration - (segment_duration + media_time);
+                    int64_t paddingsamples = (paddingus * samplerate + 500000) / 1000000;
+                    mLastTrack->meta->setInt32(kKeyEncoderPadding, paddingsamples);
+                }
+            }
+            *offset += chunk_size;
+            break;
+        }
+
         case FOURCC('f', 'r', 'm', 'a'):
         {
             uint32_t original_fourcc;
@@ -1564,24 +1628,26 @@
 
         case FOURCC('m', 'v', 'h', 'd'):
         {
-            if (chunk_data_size < 12) {
+            if (chunk_data_size < 24) {
                 return ERROR_MALFORMED;
             }
 
-            uint8_t header[12];
+            uint8_t header[24];
             if (mDataSource->readAt(
                         data_offset, header, sizeof(header))
                     < (ssize_t)sizeof(header)) {
                 return ERROR_IO;
             }
 
-            int64_t creationTime;
+            uint64_t creationTime;
             if (header[0] == 1) {
                 creationTime = U64_AT(&header[4]);
+                mHeaderTimescale = U32_AT(&header[20]);
             } else if (header[0] != 0) {
                 return ERROR_MALFORMED;
             } else {
                 creationTime = U32_AT(&header[4]);
+                mHeaderTimescale = U32_AT(&header[12]);
             }
 
             String8 s;
@@ -1858,13 +1924,13 @@
         mtime = U64_AT(&buffer[12]);
         id = U32_AT(&buffer[20]);
         duration = U64_AT(&buffer[28]);
-    } else {
-        CHECK_EQ((unsigned)version, 0u);
-
+    } else if (version == 0) {
         ctime = U32_AT(&buffer[4]);
         mtime = U32_AT(&buffer[8]);
         id = U32_AT(&buffer[12]);
         duration = U32_AT(&buffer[20]);
+    } else {
+        return ERROR_UNSUPPORTED;
     }
 
     mLastTrack->meta->setInt32(kKeyTrackID, id);
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index d24337f..6248e90 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -509,7 +509,8 @@
 status_t MediaCodecList::getCodecCapabilities(
         size_t index, const char *type,
         Vector<ProfileLevel> *profileLevels,
-        Vector<uint32_t> *colorFormats) const {
+        Vector<uint32_t> *colorFormats,
+        uint32_t *flags) const {
     profileLevels->clear();
     colorFormats->clear();
 
@@ -547,6 +548,8 @@
         colorFormats->push(caps.mColorFormats.itemAt(i));
     }
 
+    *flags = caps.mFlags;
+
     return OK;
 }
 
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 5d8029c..b5d4e44 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -20,7 +20,8 @@
 
 const char *MEDIA_MIMETYPE_IMAGE_JPEG = "image/jpeg";
 
-const char *MEDIA_MIMETYPE_VIDEO_VPX = "video/x-vnd.on2.vp8";
+const char *MEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
+const char *MEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
 const char *MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
 const char *MEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
 const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 1822f07..9820ef5 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -83,6 +83,10 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
 
+    virtual status_t updateGraphicBufferInMeta(
+            node_id node, OMX_U32 port_index,
+            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
+
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer);
@@ -113,6 +117,13 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index);
 
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size);
+
 private:
     mutable Mutex mLock;
 
@@ -280,6 +291,13 @@
             node, port_index, graphicBuffer, buffer);
 }
 
+status_t MuxOMX::updateGraphicBufferInMeta(
+        node_id node, OMX_U32 port_index,
+        const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
+    return getOMX(node)->updateGraphicBufferInMeta(
+            node, port_index, graphicBuffer, buffer);
+}
+
 status_t MuxOMX::createInputSurface(
         node_id node, OMX_U32 port_index,
         sp<IGraphicBufferProducer> *bufferProducer) {
@@ -331,6 +349,15 @@
     return getOMX(node)->getExtensionIndex(node, parameter_name, index);
 }
 
+status_t MuxOMX::setInternalOption(
+        node_id node,
+        OMX_U32 port_index,
+        InternalOptionType type,
+        const void *data,
+        size_t size) {
+    return getOMX(node)->setInternalOption(node, port_index, type, data, size);
+}
+
 OMXClient::OMXClient() {
 }
 
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 9d349a1..7b37365 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1195,8 +1195,10 @@
         compressionFormat = OMX_VIDEO_CodingMPEG4;
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
         compressionFormat = OMX_VIDEO_CodingH263;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VPX, mime)) {
-        compressionFormat = OMX_VIDEO_CodingVPX;
+    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP8, mime)) {
+        compressionFormat = OMX_VIDEO_CodingVP8;
+    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP9, mime)) {
+        compressionFormat = OMX_VIDEO_CodingVP9;
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) {
         compressionFormat = OMX_VIDEO_CodingMPEG2;
     } else {
@@ -1388,8 +1390,10 @@
             "video_decoder.mpeg4", "video_encoder.mpeg4" },
         { MEDIA_MIMETYPE_VIDEO_H263,
             "video_decoder.h263", "video_encoder.h263" },
-        { MEDIA_MIMETYPE_VIDEO_VPX,
-            "video_decoder.vpx", "video_encoder.vpx" },
+        { MEDIA_MIMETYPE_VIDEO_VP8,
+            "video_decoder.vp8", "video_encoder.vp8" },
+        { MEDIA_MIMETYPE_VIDEO_VP9,
+            "video_decoder.vp9", "video_encoder.vp9" },
         { MEDIA_MIMETYPE_AUDIO_RAW,
             "audio_decoder.raw", "audio_encoder.raw" },
         { MEDIA_MIMETYPE_AUDIO_FLAC,
@@ -4563,7 +4567,7 @@
         CodecCapabilities *caps) {
     if (strncmp(componentName, "OMX.", 4)) {
         // Not an OpenMax component but a software codec.
-
+        caps->mFlags = 0;
         caps->mComponentName = componentName;
         return OK;
     }
@@ -4578,8 +4582,15 @@
 
     OMXCodec::setComponentRole(omx, node, isEncoder, mime);
 
+    caps->mFlags = 0;
     caps->mComponentName = componentName;
 
+    if (!isEncoder && !strncmp(mime, "video/", 6) &&
+            omx->storeMetaDataInBuffers(
+                    node, 1 /* port index */, OMX_TRUE) == OK) {
+        caps->mFlags |= CodecCapabilities::kFlagSupportsAdaptivePlayback;
+    }
+
     OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
     InitOMXParams(&param);
 
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 409038a..6b934d4 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -21,7 +21,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <OMX_IVCommon.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
 
 #include <ui/GraphicBuffer.h>
 #include <gui/ISurfaceComposer.h>
@@ -54,9 +54,8 @@
         ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
     }
 
-    mBufferQueue = new BufferQueue(true);
+    mBufferQueue = new BufferQueue();
     mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setSynchronousMode(true);
     mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
@@ -66,12 +65,10 @@
     // reference once the ctor ends, as that would cause the refcount of 'this'
     // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
     // that's what we create.
-    wp<BufferQueue::ConsumerListener> listener;
-    sp<BufferQueue::ConsumerListener> proxy;
-    listener = static_cast<BufferQueue::ConsumerListener*>(this);
-    proxy = new BufferQueue::ProxyConsumerListener(listener);
+    wp<ConsumerListener> listener = static_cast<ConsumerListener*>(this);
+    sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    status_t err = mBufferQueue->consumerConnect(proxy);
+    status_t err = mBufferQueue->consumerConnect(proxy, false);
     if (err != NO_ERROR) {
         ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
                 strerror(-err), err);
@@ -108,7 +105,7 @@
     Mutex::Autolock lock(mMutex);
 
     result.append(buffer);
-    mBufferQueue->dump(result);
+    mBufferQueue->dump(result, "");
 }
 
 status_t SurfaceMediaSource::setFrameRate(int32_t fps)
@@ -293,7 +290,7 @@
     // wait here till the frames come in from the client side
     while (mStarted) {
 
-        status_t err = mBufferQueue->acquireBuffer(&item);
+        status_t err = mBufferQueue->acquireBuffer(&item, 0);
         if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
             // wait for a buffer to be queued
             mFrameAvailableCondition.wait(mMutex);
@@ -305,8 +302,9 @@
 
             // First time seeing the buffer?  Added it to the SMS slot
             if (item.mGraphicBuffer != NULL) {
-                mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+                mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
             }
+            mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
 
             // check for the timing of this buffer
             if (mNumFramesReceived == 0 && !mUseAbsoluteTimestamps) {
@@ -315,7 +313,8 @@
                 if (mStartTimeNs > 0) {
                     if (item.mTimestamp < mStartTimeNs) {
                         // This frame predates start of record, discard
-                        mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
+                        mBufferQueue->releaseBuffer(
+                                item.mBuf, item.mFrameNumber, EGL_NO_DISPLAY,
                                 EGL_NO_SYNC_KHR, Fence::NO_FENCE);
                         continue;
                     }
@@ -345,17 +344,18 @@
 
     // First time seeing the buffer?  Added it to the SMS slot
     if (item.mGraphicBuffer != NULL) {
-        mBufferSlot[mCurrentSlot] = item.mGraphicBuffer;
+        mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
     }
+    mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
 
-    mCurrentBuffers.push_back(mBufferSlot[mCurrentSlot]);
+    mCurrentBuffers.push_back(mSlots[mCurrentSlot].mGraphicBuffer);
     int64_t prevTimeStamp = mCurrentTimestamp;
     mCurrentTimestamp = item.mTimestamp;
 
     mNumFramesEncoded++;
     // Pass the data to the MediaBuffer. Pass in only the metadata
 
-    passMetadataBuffer(buffer, mBufferSlot[mCurrentSlot]->handle);
+    passMetadataBuffer(buffer, mSlots[mCurrentSlot].mGraphicBuffer->handle);
 
     (*buffer)->setObserver(this);
     (*buffer)->add_ref();
@@ -405,15 +405,16 @@
     }
 
     for (int id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) {
-        if (mBufferSlot[id] == NULL) {
+        if (mSlots[id].mGraphicBuffer == NULL) {
             continue;
         }
 
-        if (bufferHandle == mBufferSlot[id]->handle) {
+        if (bufferHandle == mSlots[id].mGraphicBuffer->handle) {
             ALOGV("Slot %d returned, matches handle = %p", id,
-                    mBufferSlot[id]->handle);
+                    mSlots[id].mGraphicBuffer->handle);
 
-            mBufferQueue->releaseBuffer(id, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
+            mBufferQueue->releaseBuffer(id, mSlots[id].mFrameNumber,
+                                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
                     Fence::NO_FENCE);
 
             buffer->setObserver(0);
@@ -469,7 +470,7 @@
     mFrameAvailableCondition.signal();
 
     for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
-       mBufferSlot[i] = 0;
+       mSlots[i].mGraphicBuffer = 0;
     }
 }
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index b0df379..4db8e80 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -26,7 +26,12 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/AudioSystem.h>
+#include <media/MediaPlayerInterface.h>
+#include <hardware/audio.h>
 #include <media/stagefright/Utils.h>
+#include <media/AudioParameter.h>
 
 namespace android {
 
@@ -471,5 +476,132 @@
     return ua;
 }
 
+status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
+                           const sp<MetaData>& meta)
+{
+    int32_t sampleRate = 0;
+    int32_t bitRate = 0;
+    int32_t channelMask = 0;
+    int32_t delaySamples = 0;
+    int32_t paddingSamples = 0;
+
+    AudioParameter param = AudioParameter();
+
+    if (meta->findInt32(kKeySampleRate, &sampleRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_SAMPLE_RATE), sampleRate);
+    }
+    if (meta->findInt32(kKeyChannelMask, &channelMask)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_NUM_CHANNEL), channelMask);
+    }
+    if (meta->findInt32(kKeyBitRate, &bitRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
+    }
+    if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
+    }
+    if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
+    }
+
+    ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d,"
+          "delaySample %d, paddingSample %d", bitRate, sampleRate,
+          channelMask, delaySamples, paddingSamples);
+
+    sink->setParameters(param.toString());
+    return OK;
+}
+
+struct mime_conv_t {
+    const char* mime;
+    audio_format_t format;
+};
+
+static const struct mime_conv_t mimeLookup[] = {
+    { MEDIA_MIMETYPE_AUDIO_MPEG,        AUDIO_FORMAT_MP3 },
+    { MEDIA_MIMETYPE_AUDIO_RAW,         AUDIO_FORMAT_PCM_16_BIT },
+    { MEDIA_MIMETYPE_AUDIO_AMR_NB,      AUDIO_FORMAT_AMR_NB },
+    { MEDIA_MIMETYPE_AUDIO_AMR_WB,      AUDIO_FORMAT_AMR_WB },
+    { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
+    { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
+    { 0, AUDIO_FORMAT_INVALID }
+};
+
+status_t mapMimeToAudioFormat( audio_format_t& format, const char* mime )
+{
+const struct mime_conv_t* p = &mimeLookup[0];
+    while (p->mime != NULL) {
+        if (0 == strcasecmp(mime, p->mime)) {
+            format = p->format;
+            return OK;
+        }
+        ++p;
+    }
+
+    return BAD_VALUE;
+}
+
+bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)
+{
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
+
+    info.format = AUDIO_FORMAT_INVALID;
+    if (mapMimeToAudioFormat(info.format, mime) != OK) {
+        ALOGE(" Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format !", mime);
+        return false;
+    } else {
+        ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
+    }
+
+    if (AUDIO_FORMAT_INVALID == info.format) {
+        // can't offload if we don't know what the source format is
+        ALOGE("mime type \"%s\" not a known audio format", mime);
+        return false;
+    }
+
+    int32_t srate = -1;
+    if (!meta->findInt32(kKeySampleRate, &srate)) {
+        ALOGV("track of type '%s' does not publish sample rate", mime);
+    }
+    info.sample_rate = srate;
+
+    int32_t cmask = 0;
+    if (!meta->findInt32(kKeyChannelMask, &cmask)) {
+        ALOGV("track of type '%s' does not publish channel mask", mime);
+
+        // Try a channel count instead
+        int32_t channelCount;
+        if (!meta->findInt32(kKeyChannelCount, &channelCount)) {
+            ALOGV("track of type '%s' does not publish channel count", mime);
+        } else {
+            cmask = audio_channel_out_mask_from_count(channelCount);
+        }
+    }
+    info.channel_mask = cmask;
+
+    int64_t duration = 0;
+    if (!meta->findInt64(kKeyDuration, &duration)) {
+        ALOGV("track of type '%s' does not publish duration", mime);
+    }
+    info.duration_us = duration;
+
+    int32_t brate = -1;
+    if (!meta->findInt32(kKeyBitRate, &brate)) {
+        ALOGV("track of type '%s' does not publish bitrate", mime);
+     }
+    info.bit_rate = brate;
+
+
+    info.stream_type = AUDIO_STREAM_MUSIC;
+    info.has_video = hasVideo;
+    info.is_streaming = isStreaming;
+
+    // Check if offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming
+    return AudioSystem::isOffloadSupported(info);
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index cf50dc9..1b20cbb 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -604,6 +604,9 @@
     // To make the codec behave the same before and after a reset, we need to invalidate the
     // streaminfo struct. This does that:
     mStreamInfo->sampleRate = 0;
+
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
 }
 
 void SoftAAC2::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/codecs/aacenc/SampleCode/Android.mk b/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
index 01016e7..d06dcf6 100644
--- a/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
+++ b/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
@@ -5,7 +5,7 @@
     AAC_E_SAMPLES.c \
     ../../common/cmnMemory.c
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := AACEncTest
 
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index 4d4212f..3320688 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -457,6 +457,11 @@
     }
 }
 
+void SoftAMR::onReset() {
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
index 9a596e5..758d6ac 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
@@ -40,6 +40,7 @@
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
     virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
index db34d08..c203f77 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
@@ -5,7 +5,7 @@
     AMRWB_E_SAMPLE.c \
     ../../common/cmnMemory.c
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE := AMRWBEncTest
 
 LOCAL_ARM_MODE := arm
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 020cc0a..fb2a430 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -48,42 +48,32 @@
     { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level45 },
 };
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftMPEG4::SoftMPEG4(
         const char *name,
+        const char *componentRole,
+        OMX_VIDEO_CODINGTYPE codingType,
+        const CodecProfileLevel *profileLevels,
+        size_t numProfileLevels,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
-      mMode(MODE_MPEG4),
+    : SoftVideoDecoderOMXComponent(
+            name, componentRole, codingType, profileLevels, numProfileLevels,
+            352 /* width */, 288 /* height */, callbacks, appData, component),
+      mMode(codingType == OMX_VIDEO_CodingH263 ? MODE_H263 : MODE_MPEG4),
       mHandle(new tagvideoDecControls),
       mInputBufferCount(0),
-      mWidth(352),
-      mHeight(288),
-      mCropLeft(0),
-      mCropTop(0),
-      mCropRight(mWidth - 1),
-      mCropBottom(mHeight - 1),
       mSignalledError(false),
       mInitialized(false),
       mFramesConfigured(false),
       mNumSamplesOutput(0),
-      mOutputPortSettingsChange(NONE) {
-    if (!strcmp(name, "OMX.google.h263.decoder")) {
-        mMode = MODE_H263;
-    } else {
-        CHECK(!strcmp(name, "OMX.google.mpeg4.decoder"));
-    }
-
-    initPorts();
+      mPvTime(0) {
+    initPorts(
+            kNumInputBuffers,
+            8192 /* inputBufferSize */,
+            kNumOutputBuffers,
+            (mMode == MODE_MPEG4)
+            ? MEDIA_MIMETYPE_VIDEO_MPEG4 : MEDIA_MIMETYPE_VIDEO_H263);
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -96,219 +86,11 @@
     mHandle = NULL;
 }
 
-void SoftMPEG4::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = 0;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 8192;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType =
-        (mMode == MODE_MPEG4)
-            ? const_cast<char *>(MEDIA_MIMETYPE_VIDEO_MPEG4)
-            : const_cast<char *>(MEDIA_MIMETYPE_VIDEO_H263);
-
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-
-    def.format.video.eCompressionFormat =
-        mMode == MODE_MPEG4 ? OMX_VIDEO_CodingMPEG4 : OMX_VIDEO_CodingH263;
-
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = 1;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 status_t SoftMPEG4::initDecoder() {
     memset(mHandle, 0, sizeof(tagvideoDecControls));
     return OK;
 }
 
-OMX_ERRORTYPE SoftMPEG4::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == 0) {
-                formatParams->eCompressionFormat =
-                    (mMode == MODE_MPEG4)
-                        ? OMX_VIDEO_CodingMPEG4 : OMX_VIDEO_CodingH263;
-
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK_EQ(formatParams->nPortIndex, 1u);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoProfileLevelQuerySupported:
-        {
-            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
-                    (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
-
-            if (profileLevel->nPortIndex != 0) {  // Input port only
-                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            size_t index = profileLevel->nProfileIndex;
-            if (mMode == MODE_H263) {
-                size_t nProfileLevels =
-                    sizeof(kH263ProfileLevels) / sizeof(kH263ProfileLevels[0]);
-                if (index >= nProfileLevels) {
-                    return OMX_ErrorNoMore;
-                }
-
-                profileLevel->eProfile = kH263ProfileLevels[index].mProfile;
-                profileLevel->eLevel = kH263ProfileLevels[index].mLevel;
-            } else {
-                size_t nProfileLevels =
-                    sizeof(kM4VProfileLevels) / sizeof(kM4VProfileLevels[0]);
-                if (index >= nProfileLevels) {
-                    return OMX_ErrorNoMore;
-                }
-
-                profileLevel->eProfile = kM4VProfileLevels[index].mProfile;
-                profileLevel->eLevel = kM4VProfileLevels[index].mLevel;
-            }
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftMPEG4::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (mMode == MODE_MPEG4) {
-                if (strncmp((const char *)roleParams->cRole,
-                            "video_decoder.mpeg4",
-                            OMX_MAX_STRINGNAME_SIZE - 1)) {
-                    return OMX_ErrorUndefined;
-                }
-            } else {
-                if (strncmp((const char *)roleParams->cRole,
-                            "video_decoder.h263",
-                            OMX_MAX_STRINGNAME_SIZE - 1)) {
-                    return OMX_ErrorUndefined;
-                }
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftMPEG4::getConfig(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexConfigCommonOutputCrop:
-        {
-            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
-
-            if (rectParams->nPortIndex != 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            rectParams->nLeft = mCropLeft;
-            rectParams->nTop = mCropTop;
-            rectParams->nWidth = mCropRight - mCropLeft + 1;
-            rectParams->nHeight = mCropBottom - mCropTop + 1;
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return OMX_ErrorUnsupportedIndex;
-    }
-}
-
 void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
     if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
@@ -415,9 +197,14 @@
 
         uint32_t useExtTimestamp = (inHeader->nOffset == 0);
 
-        // decoder deals in ms, OMX in us.
-        uint32_t timestamp =
-            useExtTimestamp ? (inHeader->nTimeStamp + 500) / 1000 : 0xFFFFFFFF;
+        // decoder deals in ms (int32_t), OMX in us (int64_t)
+        // so use fake timestamp instead
+        uint32_t timestamp = 0xFFFFFFFF;
+        if (useExtTimestamp) {
+            mPvToOmxTimeMap.add(mPvTime, inHeader->nTimeStamp);
+            timestamp = mPvTime;
+            mPvTime++;
+        }
 
         int32_t bufferSize = inHeader->nFilledLen;
         int32_t tmp = bufferSize;
@@ -441,7 +228,8 @@
         }
 
         // decoder deals in ms, OMX in us.
-        outHeader->nTimeStamp = timestamp * 1000;
+        outHeader->nTimeStamp = mPvToOmxTimeMap.valueFor(timestamp);
+        mPvToOmxTimeMap.removeItem(timestamp);
 
         inHeader->nOffset += bufferSize;
         inHeader->nFilledLen = 0;
@@ -482,11 +270,11 @@
 }
 
 bool SoftMPEG4::portSettingsChanged() {
-    int32_t disp_width, disp_height;
-    PVGetVideoDimensions(mHandle, &disp_width, &disp_height);
+    uint32_t disp_width, disp_height;
+    PVGetVideoDimensions(mHandle, (int32 *)&disp_width, (int32 *)&disp_height);
 
-    int32_t buf_width, buf_height;
-    PVGetBufferDimensions(mHandle, &buf_width, &buf_height);
+    uint32_t buf_width, buf_height;
+    PVGetBufferDimensions(mHandle, (int32 *)&buf_width, (int32 *)&buf_height);
 
     CHECK_LE(disp_width, buf_width);
     CHECK_LE(disp_height, buf_height);
@@ -494,12 +282,12 @@
     ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d",
             disp_width, disp_height, buf_width, buf_height);
 
-    if (mCropRight != disp_width - 1
-            || mCropBottom != disp_height - 1) {
+    if (mCropWidth != disp_width
+            || mCropHeight != disp_height) {
         mCropLeft = 0;
         mCropTop = 0;
-        mCropRight = disp_width - 1;
-        mCropBottom = disp_height - 1;
+        mCropWidth = disp_width;
+        mCropHeight = disp_height;
 
         notify(OMX_EventPortSettingsChanged,
                1,
@@ -545,45 +333,22 @@
     }
 }
 
-void SoftMPEG4::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    if (portIndex != 1) {
-        return;
-    }
-
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
+void SoftMPEG4::onReset() {
+    SoftVideoDecoderOMXComponent::onReset();
+    mPvToOmxTimeMap.clear();
+    mSignalledError = false;
+    mFramesConfigured = false;
+    if (mInitialized) {
+        PVCleanUpVideoDecoder(mHandle);
+        mInitialized = false;
     }
 }
 
 void SoftMPEG4::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+    SoftVideoDecoderOMXComponent::updatePortDefinitions();
 
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
+    /* We have to align our width and height - this should affect stride! */
+    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
     def->nBufferSize =
         (((def->format.video.nFrameWidth + 15) & -16)
             * ((def->format.video.nFrameHeight + 15) & -16) * 3) / 2;
@@ -594,6 +359,19 @@
 android::SoftOMXComponent *createSoftOMXComponent(
         const char *name, const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData, OMX_COMPONENTTYPE **component) {
-    return new android::SoftMPEG4(name, callbacks, appData, component);
+    using namespace android;
+    if (!strcmp(name, "OMX.google.h263.decoder")) {
+        return new android::SoftMPEG4(
+                name, "video_decoder.h263", OMX_VIDEO_CodingH263,
+                kH263ProfileLevels, ARRAY_SIZE(kH263ProfileLevels),
+                callbacks, appData, component);
+    } else if (!strcmp(name, "OMX.google.mpeg4.decoder")) {
+        return new android::SoftMPEG4(
+                name, "video_decoder.mpeg4", OMX_VIDEO_CodingMPEG4,
+                kM4VProfileLevels, ARRAY_SIZE(kM4VProfileLevels),
+                callbacks, appData, component);
+    } else {
+        CHECK(!"Unknown component");
+    }
 }
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
index dff08a7..de14aaf 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
@@ -18,14 +18,18 @@
 
 #define SOFT_MPEG4_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 
 struct tagvideoDecControls;
 
 namespace android {
 
-struct SoftMPEG4 : public SimpleSoftOMXComponent {
+struct SoftMPEG4 : public SoftVideoDecoderOMXComponent {
     SoftMPEG4(const char *name,
+            const char *componentRole,
+            OMX_VIDEO_CODINGTYPE codingType,
+            const CodecProfileLevel *profileLevels,
+            size_t numProfileLevels,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
             OMX_COMPONENTTYPE **component);
@@ -33,17 +37,9 @@
 protected:
     virtual ~SoftMPEG4();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
-    virtual OMX_ERRORTYPE getConfig(OMX_INDEXTYPE index, OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
@@ -54,32 +50,23 @@
     enum {
         MODE_MPEG4,
         MODE_H263,
-
     } mMode;
 
     tagvideoDecControls *mHandle;
 
     size_t mInputBufferCount;
 
-    int32_t mWidth, mHeight;
-    int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
-
     bool mSignalledError;
     bool mInitialized;
     bool mFramesConfigured;
 
     int32_t mNumSamplesOutput;
+    int32_t mPvTime;
+    KeyedVector<int32_t, OMX_TICKS> mPvToOmxTimeMap;
 
-    enum {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    } mOutputPortSettingsChange;
-
-    void initPorts();
     status_t initDecoder();
 
-    void updatePortDefinitions();
+    virtual void updatePortDefinitions();
     bool portSettingsChanged();
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4);
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 9f25536..7c382fb 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -361,6 +361,8 @@
 void SoftMP3::onReset() {
     pvmp3_InitDecoder(mConfig, mDecoderBuf);
     mIsFirst = true;
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index a400b4c..476e986 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -29,26 +29,23 @@
 
 namespace android {
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftVPX::SoftVPX(
         const char *name,
+        const char *componentRole,
+        OMX_VIDEO_CODINGTYPE codingType,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
-      mCtx(NULL),
-      mWidth(320),
-      mHeight(240),
-      mOutputPortSettingsChange(NONE) {
-    initPorts();
+    : SoftVideoDecoderOMXComponent(
+            name, componentRole, codingType,
+            NULL /* profileLevels */, 0 /* numProfileLevels */,
+            320 /* width */, 240 /* height */, callbacks, appData, component),
+      mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
+      mCtx(NULL) {
+    initPorts(kNumBuffers, 768 * 1024 /* inputBufferSize */,
+            kNumBuffers,
+            codingType == OMX_VIDEO_CodingVP8 ? MEDIA_MIMETYPE_VIDEO_VP8 : MEDIA_MIMETYPE_VIDEO_VP9);
+
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -58,65 +55,6 @@
     mCtx = NULL;
 }
 
-void SoftVPX::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = 0;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 768 * 1024;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_VPX);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingVPX;
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = 1;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 static int GetCPUCoreCount() {
     int cpuCoreCount = 1;
 #if defined(_SC_NPROCESSORS_ONLN)
@@ -137,7 +75,9 @@
     memset(&cfg, 0, sizeof(vpx_codec_dec_cfg_t));
     cfg.threads = GetCPUCoreCount();
     if ((vpx_err = vpx_codec_dec_init(
-                (vpx_codec_ctx_t *)mCtx, &vpx_codec_vp8_dx_algo, &cfg, 0))) {
+                (vpx_codec_ctx_t *)mCtx,
+                 mMode == MODE_VP8 ? &vpx_codec_vp8_dx_algo : &vpx_codec_vp9_dx_algo,
+                 &cfg, 0))) {
         ALOGE("on2 decoder failed to initialize. (%d)", vpx_err);
         return UNKNOWN_ERROR;
     }
@@ -145,80 +85,6 @@
     return OK;
 }
 
-OMX_ERRORTYPE SoftVPX::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == 0) {
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingVPX;
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK_EQ(formatParams->nPortIndex, 1u);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftVPX::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (strncmp((const char *)roleParams->cRole,
-                        "video_decoder.vpx",
-                        OMX_MAX_STRINGNAME_SIZE - 1)) {
-                return OMX_ErrorUndefined;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
 void SoftVPX::onQueueFilled(OMX_U32 portIndex) {
     if (mOutputPortSettingsChange != NONE) {
         return;
@@ -226,6 +92,7 @@
 
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
+    bool EOSseen = false;
 
     while (!inQueue.empty() && !outQueue.empty()) {
         BufferInfo *inInfo = *inQueue.begin();
@@ -235,17 +102,20 @@
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
         if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+            EOSseen = true;
+            if (inHeader->nFilledLen == 0) {
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
 
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                outHeader->nFilledLen = 0;
+                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+                outQueue.erase(outQueue.begin());
+                outInfo->mOwnedByUs = false;
+                notifyFillBufferDone(outHeader);
+                return;
+            }
         }
 
         if (vpx_codec_decode(
@@ -266,8 +136,8 @@
         if (img != NULL) {
             CHECK_EQ(img->fmt, IMG_FMT_I420);
 
-            int32_t width = img->d_w;
-            int32_t height = img->d_h;
+            uint32_t width = img->d_w;
+            uint32_t height = img->d_h;
 
             if (width != mWidth || height != mHeight) {
                 mWidth = width;
@@ -282,7 +152,7 @@
 
             outHeader->nOffset = 0;
             outHeader->nFilledLen = (width * height * 3) / 2;
-            outHeader->nFlags = 0;
+            outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
             outHeader->nTimeStamp = inHeader->nTimeStamp;
 
             const uint8_t *srcLine = (const uint8_t *)img->planes[PLANE_Y];
@@ -325,58 +195,20 @@
     }
 }
 
-void SoftVPX::onPortFlushCompleted(OMX_U32 portIndex) {
-}
-
-void SoftVPX::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    if (portIndex != 1) {
-        return;
-    }
-
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
-    }
-}
-
-void SoftVPX::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def->nBufferSize =
-        (def->format.video.nFrameWidth
-            * def->format.video.nFrameHeight * 3) / 2;
-}
-
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
         const char *name, const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData, OMX_COMPONENTTYPE **component) {
-    return new android::SoftVPX(name, callbacks, appData, component);
+    if (!strcmp(name, "OMX.google.vp8.decoder")) {
+        return new android::SoftVPX(
+                name, "video_decoder.vp8", OMX_VIDEO_CodingVP8,
+                callbacks, appData, component);
+    } else if (!strcmp(name, "OMX.google.vp9.decoder")) {
+        return new android::SoftVPX(
+                name, "video_decoder.vp9", OMX_VIDEO_CodingVP9,
+                callbacks, appData, component);
+    } else {
+        CHECK(!"Unknown component");
+    }
 }
-
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 3e814a2..cd5eb28 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -18,12 +18,14 @@
 
 #define SOFT_VPX_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 
 namespace android {
 
-struct SoftVPX : public SimpleSoftOMXComponent {
+struct SoftVPX : public SoftVideoDecoderOMXComponent {
     SoftVPX(const char *name,
+            const char *componentRole,
+            OMX_VIDEO_CODINGTYPE codingType,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
             OMX_COMPONENTTYPE **component);
@@ -31,37 +33,22 @@
 protected:
     virtual ~SoftVPX();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
-    virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
 
 private:
     enum {
         kNumBuffers = 4
     };
 
+    enum {
+        MODE_VP8,
+        MODE_VP9
+    } mMode;
+
     void *mCtx;
 
-    int32_t mWidth;
-    int32_t mHeight;
-
-    enum {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    } mOutputPortSettingsChange;
-
-    void initPorts();
     status_t initDecoder();
 
-    void updatePortDefinitions();
-
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
 };
 
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index a92d376..4060a0a 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -12,11 +12,16 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax \
 
+ifeq ($(TARGET_DEVICE), manta)
+    LOCAL_CFLAGS += -DSURFACE_IS_BGR32
+endif
+
 LOCAL_STATIC_LIBRARIES := \
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+        libhardware \
 
 LOCAL_MODULE := libstagefright_soft_vpxenc
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index e25637a..5f2b5c8 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -20,6 +20,8 @@
 
 #include <utils/Log.h>
 
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaDefs.h>
 
@@ -81,6 +83,52 @@
     }
 }
 
+static void ConvertRGB32ToPlanar(
+        const uint8_t *src, uint8_t *dstY, int32_t width, int32_t height) {
+    CHECK((width & 1) == 0);
+    CHECK((height & 1) == 0);
+
+    uint8_t *dstU = dstY + width * height;
+    uint8_t *dstV = dstU + (width / 2) * (height / 2);
+
+    for (int32_t y = 0; y < height; ++y) {
+        for (int32_t x = 0; x < width; ++x) {
+#ifdef SURFACE_IS_BGR32
+            unsigned blue = src[4 * x];
+            unsigned green = src[4 * x + 1];
+            unsigned red= src[4 * x + 2];
+#else
+            unsigned red= src[4 * x];
+            unsigned green = src[4 * x + 1];
+            unsigned blue = src[4 * x + 2];
+#endif
+
+            unsigned luma =
+                ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+
+            dstY[x] = luma;
+
+            if ((x & 1) == 0 && (y & 1) == 0) {
+                unsigned U =
+                    ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+
+                unsigned V =
+                    ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+
+                dstU[x / 2] = U;
+                dstV[x / 2] = V;
+            }
+        }
+
+        if ((y & 1) == 0) {
+            dstU += width / 2;
+            dstV += width / 2;
+        }
+
+        src += 4 * width;
+        dstY += width;
+    }
+}
 
 SoftVPXEncoder::SoftVPXEncoder(const char *name,
                                const OMX_CALLBACKTYPE *callbacks,
@@ -99,8 +147,10 @@
       mErrorResilience(OMX_FALSE),
       mColorFormat(OMX_COLOR_FormatYUV420Planar),
       mLevel(OMX_VIDEO_VP8Level_Version0),
-      mConversionBuffer(NULL) {
-
+      mConversionBuffer(NULL),
+      mInputDataIsMeta(false),
+      mGrallocModule(NULL),
+      mKeyFrameRequested(false) {
     initPorts();
 }
 
@@ -165,8 +215,8 @@
     outputPort.eDir = OMX_DirOutput;
     outputPort.nBufferAlignment = kOutputBufferAlignment;
     outputPort.format.video.cMIMEType =
-        const_cast<char *>(MEDIA_MIMETYPE_VIDEO_VPX);
-    outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVPX;
+        const_cast<char *>(MEDIA_MIMETYPE_VIDEO_VP8);
+    outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVP8;
     outputPort.format.video.eColorFormat = OMX_COLOR_FormatUnused;
     outputPort.format.video.pNativeWindow = NULL;
     outputPort.nBufferSize = 256 * 1024;  // arbitrary
@@ -247,7 +297,7 @@
         return UNKNOWN_ERROR;
     }
 
-    if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+    if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
         if (mConversionBuffer == NULL) {
             mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
             if (mConversionBuffer == NULL) {
@@ -315,7 +365,7 @@
                 formatParams->xFramerate = (1000000/mFrameDurationUs) << 16;
                 return OMX_ErrorNone;
             } else if (formatParams->nPortIndex == kOutputPortIndex) {
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingVPX;
+                formatParams->eCompressionFormat = OMX_VIDEO_CodingVP8;
                 formatParams->eColorFormat = OMX_COLOR_FormatUnused;
                 formatParams->xFramerate = 0;
                 return OMX_ErrorNone;
@@ -427,9 +477,17 @@
                 (const OMX_VIDEO_PARAM_BITRATETYPE *)param);
 
         case OMX_IndexParamPortDefinition:
-            return internalSetPortParams(
+        {
+            OMX_ERRORTYPE err = internalSetPortParams(
                 (const OMX_PARAM_PORTDEFINITIONTYPE *)param);
 
+            if (err != OMX_ErrorNone) {
+                return err;
+            }
+
+            return SimpleSoftOMXComponent::internalSetParameter(index, param);
+        }
+
         case OMX_IndexParamVideoPortFormat:
             return internalSetFormatParams(
                 (const OMX_VIDEO_PARAM_PORTFORMATTYPE *)param);
@@ -442,11 +500,47 @@
             return internalSetProfileLevel(
                 (const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param);
 
+        case OMX_IndexVendorStartUnused:
+        {
+            // storeMetaDataInBuffers
+            const StoreMetaDataInBuffersParams *storeParam =
+                (const StoreMetaDataInBuffersParams *)param;
+
+            if (storeParam->nPortIndex != kInputPortIndex) {
+                return OMX_ErrorBadPortIndex;
+            }
+
+            mInputDataIsMeta = (storeParam->bStoreMetaData == OMX_TRUE);
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalSetParameter(index, param);
     }
 }
 
+OMX_ERRORTYPE SoftVPXEncoder::setConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params) {
+    switch (index) {
+        case OMX_IndexConfigVideoIntraVOPRefresh:
+        {
+            OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
+                (OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
+
+            if (params->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadPortIndex;
+            }
+
+            mKeyFrameRequested = params->IntraRefreshVOP;
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::setConfig(index, _params);
+    }
+}
+
 OMX_ERRORTYPE SoftVPXEncoder::internalSetProfileLevel(
         const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel) {
     if (profileAndLevel->nPortIndex != kOutputPortIndex) {
@@ -507,13 +601,17 @@
             format->eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
             format->eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
             mColorFormat = format->eColorFormat;
+
+            OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+            def->format.video.eColorFormat = mColorFormat;
+
             return OMX_ErrorNone;
         } else {
             ALOGE("Unsupported color format %i", format->eColorFormat);
             return OMX_ErrorUnsupportedSetting;
         }
     } else if (format->nPortIndex == kOutputPortIndex) {
-        if (format->eCompressionFormat == OMX_VIDEO_CodingVPX) {
+        if (format->eCompressionFormat == OMX_VIDEO_CodingVP8) {
             return OMX_ErrorNone;
         } else {
             return OMX_ErrorUnsupportedSetting;
@@ -529,7 +627,7 @@
     const char* roleText = (const char*)role->cRole;
     const size_t roleTextMaxSize = OMX_MAX_STRINGNAME_SIZE - 1;
 
-    if (strncmp(roleText, "video_encoder.vpx", roleTextMaxSize)) {
+    if (strncmp(roleText, "video_encoder.vp8", roleTextMaxSize)) {
         ALOGE("Unsupported component role");
         return OMX_ErrorBadParameter;
     }
@@ -552,11 +650,17 @@
         if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
-                mColorFormat = port->format.video.eColorFormat;
+            mColorFormat = port->format.video.eColorFormat;
         } else {
             return OMX_ErrorUnsupportedSetting;
         }
 
+        OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+        def->format.video.nFrameWidth = mWidth;
+        def->format.video.nFrameHeight = mHeight;
+        def->format.video.xFramerate = port->format.video.xFramerate;
+        def->format.video.eColorFormat = mColorFormat;
+
         return OMX_ErrorNone;
     } else if (port->nPortIndex == kOutputPortIndex) {
         mBitrate = port->format.video.nBitrate;
@@ -625,24 +729,63 @@
             return;
         }
 
-        uint8_t* source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
+        uint8_t *source =
+            inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
 
-        // NOTE: As much as nothing is known about color format
-        // when it is denoted as AndroidOpaque, it is at least
-        // assumed to be planar.
-        if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
-            ConvertSemiPlanarToPlanar(source, mConversionBuffer, mWidth, mHeight);
+        if (mInputDataIsMeta) {
+            CHECK_GE(inputBufferHeader->nFilledLen,
+                     4 + sizeof(buffer_handle_t));
+
+            uint32_t bufferType = *(uint32_t *)source;
+            CHECK_EQ(bufferType, kMetadataBufferTypeGrallocSource);
+
+            if (mGrallocModule == NULL) {
+                CHECK_EQ(0, hw_get_module(
+                            GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
+            }
+
+            const gralloc_module_t *grmodule =
+                (const gralloc_module_t *)mGrallocModule;
+
+            buffer_handle_t handle = *(buffer_handle_t *)(source + 4);
+
+            void *bits;
+            CHECK_EQ(0,
+                     grmodule->lock(
+                         grmodule, handle,
+                         GRALLOC_USAGE_SW_READ_OFTEN
+                            | GRALLOC_USAGE_SW_WRITE_NEVER,
+                         0, 0, mWidth, mHeight, &bits));
+
+            ConvertRGB32ToPlanar(
+                    (const uint8_t *)bits, mConversionBuffer, mWidth, mHeight);
+
+            source = mConversionBuffer;
+
+            CHECK_EQ(0, grmodule->unlock(grmodule, handle));
+        } else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+            ConvertSemiPlanarToPlanar(
+                    source, mConversionBuffer, mWidth, mHeight);
+
             source = mConversionBuffer;
         }
         vpx_image_t raw_frame;
         vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
                      kInputBufferAlignment, source);
-        codec_return = vpx_codec_encode(mCodecContext,
-                                        &raw_frame,
-                                        inputBufferHeader->nTimeStamp,  // in timebase units
-                                        mFrameDurationUs,  // frame duration in timebase units
-                                        0,  // frame flags
-                                        VPX_DL_REALTIME);  // encoding deadline
+
+        vpx_enc_frame_flags_t flags = 0;
+        if (mKeyFrameRequested) {
+            flags |= VPX_EFLAG_FORCE_KF;
+            mKeyFrameRequested = false;
+        }
+
+        codec_return = vpx_codec_encode(
+                mCodecContext,
+                &raw_frame,
+                inputBufferHeader->nTimeStamp,  // in timebase units
+                mFrameDurationUs,  // frame duration in timebase units
+                flags,  // frame flags
+                VPX_DL_REALTIME);  // encoding deadline
         if (codec_return != VPX_CODEC_OK) {
             ALOGE("vpx encoder failed to encode frame");
             notify(OMX_EventError,
@@ -676,6 +819,17 @@
         notifyEmptyBufferDone(inputBufferHeader);
     }
 }
+
+OMX_ERRORTYPE SoftVPXEncoder::getExtensionIndex(
+        const char *name, OMX_INDEXTYPE *index) {
+    if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
+        *index = OMX_IndexVendorStartUnused;
+        return OMX_ErrorNone;
+    }
+
+    return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
 }  // namespace android
 
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 3bc05c0..4ee5e51 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -23,6 +23,8 @@
 #include <OMX_VideoExt.h>
 #include <OMX_IndexExt.h>
 
+#include <hardware/gralloc.h>
+
 #include "vpx/vpx_encoder.h"
 #include "vpx/vpx_codec.h"
 #include "vpx/vp8cx.h"
@@ -57,14 +59,13 @@
 //    - OMX timestamps are in microseconds, therefore
 // encoder timebase is fixed to 1/1000000
 
-class SoftVPXEncoder : public SimpleSoftOMXComponent {
- public:
+struct SoftVPXEncoder : public SimpleSoftOMXComponent {
     SoftVPXEncoder(const char *name,
                    const OMX_CALLBACKTYPE *callbacks,
                    OMX_PTR appData,
                    OMX_COMPONENTTYPE **component);
 
- protected:
+protected:
     virtual ~SoftVPXEncoder();
 
     // Returns current values for requested OMX
@@ -77,13 +78,19 @@
     virtual OMX_ERRORTYPE internalSetParameter(
             OMX_INDEXTYPE index, const OMX_PTR param);
 
+    virtual OMX_ERRORTYPE setConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
     // OMX callback when buffers available
     // Note that both an input and output buffer
     // is expected to be available to carry out
     // encoding of the frame
     virtual void onQueueFilled(OMX_U32 portIndex);
 
- private:
+    virtual OMX_ERRORTYPE getExtensionIndex(
+            const char *name, OMX_INDEXTYPE *index);
+
+private:
     // number of buffers allocated per port
     static const uint32_t kNumBuffers = 4;
 
@@ -156,6 +163,11 @@
     // indeed YUV420SemiPlanar.
     uint8_t* mConversionBuffer;
 
+    bool mInputDataIsMeta;
+    const hw_module_t *mGrallocModule;
+
+    bool mKeyFrameRequested;
+
     // Initializes input and output OMX ports with sensible
     // default values.
     void initPorts();
@@ -175,7 +187,7 @@
         const OMX_VIDEO_PARAM_PORTFORMATTYPE* format);
 
     // Verifies the component role tried to be set to this OMX component is
-    // strictly video_encoder.vpx
+    // strictly video_encoder.vp8
     OMX_ERRORTYPE internalSetRoleParams(
         const OMX_PARAM_COMPONENTROLETYPE* role);
 
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 2539f98..655b2ab 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -119,7 +119,7 @@
 
 LOCAL_SHARED_LIBRARIES := libstagefright_soft_h264dec
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := decoder
 
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index 6e36651..7ddb13c 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -47,38 +47,28 @@
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel51 },
 };
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftAVC::SoftAVC(
         const char *name,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+    : SoftVideoDecoderOMXComponent(
+            name, "video_decoder.avc", OMX_VIDEO_CodingAVC,
+            kProfileLevels, ARRAY_SIZE(kProfileLevels),
+            320 /* width */, 240 /* height */, callbacks, appData, component),
       mHandle(NULL),
       mInputBufferCount(0),
-      mWidth(320),
-      mHeight(240),
       mPictureSize(mWidth * mHeight * 3 / 2),
-      mCropLeft(0),
-      mCropTop(0),
-      mCropWidth(mWidth),
-      mCropHeight(mHeight),
       mFirstPicture(NULL),
       mFirstPictureId(-1),
       mPicId(0),
       mHeadersDecoded(false),
       mEOSStatus(INPUT_DATA_AVAILABLE),
-      mOutputPortSettingsChange(NONE),
       mSignalledError(false) {
-    initPorts();
+    initPorts(
+            kNumInputBuffers, 8192 /* inputBufferSize */,
+            kNumOutputBuffers, MEDIA_MIMETYPE_VIDEO_AVC);
+
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -100,65 +90,6 @@
     delete[] mFirstPicture;
 }
 
-void SoftAVC::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = kInputPortIndex;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 8192;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_AVC);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingAVC;
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = kOutputPortIndex;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 status_t SoftAVC::initDecoder() {
     // Force decoder to output buffers in display order.
     if (H264SwDecInit(&mHandle, 0) == H264SWDEC_OK) {
@@ -167,126 +98,6 @@
     return UNKNOWN_ERROR;
 }
 
-OMX_ERRORTYPE SoftAVC::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > kOutputPortIndex) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == kInputPortIndex) {
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingAVC;
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK(formatParams->nPortIndex == kOutputPortIndex);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoProfileLevelQuerySupported:
-        {
-            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
-                    (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
-
-            if (profileLevel->nPortIndex != kInputPortIndex) {
-                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            size_t index = profileLevel->nProfileIndex;
-            size_t nProfileLevels =
-                    sizeof(kProfileLevels) / sizeof(kProfileLevels[0]);
-            if (index >= nProfileLevels) {
-                return OMX_ErrorNoMore;
-            }
-
-            profileLevel->eProfile = kProfileLevels[index].mProfile;
-            profileLevel->eLevel = kProfileLevels[index].mLevel;
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftAVC::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (strncmp((const char *)roleParams->cRole,
-                        "video_decoder.avc",
-                        OMX_MAX_STRINGNAME_SIZE - 1)) {
-                return OMX_ErrorUndefined;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > kOutputPortIndex) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftAVC::getConfig(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexConfigCommonOutputCrop:
-        {
-            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
-
-            if (rectParams->nPortIndex != 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            rectParams->nLeft = mCropLeft;
-            rectParams->nTop = mCropTop;
-            rectParams->nWidth = mCropWidth;
-            rectParams->nHeight = mCropHeight;
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return OMX_ErrorUnsupportedIndex;
-    }
-}
-
 void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
     if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
@@ -298,13 +109,21 @@
 
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
+
+    if (mHeadersDecoded) {
+        // Dequeue any already decoded output frames to free up space
+        // in the output queue.
+
+        drainAllOutputBuffers(false /* eos */);
+    }
+
     H264SwDecRet ret = H264SWDEC_PIC_RDY;
     bool portSettingsChanged = false;
     while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
             && outQueue.size() == kNumOutputBuffers) {
 
         if (mEOSStatus == INPUT_EOS_SEEN) {
-            drainAllOutputBuffers();
+            drainAllOutputBuffers(true /* eos */);
             return;
         }
 
@@ -392,15 +211,7 @@
             mFirstPictureId = -1;
         }
 
-        while (!outQueue.empty() &&
-                mHeadersDecoded &&
-                H264SwDecNextPicture(mHandle, &decodedPicture, 0)
-                    == H264SWDEC_PIC_RDY) {
-
-            int32_t picId = decodedPicture.picId;
-            uint8_t *data = (uint8_t *) decodedPicture.pOutputPicture;
-            drainOneOutputBuffer(picId, data);
-        }
+        drainAllOutputBuffers(false /* eos */);
     }
 }
 
@@ -409,8 +220,6 @@
         mWidth  = info->picWidth;
         mHeight = info->picHeight;
         mPictureSize = mWidth * mHeight * 3 / 2;
-        mCropWidth = mWidth;
-        mCropHeight = mHeight;
         updatePortDefinitions();
         notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
         mOutputPortSettingsChange = AWAITING_DISABLED;
@@ -463,43 +272,38 @@
     notifyFillBufferDone(outHeader);
 }
 
-bool SoftAVC::drainAllOutputBuffers() {
+void SoftAVC::drainAllOutputBuffers(bool eos) {
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
     H264SwDecPicture decodedPicture;
 
+    if (mHeadersDecoded) {
+        while (!outQueue.empty()
+                && H264SWDEC_PIC_RDY == H264SwDecNextPicture(
+                    mHandle, &decodedPicture, eos /* flush */)) {
+            int32_t picId = decodedPicture.picId;
+            uint8_t *data = (uint8_t *) decodedPicture.pOutputPicture;
+            drainOneOutputBuffer(picId, data);
+        }
+    }
+
+    if (!eos) {
+        return;
+    }
+
     while (!outQueue.empty()) {
         BufferInfo *outInfo = *outQueue.begin();
         outQueue.erase(outQueue.begin());
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-        if (mHeadersDecoded &&
-            H264SWDEC_PIC_RDY ==
-                H264SwDecNextPicture(mHandle, &decodedPicture, 1 /* flush */)) {
 
-            int32_t picId = decodedPicture.picId;
-            CHECK(mPicToHeaderMap.indexOfKey(picId) >= 0);
-
-            memcpy(outHeader->pBuffer + outHeader->nOffset,
-                decodedPicture.pOutputPicture,
-                mPictureSize);
-
-            OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId);
-            outHeader->nTimeStamp = header->nTimeStamp;
-            outHeader->nFlags = header->nFlags;
-            outHeader->nFilledLen = mPictureSize;
-            mPicToHeaderMap.removeItem(picId);
-            delete header;
-        } else {
-            outHeader->nTimeStamp = 0;
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-            mEOSStatus = OUTPUT_FRAMES_FLUSHED;
-        }
+        outHeader->nTimeStamp = 0;
+        outHeader->nFilledLen = 0;
+        outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
         outInfo->mOwnedByUs = false;
         notifyFillBufferDone(outHeader);
-    }
 
-    return true;
+        mEOSStatus = OUTPUT_FRAMES_FLUSHED;
+    }
 }
 
 void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
@@ -508,44 +312,9 @@
     }
 }
 
-void SoftAVC::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
-    }
-}
-
-void SoftAVC::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def->nBufferSize =
-        (def->format.video.nFrameWidth
-            * def->format.video.nFrameHeight * 3) / 2;
+void SoftAVC::onReset() {
+    SoftVideoDecoderOMXComponent::onReset();
+    mSignalledError = false;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index 879b014..ee69926 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -18,7 +18,7 @@
 
 #define SOFT_AVC_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 #include <utils/KeyedVector.h>
 
 #include "H264SwDecApi.h"
@@ -26,7 +26,7 @@
 
 namespace android {
 
-struct SoftAVC : public SimpleSoftOMXComponent {
+struct SoftAVC : public SoftVideoDecoderOMXComponent {
     SoftAVC(const char *name,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
@@ -35,22 +35,12 @@
 protected:
     virtual ~SoftAVC();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
-    virtual OMX_ERRORTYPE getConfig(OMX_INDEXTYPE index, OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
-        kInputPortIndex   = 0,
-        kOutputPortIndex  = 1,
         kNumInputBuffers  = 8,
         kNumOutputBuffers = 2,
     };
@@ -65,9 +55,7 @@
 
     size_t mInputBufferCount;
 
-    uint32_t mWidth, mHeight, mPictureSize;
-    uint32_t mCropLeft, mCropTop;
-    uint32_t mCropWidth, mCropHeight;
+    uint32_t mPictureSize;
 
     uint8_t *mFirstPicture;
     int32_t mFirstPictureId;
@@ -81,19 +69,10 @@
 
     EOSStatus mEOSStatus;
 
-    enum OutputPortSettingChange {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    };
-    OutputPortSettingChange mOutputPortSettingsChange;
-
     bool mSignalledError;
 
-    void initPorts();
     status_t initDecoder();
-    void updatePortDefinitions();
-    bool drainAllOutputBuffers();
+    void drainAllOutputBuffers(bool eos);
     void drainOneOutputBuffer(int32_t picId, uint8_t *data);
     void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
     bool handleCropRectEvent(const CropParams* crop);
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 4115324..51bb958 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -424,6 +424,8 @@
         delete mVi;
         mVi = NULL;
     }
+
+    mOutputPortSettingsChange = NONE;
 }
 
 void SoftVorbis::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
index 40c5a3c..f7a00d8 100644
--- a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
+++ b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AHierarchicalStateMachine"
+#include <utils/Log.h>
+
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
 
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index 22777a2..ebf9d8d 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -72,6 +72,10 @@
 
 ALooper::~ALooper() {
     stop();
+
+    // Since this looper is "dead" (or as good as dead by now),
+    // have ALooperRoster unregister any handlers still registered for it.
+    gLooperRoster.unregisterStaleHandlers();
 }
 
 void ALooper::setName(const char *name) {
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index ad10d2b..0c181ff 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -71,6 +71,20 @@
     mHandlers.removeItemsAt(index);
 }
 
+void ALooperRoster::unregisterStaleHandlers() {
+    Mutex::Autolock autoLock(mLock);
+
+    for (size_t i = mHandlers.size(); i-- > 0;) {
+        const HandlerInfo &info = mHandlers.valueAt(i);
+
+        sp<ALooper> looper = info.mLooper.promote();
+        if (looper == NULL) {
+            ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
+            mHandlers.removeItemsAt(i);
+        }
+    }
+}
+
 status_t ALooperRoster::postMessage(
         const sp<AMessage> &msg, int64_t delayUs) {
     Mutex::Autolock autoLock(mLock);
diff --git a/media/libstagefright/wifi-display/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
similarity index 85%
rename from media/libstagefright/wifi-display/ANetworkSession.cpp
rename to media/libstagefright/foundation/ANetworkSession.cpp
index 938d601..e629588 100644
--- a/media/libstagefright/wifi-display/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -34,10 +34,21 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/Utils.h>
 
 namespace android {
 
+static uint16_t U16_AT(const uint8_t *ptr) {
+    return ptr[0] << 8 | ptr[1];
+}
+
+static uint32_t U32_AT(const uint8_t *ptr) {
+    return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
+}
+
+static uint64_t U64_AT(const uint8_t *ptr) {
+    return ((uint64_t)U32_AT(ptr)) << 32 | U32_AT(ptr + 4);
+}
+
 static const size_t kMaxUDPSize = 1500;
 static const int32_t kMaxUDPRetries = 200;
 
@@ -56,6 +67,12 @@
 };
 
 struct ANetworkSession::Session : public RefBase {
+    enum Mode {
+        MODE_RTSP,
+        MODE_DATAGRAM,
+        MODE_WEBSOCKET,
+    };
+
     enum State {
         CONNECTING,
         CONNECTED,
@@ -85,7 +102,9 @@
     status_t sendRequest(
             const void *data, ssize_t size, bool timeValid, int64_t timeUs);
 
-    void setIsRTSPConnection(bool yesno);
+    void setMode(Mode mode);
+
+    status_t switchToWebSocketMode();
 
 protected:
     virtual ~Session();
@@ -102,7 +121,7 @@
 
     int32_t mSessionID;
     State mState;
-    bool mIsRTSPConnection;
+    Mode mMode;
     int mSocket;
     sp<AMessage> mNotify;
     bool mSawReceiveFailure, mSawSendFailure;
@@ -145,7 +164,7 @@
         const sp<AMessage> &notify)
     : mSessionID(sessionID),
       mState(state),
-      mIsRTSPConnection(false),
+      mMode(MODE_DATAGRAM),
       mSocket(s),
       mNotify(notify),
       mSawReceiveFailure(false),
@@ -209,8 +228,18 @@
     return mSocket;
 }
 
-void ANetworkSession::Session::setIsRTSPConnection(bool yesno) {
-    mIsRTSPConnection = yesno;
+void ANetworkSession::Session::setMode(Mode mode) {
+    mMode = mode;
+}
+
+status_t ANetworkSession::Session::switchToWebSocketMode() {
+    if (mState != CONNECTED || mMode != MODE_RTSP) {
+        return INVALID_OPERATION;
+    }
+
+    mMode = MODE_WEBSOCKET;
+
+    return OK;
 }
 
 sp<AMessage> ANetworkSession::Session::getNotificationMessage() const {
@@ -238,6 +267,8 @@
 
 status_t ANetworkSession::Session::readMore() {
     if (mState == DATAGRAM) {
+        CHECK_EQ(mMode, MODE_DATAGRAM);
+
         status_t err;
         do {
             sp<ABuffer> buf = new ABuffer(kMaxUDPSize);
@@ -326,7 +357,7 @@
         err = -ECONNRESET;
     }
 
-    if (!mIsRTSPConnection) {
+    if (mMode == MODE_DATAGRAM) {
         // TCP stream carrying 16-bit length-prefixed datagrams.
 
         while (mInBuffer.size() >= 2) {
@@ -350,7 +381,7 @@
 
             mInBuffer.erase(0, packetSize + 2);
         }
-    } else {
+    } else if (mMode == MODE_RTSP) {
         for (;;) {
             size_t length;
 
@@ -417,6 +448,69 @@
                 break;
             }
         }
+    } else {
+        CHECK_EQ(mMode, MODE_WEBSOCKET);
+
+        const uint8_t *data = (const uint8_t *)mInBuffer.c_str();
+        // hexdump(data, mInBuffer.size());
+
+        while (mInBuffer.size() >= 2) {
+            size_t offset = 2;
+
+            unsigned payloadLen = data[1] & 0x7f;
+            if (payloadLen == 126) {
+                if (offset + 2 > mInBuffer.size()) {
+                    break;
+                }
+
+                payloadLen = U16_AT(&data[offset]);
+                offset += 2;
+            } else if (payloadLen == 127) {
+                if (offset + 8 > mInBuffer.size()) {
+                    break;
+                }
+
+                payloadLen = U64_AT(&data[offset]);
+                offset += 8;
+            }
+
+            uint32_t mask = 0;
+            if (data[1] & 0x80) {
+                // MASK==1
+                if (offset + 4 > mInBuffer.size()) {
+                    break;
+                }
+
+                mask = U32_AT(&data[offset]);
+                offset += 4;
+            }
+
+            if (offset + payloadLen > mInBuffer.size()) {
+                break;
+            }
+
+            // We have the full message.
+
+            sp<ABuffer> packet = new ABuffer(payloadLen);
+            memcpy(packet->data(), &data[offset], payloadLen);
+
+            if (mask != 0) {
+                for (size_t i = 0; i < payloadLen; ++i) {
+                    packet->data()[i] =
+                        data[offset + i]
+                            ^ ((mask >> (8 * (3 - (i % 4)))) & 0xff);
+                }
+            }
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("sessionID", mSessionID);
+            notify->setInt32("reason", kWhatWebSocketMessage);
+            notify->setBuffer("data", packet);
+            notify->setInt32("headerByte", data[0]);
+            notify->post();
+
+            mInBuffer.erase(0, offset + payloadLen);
+        }
     }
 
     if (err != OK) {
@@ -608,13 +702,61 @@
 
     sp<ABuffer> buffer;
 
-    if (mState == CONNECTED && !mIsRTSPConnection) {
+    if (mState == CONNECTED && mMode == MODE_DATAGRAM) {
         CHECK_LE(size, 65535);
 
         buffer = new ABuffer(size + 2);
         buffer->data()[0] = size >> 8;
         buffer->data()[1] = size & 0xff;
         memcpy(buffer->data() + 2, data, size);
+    } else if (mState == CONNECTED && mMode == MODE_WEBSOCKET) {
+        static const bool kUseMask = false;  // Chromium doesn't like it.
+
+        size_t numHeaderBytes = 2 + (kUseMask ? 4 : 0);
+        if (size > 65535) {
+            numHeaderBytes += 8;
+        } else if (size > 125) {
+            numHeaderBytes += 2;
+        }
+
+        buffer = new ABuffer(numHeaderBytes + size);
+        buffer->data()[0] = 0x81;  // FIN==1 | opcode=1 (text)
+        buffer->data()[1] = kUseMask ? 0x80 : 0x00;
+
+        if (size > 65535) {
+            buffer->data()[1] |= 127;
+            buffer->data()[2] = 0x00;
+            buffer->data()[3] = 0x00;
+            buffer->data()[4] = 0x00;
+            buffer->data()[5] = 0x00;
+            buffer->data()[6] = (size >> 24) & 0xff;
+            buffer->data()[7] = (size >> 16) & 0xff;
+            buffer->data()[8] = (size >> 8) & 0xff;
+            buffer->data()[9] = size & 0xff;
+        } else if (size > 125) {
+            buffer->data()[1] |= 126;
+            buffer->data()[2] = (size >> 8) & 0xff;
+            buffer->data()[3] = size & 0xff;
+        } else {
+            buffer->data()[1] |= size;
+        }
+
+        if (kUseMask) {
+            uint32_t mask = rand();
+
+            buffer->data()[numHeaderBytes - 4] = (mask >> 24) & 0xff;
+            buffer->data()[numHeaderBytes - 3] = (mask >> 16) & 0xff;
+            buffer->data()[numHeaderBytes - 2] = (mask >> 8) & 0xff;
+            buffer->data()[numHeaderBytes - 1] = mask & 0xff;
+
+            for (size_t i = 0; i < (size_t)size; ++i) {
+                buffer->data()[numHeaderBytes + i] =
+                    ((const uint8_t *)data)[i]
+                        ^ ((mask >> (8 * (3 - (i % 4)))) & 0xff);
+            }
+        } else {
+            memcpy(buffer->data() + numHeaderBytes, data, size);
+        }
     } else {
         buffer = new ABuffer(size);
         memcpy(buffer->data(), data, size);
@@ -1001,9 +1143,9 @@
             notify);
 
     if (mode == kModeCreateTCPDatagramSessionActive) {
-        session->setIsRTSPConnection(false);
+        session->setMode(Session::MODE_DATAGRAM);
     } else if (mode == kModeCreateRTSPClient) {
-        session->setIsRTSPConnection(true);
+        session->setMode(Session::MODE_RTSP);
     }
 
     mSessions.add(session->sessionID(), session);
@@ -1080,6 +1222,19 @@
     return err;
 }
 
+status_t ANetworkSession::switchToWebSocketMode(int32_t sessionID) {
+    Mutex::Autolock autoLock(mLock);
+
+    ssize_t index = mSessions.indexOfKey(sessionID);
+
+    if (index < 0) {
+        return -ENOENT;
+    }
+
+    const sp<Session> session = mSessions.valueAt(index);
+    return session->switchToWebSocketMode();
+}
+
 void ANetworkSession::interrupt() {
     static const char dummy = 0;
 
@@ -1213,8 +1368,10 @@
                                         clientSocket,
                                         session->getNotificationMessage());
 
-                            clientSession->setIsRTSPConnection(
-                                    session->isRTSPServer());
+                            clientSession->setMode(
+                                    session->isRTSPServer()
+                                        ? Session::MODE_RTSP
+                                        : Session::MODE_DATAGRAM);
 
                             sessionsToAdd.push_back(clientSession);
                         }
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index d65e213..ad2dab5 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -10,7 +10,9 @@
     ALooper.cpp                   \
     ALooperRoster.cpp             \
     AMessage.cpp                  \
+    ANetworkSession.cpp           \
     AString.cpp                   \
+    ParsedMessage.cpp             \
     base64.cpp                    \
     hexdump.cpp
 
diff --git a/media/libstagefright/wifi-display/ParsedMessage.cpp b/media/libstagefright/foundation/ParsedMessage.cpp
similarity index 89%
rename from media/libstagefright/wifi-display/ParsedMessage.cpp
rename to media/libstagefright/foundation/ParsedMessage.cpp
index c0e60c3..049c9ad 100644
--- a/media/libstagefright/wifi-display/ParsedMessage.cpp
+++ b/media/libstagefright/foundation/ParsedMessage.cpp
@@ -19,6 +19,7 @@
 #include <ctype.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
 
 namespace android {
 
@@ -89,6 +90,7 @@
     ssize_t lastDictIndex = -1;
 
     size_t offset = 0;
+    bool headersComplete = false;
     while (offset < size) {
         size_t lineEndOffset = offset;
         while (lineEndOffset + 1 < size
@@ -113,6 +115,8 @@
         }
 
         if (lineEndOffset == offset) {
+            // An empty line separates headers from body.
+            headersComplete = true;
             offset += 2;
             break;
         }
@@ -146,12 +150,17 @@
         offset = lineEndOffset + 2;
     }
 
+    if (!headersComplete && (!noMoreData || offset == 0)) {
+        // We either saw the empty line separating headers from body
+        // or we saw at least the status line and know that no more data
+        // is going to follow.
+        return -1;
+    }
+
     for (size_t i = 0; i < mDict.size(); ++i) {
         mDict.editValueAt(i).trim();
     }
 
-    // Found the end of headers.
-
     int32_t contentLength;
     if (!findInt32("content-length", &contentLength) || contentLength < 0) {
         contentLength = 0;
@@ -168,13 +177,17 @@
     return totalLength;
 }
 
-void ParsedMessage::getRequestField(size_t index, AString *field) const {
+bool ParsedMessage::getRequestField(size_t index, AString *field) const {
     AString line;
     CHECK(findString("_", &line));
 
     size_t prevOffset = 0;
     size_t offset = 0;
     for (size_t i = 0; i <= index; ++i) {
+        if (offset >= line.size()) {
+            return false;
+        }
+
         ssize_t spacePos = line.find(" ", offset);
 
         if (spacePos < 0) {
@@ -186,11 +199,16 @@
     }
 
     field->setTo(line, prevOffset, offset - prevOffset - 1);
+
+    return true;
 }
 
 bool ParsedMessage::getStatusCode(int32_t *statusCode) const {
     AString statusCodeString;
-    getRequestField(1, &statusCodeString);
+    if (!getRequestField(1, &statusCodeString)) {
+        *statusCode = 0;
+        return false;
+    }
 
     char *end;
     *statusCode = strtol(statusCodeString.c_str(), &end, 10);
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index a3fa7a3..85bd492 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -6,16 +6,25 @@
         LiveDataSource.cpp      \
         LiveSession.cpp         \
         M3UParser.cpp           \
+        PlaylistFetcher.cpp     \
 
 LOCAL_C_INCLUDES:= \
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax \
 	$(TOP)/external/openssl/include
 
+LOCAL_SHARED_LIBRARIES := \
+        libcrypto \
+        libcutils \
+        libmedia \
+        libstagefright \
+        libstagefright_foundation \
+        libutils \
+
 LOCAL_MODULE:= libstagefright_httplive
 
 ifeq ($(TARGET_ARCH),arm)
     LOCAL_CFLAGS += -Wno-psabi
 endif
 
-include $(BUILD_STATIC_LIBRARY)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 505bdb3..e91c60b 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -18,12 +18,13 @@
 #define LOG_TAG "LiveSession"
 #include <utils/Log.h>
 
-#include "include/LiveSession.h"
+#include "LiveSession.h"
 
-#include "LiveDataSource.h"
+#include "M3UParser.h"
+#include "PlaylistFetcher.h"
 
-#include "include/M3UParser.h"
 #include "include/HTTPBase.h"
+#include "mpeg2ts/AnotherPacketSource.h"
 
 #include <cutils/properties.h>
 #include <media/stagefright/foundation/hexdump.h>
@@ -33,6 +34,8 @@
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 #include <ctype.h>
 #include <openssl/aes.h>
@@ -47,37 +50,107 @@
       mUIDValid(uidValid),
       mUID(uid),
       mInPreparationPhase(true),
-      mDataSource(new LiveDataSource),
       mHTTPDataSource(
               HTTPBase::Create(
                   (mFlags & kFlagIncognito)
                     ? HTTPBase::kFlagIncognito
                     : 0)),
       mPrevBandwidthIndex(-1),
-      mLastPlaylistFetchTimeUs(-1),
-      mSeqNumber(-1),
-      mSeekTimeUs(-1),
-      mNumRetries(0),
-      mStartOfPlayback(true),
-      mDurationUs(-1),
-      mDurationFixed(false),
-      mSeekDone(false),
-      mDisconnectPending(false),
-      mMonitorQueueGeneration(0),
-      mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY) {
+      mStreamMask(0),
+      mCheckBandwidthGeneration(0),
+      mLastDequeuedTimeUs(0ll),
+      mReconfigurationInProgress(false),
+      mDisconnectReplyID(0) {
     if (mUIDValid) {
         mHTTPDataSource->setUID(mUID);
     }
+
+    mPacketSources.add(
+            STREAMTYPE_AUDIO, new AnotherPacketSource(NULL /* meta */));
+
+    mPacketSources.add(
+            STREAMTYPE_VIDEO, new AnotherPacketSource(NULL /* meta */));
+
+    mPacketSources.add(
+            STREAMTYPE_SUBTITLES, new AnotherPacketSource(NULL /* meta */));
 }
 
 LiveSession::~LiveSession() {
 }
 
-sp<DataSource> LiveSession::getDataSource() {
-    return mDataSource;
+status_t LiveSession::dequeueAccessUnit(
+        StreamType stream, sp<ABuffer> *accessUnit) {
+    if (!(mStreamMask & stream)) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
+
+    status_t finalResult;
+    if (!packetSource->hasBufferAvailable(&finalResult)) {
+        return finalResult == OK ? -EAGAIN : finalResult;
+    }
+
+    status_t err = packetSource->dequeueAccessUnit(accessUnit);
+
+    const char *streamStr;
+    switch (stream) {
+        case STREAMTYPE_AUDIO:
+            streamStr = "audio";
+            break;
+        case STREAMTYPE_VIDEO:
+            streamStr = "video";
+            break;
+        case STREAMTYPE_SUBTITLES:
+            streamStr = "subs";
+            break;
+        default:
+            TRESPASS();
+    }
+
+    if (err == INFO_DISCONTINUITY) {
+        int32_t type;
+        CHECK((*accessUnit)->meta()->findInt32("discontinuity", &type));
+
+        sp<AMessage> extra;
+        if (!(*accessUnit)->meta()->findMessage("extra", &extra)) {
+            extra.clear();
+        }
+
+        ALOGI("[%s] read discontinuity of type %d, extra = %s",
+              streamStr,
+              type,
+              extra == NULL ? "NULL" : extra->debugString().c_str());
+    } else if (err == OK) {
+        int64_t timeUs;
+        CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
+        ALOGV("[%s] read buffer at time %lld us", streamStr, timeUs);
+
+        mLastDequeuedTimeUs = timeUs;
+    } else {
+        ALOGI("[%s] encountered error %d", streamStr, err);
+    }
+
+    return err;
 }
 
-void LiveSession::connect(
+status_t LiveSession::getStreamFormat(StreamType stream, sp<AMessage> *format) {
+    if (!(mStreamMask & stream)) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
+
+    sp<MetaData> meta = packetSource->getFormat();
+
+    if (meta == NULL) {
+        return -EAGAIN;
+    }
+
+    return convertMetaDataToMessage(meta, format);
+}
+
+void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, id());
     msg->setString("url", url);
@@ -91,55 +164,184 @@
     msg->post();
 }
 
-void LiveSession::disconnect() {
-    Mutex::Autolock autoLock(mLock);
-    mDisconnectPending = true;
+status_t LiveSession::disconnect() {
+    sp<AMessage> msg = new AMessage(kWhatDisconnect, id());
 
-    mHTTPDataSource->disconnect();
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
 
-    (new AMessage(kWhatDisconnect, id()))->post();
+    return err;
 }
 
-void LiveSession::seekTo(int64_t timeUs) {
-    Mutex::Autolock autoLock(mLock);
-    mSeekDone = false;
-
+status_t LiveSession::seekTo(int64_t timeUs) {
     sp<AMessage> msg = new AMessage(kWhatSeek, id());
     msg->setInt64("timeUs", timeUs);
-    msg->post();
 
-    while (!mSeekDone) {
-        mCondition.wait(mLock);
-    }
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    return err;
 }
 
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatConnect:
+        {
             onConnect(msg);
             break;
+        }
 
         case kWhatDisconnect:
-            onDisconnect();
-            break;
-
-        case kWhatMonitorQueue:
         {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
+            CHECK(msg->senderAwaitsResponse(&mDisconnectReplyID));
 
-            if (generation != mMonitorQueueGeneration) {
-                // Stale event
+            if (mReconfigurationInProgress) {
                 break;
             }
 
-            onMonitorQueue();
+            finishDisconnect();
             break;
         }
 
         case kWhatSeek:
-            onSeek(msg);
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            status_t err = onSeek(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+
+            response->postReply(replyID);
             break;
+        }
+
+        case kWhatFetcherNotify:
+        {
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case PlaylistFetcher::kWhatStarted:
+                    break;
+                case PlaylistFetcher::kWhatPaused:
+                case PlaylistFetcher::kWhatStopped:
+                {
+                    if (what == PlaylistFetcher::kWhatStopped) {
+                        AString uri;
+                        CHECK(msg->findString("uri", &uri));
+                        mFetcherInfos.removeItem(uri);
+                    }
+
+                    if (mContinuation != NULL) {
+                        CHECK_GT(mContinuationCounter, 0);
+                        if (--mContinuationCounter == 0) {
+                            mContinuation->post();
+                        }
+                    }
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatDurationUpdate:
+                {
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    int64_t durationUs;
+                    CHECK(msg->findInt64("durationUs", &durationUs));
+
+                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
+                    info->mDurationUs = durationUs;
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatError:
+                {
+                    status_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGE("XXX Received error %d from PlaylistFetcher.", err);
+
+                    if (mInPreparationPhase) {
+                        postPrepared(err);
+                    }
+
+                    mPacketSources.valueFor(STREAMTYPE_AUDIO)->signalEOS(err);
+
+                    mPacketSources.valueFor(STREAMTYPE_VIDEO)->signalEOS(err);
+
+                    mPacketSources.valueFor(
+                            STREAMTYPE_SUBTITLES)->signalEOS(err);
+
+                    sp<AMessage> notify = mNotify->dup();
+                    notify->setInt32("what", kWhatError);
+                    notify->setInt32("err", err);
+                    notify->post();
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatTemporarilyDoneFetching:
+                {
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
+                    info->mIsPrepared = true;
+
+                    if (mInPreparationPhase) {
+                        bool allFetchersPrepared = true;
+                        for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+                            if (!mFetcherInfos.valueAt(i).mIsPrepared) {
+                                allFetchersPrepared = false;
+                                break;
+                            }
+                        }
+
+                        if (allFetchersPrepared) {
+                            postPrepared(OK);
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            break;
+        }
+
+        case kWhatCheckBandwidth:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mCheckBandwidthGeneration) {
+                break;
+            }
+
+            onCheckBandwidth();
+            break;
+        }
+
+        case kWhatChangeConfiguration2:
+        {
+            onChangeConfiguration2(msg);
+            break;
+        }
+
+        case kWhatChangeConfiguration3:
+        {
+            onChangeConfiguration3(msg);
+            break;
+        }
+
+        case kWhatFinishDisconnect2:
+        {
+            onFinishDisconnect2();
+            break;
+        }
 
         default:
             TRESPASS();
@@ -172,48 +374,127 @@
         headers = NULL;
     }
 
+#if 1
     ALOGI("onConnect <URL suppressed>");
+#else
+    ALOGI("onConnect %s", url.c_str());
+#endif
 
     mMasterURL = url;
 
     bool dummy;
-    sp<M3UParser> playlist = fetchPlaylist(url.c_str(), &dummy);
+    mPlaylist = fetchPlaylist(url.c_str(), NULL /* curPlaylistHash */, &dummy);
 
-    if (playlist == NULL) {
+    if (mPlaylist == NULL) {
         ALOGE("unable to fetch master playlist '%s'.", url.c_str());
 
-        signalEOS(ERROR_IO);
+        postPrepared(ERROR_IO);
         return;
     }
 
-    if (playlist->isVariantPlaylist()) {
-        for (size_t i = 0; i < playlist->size(); ++i) {
+    // We trust the content provider to make a reasonable choice of preferred
+    // initial bandwidth by listing it first in the variant playlist.
+    // At startup we really don't have a good estimate on the available
+    // network bandwidth since we haven't tranferred any data yet. Once
+    // we have we can make a better informed choice.
+    size_t initialBandwidth = 0;
+    size_t initialBandwidthIndex = 0;
+
+    if (mPlaylist->isVariantPlaylist()) {
+        for (size_t i = 0; i < mPlaylist->size(); ++i) {
             BandwidthItem item;
 
+            item.mPlaylistIndex = i;
+
             sp<AMessage> meta;
-            playlist->itemAt(i, &item.mURI, &meta);
+            AString uri;
+            mPlaylist->itemAt(i, &uri, &meta);
 
             unsigned long bandwidth;
             CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));
 
+            if (initialBandwidth == 0) {
+                initialBandwidth = item.mBandwidth;
+            }
+
             mBandwidthItems.push(item);
         }
 
         CHECK_GT(mBandwidthItems.size(), 0u);
 
         mBandwidthItems.sort(SortByBandwidth);
+
+        for (size_t i = 0; i < mBandwidthItems.size(); ++i) {
+            if (mBandwidthItems.itemAt(i).mBandwidth == initialBandwidth) {
+                initialBandwidthIndex = i;
+                break;
+            }
+        }
+    } else {
+        // dummy item.
+        BandwidthItem item;
+        item.mPlaylistIndex = 0;
+        item.mBandwidth = 0;
+        mBandwidthItems.push(item);
     }
 
-    postMonitorQueue();
+    changeConfiguration(0ll /* timeUs */, initialBandwidthIndex);
 }
 
-void LiveSession::onDisconnect() {
-    ALOGI("onDisconnect");
+void LiveSession::finishDisconnect() {
+    // No reconfiguration is currently pending, make sure none will trigger
+    // during disconnection either.
+    cancelCheckBandwidthEvent();
 
-    signalEOS(ERROR_END_OF_STREAM);
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+    }
 
-    Mutex::Autolock autoLock(mLock);
-    mDisconnectPending = false;
+    sp<AMessage> msg = new AMessage(kWhatFinishDisconnect2, id());
+
+    mContinuationCounter = mFetcherInfos.size();
+    mContinuation = msg;
+
+    if (mContinuationCounter == 0) {
+        msg->post();
+    }
+}
+
+void LiveSession::onFinishDisconnect2() {
+    mContinuation.clear();
+
+    mPacketSources.valueFor(STREAMTYPE_AUDIO)->signalEOS(ERROR_END_OF_STREAM);
+    mPacketSources.valueFor(STREAMTYPE_VIDEO)->signalEOS(ERROR_END_OF_STREAM);
+
+    mPacketSources.valueFor(
+            STREAMTYPE_SUBTITLES)->signalEOS(ERROR_END_OF_STREAM);
+
+    sp<AMessage> response = new AMessage;
+    response->setInt32("err", OK);
+
+    response->postReply(mDisconnectReplyID);
+    mDisconnectReplyID = 0;
+}
+
+sp<PlaylistFetcher> LiveSession::addFetcher(const char *uri) {
+    ssize_t index = mFetcherInfos.indexOfKey(uri);
+
+    if (index >= 0) {
+        return NULL;
+    }
+
+    sp<AMessage> notify = new AMessage(kWhatFetcherNotify, id());
+    notify->setString("uri", uri);
+
+    FetcherInfo info;
+    info.mFetcher = new PlaylistFetcher(notify, this, uri);
+    info.mDurationUs = -1ll;
+    info.mIsPrepared = false;
+    looper()->registerHandler(info.mFetcher);
+
+    mFetcherInfos.add(uri, info);
+
+    return info.mFetcher;
 }
 
 status_t LiveSession::fetchFile(
@@ -229,14 +510,6 @@
             && strncasecmp(url, "https://", 8)) {
         return ERROR_UNSUPPORTED;
     } else {
-        {
-            Mutex::Autolock autoLock(mLock);
-
-            if (mDisconnectPending) {
-                return ERROR_IO;
-            }
-        }
-
         KeyedVector<String8, String8> headers = mExtraHeaders;
         if (range_offset > 0 || range_length >= 0) {
             headers.add(
@@ -315,7 +588,8 @@
     return OK;
 }
 
-sp<M3UParser> LiveSession::fetchPlaylist(const char *url, bool *unchanged) {
+sp<M3UParser> LiveSession::fetchPlaylist(
+        const char *url, uint8_t *curPlaylistHash, bool *unchanged) {
     ALOGV("fetchPlaylist '%s'", url);
 
     *unchanged = false;
@@ -339,13 +613,8 @@
 
     MD5_Final(hash, &m);
 
-    if (mPlaylist != NULL && !memcmp(hash, mPlaylistHash, 16)) {
+    if (curPlaylistHash != NULL && !memcmp(hash, curPlaylistHash, 16)) {
         // playlist unchanged
-
-        if (mRefreshState != THIRD_UNCHANGED_RELOAD_ATTEMPT) {
-            mRefreshState = (RefreshState)(mRefreshState + 1);
-        }
-
         *unchanged = true;
 
         ALOGV("Playlist unchanged, refresh state is now %d",
@@ -354,9 +623,9 @@
         return NULL;
     }
 
-    memcpy(mPlaylistHash, hash, sizeof(hash));
-
-    mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+    if (curPlaylistHash != NULL) {
+        memcpy(curPlaylistHash, hash, sizeof(hash));
+    }
 #endif
 
     sp<M3UParser> playlist =
@@ -371,37 +640,6 @@
     return playlist;
 }
 
-int64_t LiveSession::getSegmentStartTimeUs(int32_t seqNumber) const {
-    CHECK(mPlaylist != NULL);
-
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    int32_t lastSeqNumberInPlaylist =
-        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
-
-    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
-    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
-
-    int64_t segmentStartUs = 0ll;
-    for (int32_t index = 0;
-            index < seqNumber - firstSeqNumberInPlaylist; ++index) {
-        sp<AMessage> itemMeta;
-        CHECK(mPlaylist->itemAt(
-                    index, NULL /* uri */, &itemMeta));
-
-        int64_t itemDurationUs;
-        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-        segmentStartUs += itemDurationUs;
-    }
-
-    return segmentStartUs;
-}
-
 static double uniformRand() {
     return (double)rand() / RAND_MAX;
 }
@@ -412,36 +650,50 @@
     }
 
 #if 1
-    int32_t bandwidthBps;
-    if (mHTTPDataSource != NULL
-            && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
-        ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
-    } else {
-        ALOGV("no bandwidth estimate.");
-        return 0;  // Pick the lowest bandwidth stream by default.
-    }
-
     char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.httplive.max-bw", value, NULL)) {
+    ssize_t index = -1;
+    if (property_get("media.httplive.bw-index", value, NULL)) {
         char *end;
-        long maxBw = strtoul(value, &end, 10);
-        if (end > value && *end == '\0') {
-            if (maxBw > 0 && bandwidthBps > maxBw) {
-                ALOGV("bandwidth capped to %ld bps", maxBw);
-                bandwidthBps = maxBw;
-            }
+        index = strtol(value, &end, 10);
+        CHECK(end > value && *end == '\0');
+
+        if (index >= 0 && (size_t)index >= mBandwidthItems.size()) {
+            index = mBandwidthItems.size() - 1;
         }
     }
 
-    // Consider only 80% of the available bandwidth usable.
-    bandwidthBps = (bandwidthBps * 8) / 10;
+    if (index < 0) {
+        int32_t bandwidthBps;
+        if (mHTTPDataSource != NULL
+                && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
+            ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
+        } else {
+            ALOGV("no bandwidth estimate.");
+            return 0;  // Pick the lowest bandwidth stream by default.
+        }
 
-    // Pick the highest bandwidth stream below or equal to estimated bandwidth.
+        char value[PROPERTY_VALUE_MAX];
+        if (property_get("media.httplive.max-bw", value, NULL)) {
+            char *end;
+            long maxBw = strtoul(value, &end, 10);
+            if (end > value && *end == '\0') {
+                if (maxBw > 0 && bandwidthBps > maxBw) {
+                    ALOGV("bandwidth capped to %ld bps", maxBw);
+                    bandwidthBps = maxBw;
+                }
+            }
+        }
 
-    size_t index = mBandwidthItems.size() - 1;
-    while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
-                            > (size_t)bandwidthBps) {
-        --index;
+        // Consider only 80% of the available bandwidth usable.
+        bandwidthBps = (bandwidthBps * 8) / 10;
+
+        // Pick the highest bandwidth stream below or equal to estimated bandwidth.
+
+        index = mBandwidthItems.size() - 1;
+        while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
+                                > (size_t)bandwidthBps) {
+            --index;
+        }
     }
 #elif 0
     // Change bandwidth at random()
@@ -452,6 +704,8 @@
     // to lowest)
     const size_t kMinIndex = 0;
 
+    static ssize_t mPrevBandwidthIndex = -1;
+
     size_t index;
     if (mPrevBandwidthIndex < 0) {
         index = kMinIndex;
@@ -463,6 +717,7 @@
             index = kMinIndex;
         }
     }
+    mPrevBandwidthIndex = index;
 #elif 0
     // Pick the highest bandwidth stream below or equal to 1.2 Mbit/sec
 
@@ -470,559 +725,51 @@
     while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth > 1200000) {
         --index;
     }
+#elif 1
+    char value[PROPERTY_VALUE_MAX];
+    size_t index;
+    if (property_get("media.httplive.bw-index", value, NULL)) {
+        char *end;
+        index = strtoul(value, &end, 10);
+        CHECK(end > value && *end == '\0');
+
+        if (index >= mBandwidthItems.size()) {
+            index = mBandwidthItems.size() - 1;
+        }
+    } else {
+        index = 0;
+    }
 #else
     size_t index = mBandwidthItems.size() - 1;  // Highest bandwidth stream
 #endif
 
+    CHECK_GE(index, 0);
+
     return index;
 }
 
-bool LiveSession::timeToRefreshPlaylist(int64_t nowUs) const {
-    if (mPlaylist == NULL) {
-        CHECK_EQ((int)mRefreshState, (int)INITIAL_MINIMUM_RELOAD_DELAY);
-        return true;
+status_t LiveSession::onSeek(const sp<AMessage> &msg) {
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    if (!mReconfigurationInProgress) {
+        changeConfiguration(timeUs, getBandwidthIndex());
     }
 
-    int32_t targetDurationSecs;
-    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-
-    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
-
-    int64_t minPlaylistAgeUs;
-
-    switch (mRefreshState) {
-        case INITIAL_MINIMUM_RELOAD_DELAY:
-        {
-            size_t n = mPlaylist->size();
-            if (n > 0) {
-                sp<AMessage> itemMeta;
-                CHECK(mPlaylist->itemAt(n - 1, NULL /* uri */, &itemMeta));
-
-                int64_t itemDurationUs;
-                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                minPlaylistAgeUs = itemDurationUs;
-                break;
-            }
-
-            // fall through
-        }
-
-        case FIRST_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = targetDurationUs / 2;
-            break;
-        }
-
-        case SECOND_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = (targetDurationUs * 3) / 2;
-            break;
-        }
-
-        case THIRD_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = targetDurationUs * 3;
-            break;
-        }
-
-        default:
-            TRESPASS();
-            break;
-    }
-
-    return mLastPlaylistFetchTimeUs + minPlaylistAgeUs <= nowUs;
-}
-
-void LiveSession::onDownloadNext() {
-    size_t bandwidthIndex = getBandwidthIndex();
-
-rinse_repeat:
-    int64_t nowUs = ALooper::GetNowUs();
-
-    if (mLastPlaylistFetchTimeUs < 0
-            || (ssize_t)bandwidthIndex != mPrevBandwidthIndex
-            || (!mPlaylist->isComplete() && timeToRefreshPlaylist(nowUs))) {
-        AString url;
-        if (mBandwidthItems.size() > 0) {
-            url = mBandwidthItems.editItemAt(bandwidthIndex).mURI;
-        } else {
-            url = mMasterURL;
-        }
-
-        if ((ssize_t)bandwidthIndex != mPrevBandwidthIndex) {
-            // If we switch bandwidths, do not pay any heed to whether
-            // playlists changed since the last time...
-            mPlaylist.clear();
-        }
-
-        bool unchanged;
-        sp<M3UParser> playlist = fetchPlaylist(url.c_str(), &unchanged);
-        if (playlist == NULL) {
-            if (unchanged) {
-                // We succeeded in fetching the playlist, but it was
-                // unchanged from the last time we tried.
-            } else {
-                ALOGE("failed to load playlist at url '%s'", url.c_str());
-                signalEOS(ERROR_IO);
-
-                return;
-            }
-        } else {
-            mPlaylist = playlist;
-        }
-
-        if (!mDurationFixed) {
-            Mutex::Autolock autoLock(mLock);
-
-            if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
-                mDurationUs = -1;
-                mDurationFixed = true;
-            } else {
-                mDurationUs = 0;
-                for (size_t i = 0; i < mPlaylist->size(); ++i) {
-                    sp<AMessage> itemMeta;
-                    CHECK(mPlaylist->itemAt(
-                                i, NULL /* uri */, &itemMeta));
-
-                    int64_t itemDurationUs;
-                    CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                    mDurationUs += itemDurationUs;
-                }
-
-                mDurationFixed = mPlaylist->isComplete();
-            }
-        }
-
-        mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
-    }
-
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    bool seekDiscontinuity = false;
-    bool explicitDiscontinuity = false;
-    bool bandwidthChanged = false;
-
-    if (mSeekTimeUs >= 0) {
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            size_t index = 0;
-            int64_t segmentStartUs = 0;
-            while (index < mPlaylist->size()) {
-                sp<AMessage> itemMeta;
-                CHECK(mPlaylist->itemAt(
-                            index, NULL /* uri */, &itemMeta));
-
-                int64_t itemDurationUs;
-                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                if (mSeekTimeUs < segmentStartUs + itemDurationUs) {
-                    break;
-                }
-
-                segmentStartUs += itemDurationUs;
-                ++index;
-            }
-
-            if (index < mPlaylist->size()) {
-                int32_t newSeqNumber = firstSeqNumberInPlaylist + index;
-
-                ALOGI("seeking to seq no %d", newSeqNumber);
-
-                mSeqNumber = newSeqNumber;
-
-                mDataSource->reset();
-
-                // reseting the data source will have had the
-                // side effect of discarding any previously queued
-                // bandwidth change discontinuity.
-                // Therefore we'll need to treat these seek
-                // discontinuities as involving a bandwidth change
-                // even if they aren't directly.
-                seekDiscontinuity = true;
-                bandwidthChanged = true;
-            }
-        }
-
-        mSeekTimeUs = -1;
-
-        Mutex::Autolock autoLock(mLock);
-        mSeekDone = true;
-        mCondition.broadcast();
-    }
-
-    const int32_t lastSeqNumberInPlaylist =
-        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
-
-    if (mSeqNumber < 0) {
-        if (mPlaylist->isComplete()) {
-            mSeqNumber = firstSeqNumberInPlaylist;
-        } else {
-            // If this is a live session, start 3 segments from the end.
-            mSeqNumber = lastSeqNumberInPlaylist - 3;
-            if (mSeqNumber < firstSeqNumberInPlaylist) {
-                mSeqNumber = firstSeqNumberInPlaylist;
-            }
-        }
-    }
-
-    if (mSeqNumber < firstSeqNumberInPlaylist
-            || mSeqNumber > lastSeqNumberInPlaylist) {
-        if (mPrevBandwidthIndex != (ssize_t)bandwidthIndex) {
-            // Go back to the previous bandwidth.
-
-            ALOGI("new bandwidth does not have the sequence number "
-                 "we're looking for, switching back to previous bandwidth");
-
-            mLastPlaylistFetchTimeUs = -1;
-            bandwidthIndex = mPrevBandwidthIndex;
-            goto rinse_repeat;
-        }
-
-        if (!mPlaylist->isComplete() && mNumRetries < kMaxNumRetries) {
-            ++mNumRetries;
-
-            if (mSeqNumber > lastSeqNumberInPlaylist) {
-                mLastPlaylistFetchTimeUs = -1;
-                postMonitorQueue(3000000ll);
-                return;
-            }
-
-            // we've missed the boat, let's start from the lowest sequence
-            // number available and signal a discontinuity.
-
-            ALOGI("We've missed the boat, restarting playback.");
-            mSeqNumber = lastSeqNumberInPlaylist;
-            explicitDiscontinuity = true;
-
-            // fall through
-        } else {
-            ALOGE("Cannot find sequence number %d in playlist "
-                 "(contains %d - %d)",
-                 mSeqNumber, firstSeqNumberInPlaylist,
-                 firstSeqNumberInPlaylist + mPlaylist->size() - 1);
-
-            signalEOS(ERROR_END_OF_STREAM);
-            return;
-        }
-    }
-
-    mNumRetries = 0;
-
-    AString uri;
-    sp<AMessage> itemMeta;
-    CHECK(mPlaylist->itemAt(
-                mSeqNumber - firstSeqNumberInPlaylist,
-                &uri,
-                &itemMeta));
-
-    int32_t val;
-    if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
-        explicitDiscontinuity = true;
-    }
-
-    int64_t range_offset, range_length;
-    if (!itemMeta->findInt64("range-offset", &range_offset)
-            || !itemMeta->findInt64("range-length", &range_length)) {
-        range_offset = 0;
-        range_length = -1;
-    }
-
-    ALOGV("fetching segment %d from (%d .. %d)",
-          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
-
-    sp<ABuffer> buffer;
-    status_t err = fetchFile(uri.c_str(), &buffer, range_offset, range_length);
-    if (err != OK) {
-        ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
-        signalEOS(err);
-        return;
-    }
-
-    CHECK(buffer != NULL);
-
-    err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer);
-
-    if (err != OK) {
-        ALOGE("decryptBuffer failed w/ error %d", err);
-
-        signalEOS(err);
-        return;
-    }
-
-    if (buffer->size() == 0 || buffer->data()[0] != 0x47) {
-        // Not a transport stream???
-
-        ALOGE("This doesn't look like a transport stream...");
-
-        mBandwidthItems.removeAt(bandwidthIndex);
-
-        if (mBandwidthItems.isEmpty()) {
-            signalEOS(ERROR_UNSUPPORTED);
-            return;
-        }
-
-        ALOGI("Retrying with a different bandwidth stream.");
-
-        mLastPlaylistFetchTimeUs = -1;
-        bandwidthIndex = getBandwidthIndex();
-        mPrevBandwidthIndex = bandwidthIndex;
-        mSeqNumber = -1;
-
-        goto rinse_repeat;
-    }
-
-    if ((size_t)mPrevBandwidthIndex != bandwidthIndex) {
-        bandwidthChanged = true;
-    }
-
-    if (mPrevBandwidthIndex < 0) {
-        // Don't signal a bandwidth change at the very beginning of
-        // playback.
-        bandwidthChanged = false;
-    }
-
-    if (mStartOfPlayback) {
-        seekDiscontinuity = true;
-        mStartOfPlayback = false;
-    }
-
-    if (seekDiscontinuity || explicitDiscontinuity || bandwidthChanged) {
-        // Signal discontinuity.
-
-        ALOGI("queueing discontinuity (seek=%d, explicit=%d, bandwidthChanged=%d)",
-             seekDiscontinuity, explicitDiscontinuity, bandwidthChanged);
-
-        sp<ABuffer> tmp = new ABuffer(188);
-        memset(tmp->data(), 0, tmp->size());
-
-        // signal a 'hard' discontinuity for explicit or bandwidthChanged.
-        uint8_t type = (explicitDiscontinuity || bandwidthChanged) ? 1 : 0;
-
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            // If this was a live event this made no sense since
-            // we don't have access to all the segment before the current
-            // one.
-            int64_t segmentStartTimeUs = getSegmentStartTimeUs(mSeqNumber);
-            memcpy(tmp->data() + 2, &segmentStartTimeUs, sizeof(segmentStartTimeUs));
-
-            type |= 2;
-        }
-
-        tmp->data()[1] = type;
-
-        mDataSource->queueBuffer(tmp);
-    }
-
-    mDataSource->queueBuffer(buffer);
-
-    mPrevBandwidthIndex = bandwidthIndex;
-    ++mSeqNumber;
-
-    postMonitorQueue();
-}
-
-void LiveSession::signalEOS(status_t err) {
-    if (mInPreparationPhase && mNotify != NULL) {
-        sp<AMessage> notify = mNotify->dup();
-
-        notify->setInt32(
-                "what",
-                err == ERROR_END_OF_STREAM
-                    ? kWhatPrepared : kWhatPreparationFailed);
-
-        if (err != ERROR_END_OF_STREAM) {
-            notify->setInt32("err", err);
-        }
-
-        notify->post();
-
-        mInPreparationPhase = false;
-    }
-
-    mDataSource->queueEOS(err);
-}
-
-void LiveSession::onMonitorQueue() {
-    if (mSeekTimeUs >= 0
-            || mDataSource->countQueuedBuffers() < kMaxNumQueuedFragments) {
-        onDownloadNext();
-    } else {
-        if (mInPreparationPhase) {
-            if (mNotify != NULL) {
-                sp<AMessage> notify = mNotify->dup();
-                notify->setInt32("what", kWhatPrepared);
-                notify->post();
-            }
-
-            mInPreparationPhase = false;
-        }
-
-        postMonitorQueue(1000000ll);
-    }
-}
-
-status_t LiveSession::decryptBuffer(
-        size_t playlistIndex, const sp<ABuffer> &buffer) {
-    sp<AMessage> itemMeta;
-    bool found = false;
-    AString method;
-
-    for (ssize_t i = playlistIndex; i >= 0; --i) {
-        AString uri;
-        CHECK(mPlaylist->itemAt(i, &uri, &itemMeta));
-
-        if (itemMeta->findString("cipher-method", &method)) {
-            found = true;
-            break;
-        }
-    }
-
-    if (!found) {
-        method = "NONE";
-    }
-
-    if (method == "NONE") {
-        return OK;
-    } else if (!(method == "AES-128")) {
-        ALOGE("Unsupported cipher method '%s'", method.c_str());
-        return ERROR_UNSUPPORTED;
-    }
-
-    AString keyURI;
-    if (!itemMeta->findString("cipher-uri", &keyURI)) {
-        ALOGE("Missing key uri");
-        return ERROR_MALFORMED;
-    }
-
-    ssize_t index = mAESKeyForURI.indexOfKey(keyURI);
-
-    sp<ABuffer> key;
-    if (index >= 0) {
-        key = mAESKeyForURI.valueAt(index);
-    } else {
-        key = new ABuffer(16);
-
-        sp<HTTPBase> keySource =
-              HTTPBase::Create(
-                  (mFlags & kFlagIncognito)
-                    ? HTTPBase::kFlagIncognito
-                    : 0);
-
-        if (mUIDValid) {
-            keySource->setUID(mUID);
-        }
-
-        status_t err =
-            keySource->connect(
-                    keyURI.c_str(),
-                    mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
-
-        if (err == OK) {
-            size_t offset = 0;
-            while (offset < 16) {
-                ssize_t n = keySource->readAt(
-                        offset, key->data() + offset, 16 - offset);
-                if (n <= 0) {
-                    err = ERROR_IO;
-                    break;
-                }
-
-                offset += n;
-            }
-        }
-
-        if (err != OK) {
-            ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
-            return ERROR_IO;
-        }
-
-        mAESKeyForURI.add(keyURI, key);
-    }
-
-    AES_KEY aes_key;
-    if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
-        ALOGE("failed to set AES decryption key.");
-        return UNKNOWN_ERROR;
-    }
-
-    unsigned char aes_ivec[16];
-
-    AString iv;
-    if (itemMeta->findString("cipher-iv", &iv)) {
-        if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
-                || iv.size() != 16 * 2 + 2) {
-            ALOGE("malformed cipher IV '%s'.", iv.c_str());
-            return ERROR_MALFORMED;
-        }
-
-        memset(aes_ivec, 0, sizeof(aes_ivec));
-        for (size_t i = 0; i < 16; ++i) {
-            char c1 = tolower(iv.c_str()[2 + 2 * i]);
-            char c2 = tolower(iv.c_str()[3 + 2 * i]);
-            if (!isxdigit(c1) || !isxdigit(c2)) {
-                ALOGE("malformed cipher IV '%s'.", iv.c_str());
-                return ERROR_MALFORMED;
-            }
-            uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
-            uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
-
-            aes_ivec[i] = nibble1 << 4 | nibble2;
-        }
-    } else {
-        memset(aes_ivec, 0, sizeof(aes_ivec));
-        aes_ivec[15] = mSeqNumber & 0xff;
-        aes_ivec[14] = (mSeqNumber >> 8) & 0xff;
-        aes_ivec[13] = (mSeqNumber >> 16) & 0xff;
-        aes_ivec[12] = (mSeqNumber >> 24) & 0xff;
-    }
-
-    AES_cbc_encrypt(
-            buffer->data(), buffer->data(), buffer->size(),
-            &aes_key, aes_ivec, AES_DECRYPT);
-
-    // hexdump(buffer->data(), buffer->size());
-
-    size_t n = buffer->size();
-    CHECK_GT(n, 0u);
-
-    size_t pad = buffer->data()[n - 1];
-
-    CHECK_GT(pad, 0u);
-    CHECK_LE(pad, 16u);
-    CHECK_GE((size_t)n, pad);
-    for (size_t i = 0; i < pad; ++i) {
-        CHECK_EQ((unsigned)buffer->data()[n - 1 - i], pad);
-    }
-
-    n -= pad;
-
-    buffer->setRange(buffer->offset(), n);
-
     return OK;
 }
 
-void LiveSession::postMonitorQueue(int64_t delayUs) {
-    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
-    msg->setInt32("generation", ++mMonitorQueueGeneration);
-    msg->post(delayUs);
-}
-
-void LiveSession::onSeek(const sp<AMessage> &msg) {
-    int64_t timeUs;
-    CHECK(msg->findInt64("timeUs", &timeUs));
-
-    mSeekTimeUs = timeUs;
-    postMonitorQueue();
-}
-
 status_t LiveSession::getDuration(int64_t *durationUs) const {
-    Mutex::Autolock autoLock(mLock);
-    *durationUs = mDurationUs;
+    int64_t maxDurationUs = 0ll;
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        int64_t fetcherDurationUs = mFetcherInfos.valueAt(i).mDurationUs;
+
+        if (fetcherDurationUs >= 0ll && fetcherDurationUs > maxDurationUs) {
+            maxDurationUs = fetcherDurationUs;
+        }
+    }
+
+    *durationUs = maxDurationUs;
 
     return OK;
 }
@@ -1033,7 +780,326 @@
 }
 
 bool LiveSession::hasDynamicDuration() const {
-    return !mDurationFixed;
+    return false;
+}
+
+void LiveSession::changeConfiguration(int64_t timeUs, size_t bandwidthIndex) {
+    CHECK(!mReconfigurationInProgress);
+    mReconfigurationInProgress = true;
+
+    mPrevBandwidthIndex = bandwidthIndex;
+
+    ALOGV("changeConfiguration => timeUs:%lld us, bwIndex:%d",
+          timeUs, bandwidthIndex);
+
+    mPlaylist->pickRandomMediaItems();
+
+    CHECK_LT(bandwidthIndex, mBandwidthItems.size());
+    const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
+
+    uint32_t streamMask = 0;
+
+    AString audioURI;
+    if (mPlaylist->getAudioURI(item.mPlaylistIndex, &audioURI)) {
+        streamMask |= STREAMTYPE_AUDIO;
+    }
+
+    AString videoURI;
+    if (mPlaylist->getVideoURI(item.mPlaylistIndex, &videoURI)) {
+        streamMask |= STREAMTYPE_VIDEO;
+    }
+
+    AString subtitleURI;
+    if (mPlaylist->getSubtitleURI(item.mPlaylistIndex, &subtitleURI)) {
+        streamMask |= STREAMTYPE_SUBTITLES;
+    }
+
+    // Step 1, stop and discard fetchers that are no longer needed.
+    // Pause those that we'll reuse.
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        const AString &uri = mFetcherInfos.keyAt(i);
+
+        bool discardFetcher = true;
+
+        // If we're seeking, all current fetchers are discarded.
+        if (timeUs < 0ll) {
+            if (((streamMask & STREAMTYPE_AUDIO) && uri == audioURI)
+                    || ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI)
+                    || ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI)) {
+                discardFetcher = false;
+            }
+        }
+
+        if (discardFetcher) {
+            mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+        } else {
+            mFetcherInfos.valueAt(i).mFetcher->pauseAsync();
+        }
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatChangeConfiguration2, id());
+    msg->setInt32("streamMask", streamMask);
+    msg->setInt64("timeUs", timeUs);
+    if (streamMask & STREAMTYPE_AUDIO) {
+        msg->setString("audioURI", audioURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        msg->setString("videoURI", videoURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        msg->setString("subtitleURI", subtitleURI.c_str());
+    }
+
+    // Every time a fetcher acknowledges the stopAsync or pauseAsync request
+    // we'll decrement mContinuationCounter, once it reaches zero, i.e. all
+    // fetchers have completed their asynchronous operation, we'll post
+    // mContinuation, which then is handled below in onChangeConfiguration2.
+    mContinuationCounter = mFetcherInfos.size();
+    mContinuation = msg;
+
+    if (mContinuationCounter == 0) {
+        msg->post();
+    }
+}
+
+void LiveSession::onChangeConfiguration2(const sp<AMessage> &msg) {
+    mContinuation.clear();
+
+    // All fetchers are either suspended or have been removed now.
+
+    uint32_t streamMask;
+    CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+
+    AString audioURI, videoURI, subtitleURI;
+    if (streamMask & STREAMTYPE_AUDIO) {
+        CHECK(msg->findString("audioURI", &audioURI));
+        ALOGV("audioURI = '%s'", audioURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        CHECK(msg->findString("videoURI", &videoURI));
+        ALOGV("videoURI = '%s'", videoURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        CHECK(msg->findString("subtitleURI", &subtitleURI));
+        ALOGV("subtitleURI = '%s'", subtitleURI.c_str());
+    }
+
+    // Determine which decoders to shut down on the player side;
+    // a decoder has to be shut down if either
+    // 1) its streamtype was active before but no longer is.
+    // or
+    // 2) its streamtype was already active and still is but the URI
+    //    has changed.
+    uint32_t changedMask = 0;
+    if (((mStreamMask & streamMask & STREAMTYPE_AUDIO)
+                && !(audioURI == mAudioURI))
+        || (mStreamMask & ~streamMask & STREAMTYPE_AUDIO)) {
+        changedMask |= STREAMTYPE_AUDIO;
+    }
+    if (((mStreamMask & streamMask & STREAMTYPE_VIDEO)
+                && !(videoURI == mVideoURI))
+        || (mStreamMask & ~streamMask & STREAMTYPE_VIDEO)) {
+        changedMask |= STREAMTYPE_VIDEO;
+    }
+
+    if (changedMask == 0) {
+        // If nothing changed as far as the audio/video decoders
+        // are concerned we can proceed.
+        onChangeConfiguration3(msg);
+        return;
+    }
+
+    // Something changed; inform the player, which will shut down the
+    // corresponding decoders and will post the reply once that's done.
+    // Handling the reply will continue executing below in
+    // onChangeConfiguration3.
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatStreamsChanged);
+    notify->setInt32("changedMask", changedMask);
+
+    msg->setWhat(kWhatChangeConfiguration3);
+    msg->setTarget(id());
+
+    notify->setMessage("reply", msg);
+    notify->post();
+}
+
+void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
+    // All remaining fetchers are still suspended; the player has shut down
+    // any decoders that needed it.
+
+    uint32_t streamMask;
+    CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+
+    AString audioURI, videoURI, subtitleURI;
+    if (streamMask & STREAMTYPE_AUDIO) {
+        CHECK(msg->findString("audioURI", &audioURI));
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        CHECK(msg->findString("videoURI", &videoURI));
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        CHECK(msg->findString("subtitleURI", &subtitleURI));
+    }
+
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    if (timeUs < 0ll) {
+        timeUs = mLastDequeuedTimeUs;
+    }
+
+    mStreamMask = streamMask;
+    mAudioURI = audioURI;
+    mVideoURI = videoURI;
+    mSubtitleURI = subtitleURI;
+
+    // Resume all existing fetchers and assign them packet sources.
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        const AString &uri = mFetcherInfos.keyAt(i);
+
+        uint32_t resumeMask = 0;
+
+        sp<AnotherPacketSource> audioSource;
+        if ((streamMask & STREAMTYPE_AUDIO) && uri == audioURI) {
+            audioSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+            resumeMask |= STREAMTYPE_AUDIO;
+        }
+
+        sp<AnotherPacketSource> videoSource;
+        if ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI) {
+            videoSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+            resumeMask |= STREAMTYPE_VIDEO;
+        }
+
+        sp<AnotherPacketSource> subtitleSource;
+        if ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI) {
+            subtitleSource = mPacketSources.valueFor(STREAMTYPE_SUBTITLES);
+            resumeMask |= STREAMTYPE_SUBTITLES;
+        }
+
+        CHECK_NE(resumeMask, 0u);
+
+        ALOGV("resuming fetchers for mask 0x%08x", resumeMask);
+
+        streamMask &= ~resumeMask;
+
+        mFetcherInfos.valueAt(i).mFetcher->startAsync(
+                audioSource, videoSource, subtitleSource);
+    }
+
+    // streamMask now only contains the types that need a new fetcher created.
+
+    if (streamMask != 0) {
+        ALOGV("creating new fetchers for mask 0x%08x", streamMask);
+    }
+
+    while (streamMask != 0) {
+        StreamType streamType = (StreamType)(streamMask & ~(streamMask - 1));
+
+        AString uri;
+        switch (streamType) {
+            case STREAMTYPE_AUDIO:
+                uri = audioURI;
+                break;
+            case STREAMTYPE_VIDEO:
+                uri = videoURI;
+                break;
+            case STREAMTYPE_SUBTITLES:
+                uri = subtitleURI;
+                break;
+            default:
+                TRESPASS();
+        }
+
+        sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str());
+        CHECK(fetcher != NULL);
+
+        sp<AnotherPacketSource> audioSource;
+        if ((streamMask & STREAMTYPE_AUDIO) && uri == audioURI) {
+            audioSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+            audioSource->clear();
+
+            streamMask &= ~STREAMTYPE_AUDIO;
+        }
+
+        sp<AnotherPacketSource> videoSource;
+        if ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI) {
+            videoSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+            videoSource->clear();
+
+            streamMask &= ~STREAMTYPE_VIDEO;
+        }
+
+        sp<AnotherPacketSource> subtitleSource;
+        if ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI) {
+            subtitleSource = mPacketSources.valueFor(STREAMTYPE_SUBTITLES);
+            subtitleSource->clear();
+
+            streamMask &= ~STREAMTYPE_SUBTITLES;
+        }
+
+        fetcher->startAsync(audioSource, videoSource, subtitleSource, timeUs);
+    }
+
+    // All fetchers have now been started, the configuration change
+    // has completed.
+
+    scheduleCheckBandwidthEvent();
+
+    ALOGV("XXX configuration change completed.");
+
+    mReconfigurationInProgress = false;
+
+    if (mDisconnectReplyID != 0) {
+        finishDisconnect();
+    }
+}
+
+void LiveSession::scheduleCheckBandwidthEvent() {
+    sp<AMessage> msg = new AMessage(kWhatCheckBandwidth, id());
+    msg->setInt32("generation", mCheckBandwidthGeneration);
+    msg->post(10000000ll);
+}
+
+void LiveSession::cancelCheckBandwidthEvent() {
+    ++mCheckBandwidthGeneration;
+}
+
+void LiveSession::onCheckBandwidth() {
+    if (mReconfigurationInProgress) {
+        scheduleCheckBandwidthEvent();
+        return;
+    }
+
+    size_t bandwidthIndex = getBandwidthIndex();
+    if (mPrevBandwidthIndex < 0
+            || bandwidthIndex != (size_t)mPrevBandwidthIndex) {
+        changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
+    }
+
+    // Handling the kWhatCheckBandwidth event here does _not_ automatically
+    // schedule another one on return, only an explicit call to
+    // scheduleCheckBandwidthEvent will do that.
+    // This ensures that only one configuration change is ongoing at any
+    // one time, once that completes it'll schedule another check bandwidth
+    // event.
+}
+
+void LiveSession::postPrepared(status_t err) {
+    CHECK(mInPreparationPhase);
+
+    sp<AMessage> notify = mNotify->dup();
+    if (err == OK || err == ERROR_END_OF_STREAM) {
+        notify->setInt32("what", kWhatPrepared);
+    } else {
+        notify->setInt32("what", kWhatPreparationFailed);
+        notify->setInt32("err", err);
+    }
+
+    notify->post();
+
+    mInPreparationPhase = false;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
new file mode 100644
index 0000000..b134725
--- /dev/null
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIVE_SESSION_H_
+
+#define LIVE_SESSION_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <utils/String8.h>
+
+namespace android {
+
+struct ABuffer;
+struct AnotherPacketSource;
+struct DataSource;
+struct HTTPBase;
+struct LiveDataSource;
+struct M3UParser;
+struct PlaylistFetcher;
+
+struct LiveSession : public AHandler {
+    enum Flags {
+        // Don't log any URLs.
+        kFlagIncognito = 1,
+    };
+    LiveSession(
+            const sp<AMessage> &notify,
+            uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+
+    enum StreamType {
+        STREAMTYPE_AUDIO        = 1,
+        STREAMTYPE_VIDEO        = 2,
+        STREAMTYPE_SUBTITLES    = 4,
+    };
+    status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
+
+    status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
+
+    void connectAsync(
+            const char *url,
+            const KeyedVector<String8, String8> *headers = NULL);
+
+    status_t disconnect();
+
+    // Blocks until seek is complete.
+    status_t seekTo(int64_t timeUs);
+
+    status_t getDuration(int64_t *durationUs) const;
+
+    bool isSeekable() const;
+    bool hasDynamicDuration() const;
+
+    enum {
+        kWhatStreamsChanged,
+        kWhatError,
+        kWhatPrepared,
+        kWhatPreparationFailed,
+    };
+
+protected:
+    virtual ~LiveSession();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    friend struct PlaylistFetcher;
+
+    enum {
+        kWhatConnect                    = 'conn',
+        kWhatDisconnect                 = 'disc',
+        kWhatSeek                       = 'seek',
+        kWhatFetcherNotify              = 'notf',
+        kWhatCheckBandwidth             = 'bndw',
+        kWhatChangeConfiguration2       = 'chC2',
+        kWhatChangeConfiguration3       = 'chC3',
+        kWhatFinishDisconnect2          = 'fin2',
+    };
+
+    struct BandwidthItem {
+        size_t mPlaylistIndex;
+        unsigned long mBandwidth;
+    };
+
+    struct FetcherInfo {
+        sp<PlaylistFetcher> mFetcher;
+        int64_t mDurationUs;
+        bool mIsPrepared;
+    };
+
+    sp<AMessage> mNotify;
+    uint32_t mFlags;
+    bool mUIDValid;
+    uid_t mUID;
+
+    bool mInPreparationPhase;
+
+    sp<HTTPBase> mHTTPDataSource;
+    KeyedVector<String8, String8> mExtraHeaders;
+
+    AString mMasterURL;
+
+    Vector<BandwidthItem> mBandwidthItems;
+    ssize_t mPrevBandwidthIndex;
+
+    sp<M3UParser> mPlaylist;
+
+    KeyedVector<AString, FetcherInfo> mFetcherInfos;
+    AString mAudioURI, mVideoURI, mSubtitleURI;
+    uint32_t mStreamMask;
+
+    KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
+
+    int32_t mCheckBandwidthGeneration;
+
+    size_t mContinuationCounter;
+    sp<AMessage> mContinuation;
+
+    int64_t mLastDequeuedTimeUs;
+
+    bool mReconfigurationInProgress;
+    uint32_t mDisconnectReplyID;
+
+    sp<PlaylistFetcher> addFetcher(const char *uri);
+
+    void onConnect(const sp<AMessage> &msg);
+    status_t onSeek(const sp<AMessage> &msg);
+    void onFinishDisconnect2();
+
+    status_t fetchFile(
+            const char *url, sp<ABuffer> *out,
+            int64_t range_offset = 0, int64_t range_length = -1);
+
+    sp<M3UParser> fetchPlaylist(
+            const char *url, uint8_t *curPlaylistHash, bool *unchanged);
+
+    size_t getBandwidthIndex();
+
+    static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
+
+    void changeConfiguration(int64_t timeUs, size_t bandwidthIndex);
+    void onChangeConfiguration2(const sp<AMessage> &msg);
+    void onChangeConfiguration3(const sp<AMessage> &msg);
+
+    void scheduleCheckBandwidthEvent();
+    void cancelCheckBandwidthEvent();
+
+    void onCheckBandwidth();
+
+    void finishDisconnect();
+
+    void postPrepared(status_t err);
+
+    DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
+};
+
+}  // namespace android
+
+#endif  // LIVE_SESSION_H_
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 68bbca2..be66252 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -18,14 +18,153 @@
 #define LOG_TAG "M3UParser"
 #include <utils/Log.h>
 
-#include "include/M3UParser.h"
+#include "M3UParser.h"
 
+#include <cutils/properties.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 
 namespace android {
 
+struct M3UParser::MediaGroup : public RefBase {
+    enum Type {
+        TYPE_AUDIO,
+        TYPE_VIDEO,
+        TYPE_SUBS,
+    };
+
+    enum FlagBits {
+        FLAG_AUTOSELECT         = 1,
+        FLAG_DEFAULT            = 2,
+        FLAG_FORCED             = 4,
+        FLAG_HAS_LANGUAGE       = 8,
+        FLAG_HAS_URI            = 16,
+    };
+
+    MediaGroup(Type type);
+
+    Type type() const;
+
+    status_t addMedia(
+            const char *name,
+            const char *uri,
+            const char *language,
+            uint32_t flags);
+
+    bool getActiveURI(AString *uri) const;
+
+    void pickRandomMediaItems();
+
+protected:
+    virtual ~MediaGroup();
+
+private:
+    struct Media {
+        AString mName;
+        AString mURI;
+        AString mLanguage;
+        uint32_t mFlags;
+    };
+
+    Type mType;
+    Vector<Media> mMediaItems;
+
+    ssize_t mSelectedIndex;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaGroup);
+};
+
+M3UParser::MediaGroup::MediaGroup(Type type)
+    : mType(type),
+      mSelectedIndex(-1) {
+}
+
+M3UParser::MediaGroup::~MediaGroup() {
+}
+
+M3UParser::MediaGroup::Type M3UParser::MediaGroup::type() const {
+    return mType;
+}
+
+status_t M3UParser::MediaGroup::addMedia(
+        const char *name,
+        const char *uri,
+        const char *language,
+        uint32_t flags) {
+    mMediaItems.push();
+    Media &item = mMediaItems.editItemAt(mMediaItems.size() - 1);
+
+    item.mName = name;
+
+    if (uri) {
+        item.mURI = uri;
+    }
+
+    if (language) {
+        item.mLanguage = language;
+    }
+
+    item.mFlags = flags;
+
+    return OK;
+}
+
+void M3UParser::MediaGroup::pickRandomMediaItems() {
+#if 1
+    switch (mType) {
+        case TYPE_AUDIO:
+        {
+            char value[PROPERTY_VALUE_MAX];
+            if (property_get("media.httplive.audio-index", value, NULL)) {
+                char *end;
+                mSelectedIndex = strtoul(value, &end, 10);
+                CHECK(end > value && *end == '\0');
+
+                if (mSelectedIndex >= mMediaItems.size()) {
+                    mSelectedIndex = mMediaItems.size() - 1;
+                }
+            } else {
+                mSelectedIndex = 0;
+            }
+            break;
+        }
+
+        case TYPE_VIDEO:
+        {
+            mSelectedIndex = 0;
+            break;
+        }
+
+        case TYPE_SUBS:
+        {
+            mSelectedIndex = -1;
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+#else
+    mSelectedIndex = (rand() * mMediaItems.size()) / RAND_MAX;
+#endif
+}
+
+bool M3UParser::MediaGroup::getActiveURI(AString *uri) const {
+    for (size_t i = 0; i < mMediaItems.size(); ++i) {
+        if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
+            const Media &item = mMediaItems.itemAt(i);
+
+            *uri = item.mURI;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 M3UParser::M3UParser(
         const char *baseURI, const void *data, size_t size)
     : mInitCheck(NO_INIT),
@@ -92,6 +231,58 @@
     return true;
 }
 
+void M3UParser::pickRandomMediaItems() {
+    for (size_t i = 0; i < mMediaGroups.size(); ++i) {
+        mMediaGroups.valueAt(i)->pickRandomMediaItems();
+    }
+}
+
+bool M3UParser::getTypeURI(size_t index, const char *key, AString *uri) const {
+    if (!mIsVariantPlaylist) {
+        *uri = mBaseURI;
+
+        // Assume media without any more specific attribute contains
+        // audio and video, but no subtitles.
+        return !strcmp("audio", key) || !strcmp("video", key);
+    }
+
+    CHECK_LT(index, mItems.size());
+
+    sp<AMessage> meta = mItems.itemAt(index).mMeta;
+
+    AString groupID;
+    if (!meta->findString(key, &groupID)) {
+        *uri = mItems.itemAt(index).mURI;
+
+        // Assume media without any more specific attribute contains
+        // audio and video, but no subtitles.
+        return !strcmp("audio", key) || !strcmp("video", key);
+    }
+
+    sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
+    if (!group->getActiveURI(uri)) {
+        return false;
+    }
+
+    if ((*uri).empty()) {
+        *uri = mItems.itemAt(index).mURI;
+    }
+
+    return true;
+}
+
+bool M3UParser::getAudioURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "audio", uri);
+}
+
+bool M3UParser::getVideoURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "video", uri);
+}
+
+bool M3UParser::getSubtitleURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "subtitles", uri);
+}
+
 static bool MakeURL(const char *baseURL, const char *url, AString *out) {
     out->clear();
 
@@ -241,6 +432,8 @@
 
                     segmentRangeOffset = offset + length;
                 }
+            } else if (line.startsWith("#EXT-X-MEDIA")) {
+                err = parseMedia(line);
             }
 
             if (err != OK) {
@@ -322,9 +515,31 @@
     return OK;
 }
 
-// static
+// Find the next occurrence of the character "what" at or after "offset",
+// but ignore occurrences between quotation marks.
+// Return the index of the occurrence or -1 if not found.
+static ssize_t FindNextUnquoted(
+        const AString &line, char what, size_t offset) {
+    CHECK_NE((int)what, (int)'"');
+
+    bool quoted = false;
+    while (offset < line.size()) {
+        char c = line.c_str()[offset];
+
+        if (c == '"') {
+            quoted = !quoted;
+        } else if (c == what && !quoted) {
+            return offset;
+        }
+
+        ++offset;
+    }
+
+    return -1;
+}
+
 status_t M3UParser::parseStreamInf(
-        const AString &line, sp<AMessage> *meta) {
+        const AString &line, sp<AMessage> *meta) const {
     ssize_t colonPos = line.find(":");
 
     if (colonPos < 0) {
@@ -334,7 +549,7 @@
     size_t offset = colonPos + 1;
 
     while (offset < line.size()) {
-        ssize_t end = line.find(",", offset);
+        ssize_t end = FindNextUnquoted(line, ',', offset);
         if (end < 0) {
             end = line.size();
         }
@@ -371,35 +586,37 @@
                 *meta = new AMessage;
             }
             (*meta)->setInt32("bandwidth", x);
+        } else if (!strcasecmp("audio", key.c_str())
+                || !strcasecmp("video", key.c_str())
+                || !strcasecmp("subtitles", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for %s attribute, "
+                      "got '%s' instead.",
+                      key.c_str(), val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            AString groupID(val, 1, val.size() - 2);
+            ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+
+            if (groupIndex < 0) {
+                ALOGE("Undefined media group '%s' referenced in stream info.",
+                      groupID.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            key.tolower();
+            (*meta)->setString(key.c_str(), groupID.c_str());
         }
     }
 
     return OK;
 }
 
-// Find the next occurence of the character "what" at or after "offset",
-// but ignore occurences between quotation marks.
-// Return the index of the occurrence or -1 if not found.
-static ssize_t FindNextUnquoted(
-        const AString &line, char what, size_t offset) {
-    CHECK_NE((int)what, (int)'"');
-
-    bool quoted = false;
-    while (offset < line.size()) {
-        char c = line.c_str()[offset];
-
-        if (c == '"') {
-            quoted = !quoted;
-        } else if (c == what && !quoted) {
-            return offset;
-        }
-
-        ++offset;
-    }
-
-    return -1;
-}
-
 // static
 status_t M3UParser::parseCipherInfo(
         const AString &line, sp<AMessage> *meta, const AString &baseURI) {
@@ -515,6 +732,234 @@
     return OK;
 }
 
+status_t M3UParser::parseMedia(const AString &line) {
+    ssize_t colonPos = line.find(":");
+
+    if (colonPos < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    bool haveGroupType = false;
+    MediaGroup::Type groupType = MediaGroup::TYPE_AUDIO;
+
+    bool haveGroupID = false;
+    AString groupID;
+
+    bool haveGroupLanguage = false;
+    AString groupLanguage;
+
+    bool haveGroupName = false;
+    AString groupName;
+
+    bool haveGroupAutoselect = false;
+    bool groupAutoselect = false;
+
+    bool haveGroupDefault = false;
+    bool groupDefault = false;
+
+    bool haveGroupForced = false;
+    bool groupForced = false;
+
+    bool haveGroupURI = false;
+    AString groupURI;
+
+    size_t offset = colonPos + 1;
+
+    while (offset < line.size()) {
+        ssize_t end = FindNextUnquoted(line, ',', offset);
+        if (end < 0) {
+            end = line.size();
+        }
+
+        AString attr(line, offset, end - offset);
+        attr.trim();
+
+        offset = end + 1;
+
+        ssize_t equalPos = attr.find("=");
+        if (equalPos < 0) {
+            continue;
+        }
+
+        AString key(attr, 0, equalPos);
+        key.trim();
+
+        AString val(attr, equalPos + 1, attr.size() - equalPos - 1);
+        val.trim();
+
+        ALOGV("key=%s value=%s", key.c_str(), val.c_str());
+
+        if (!strcasecmp("type", key.c_str())) {
+            if (!strcasecmp("subtitles", val.c_str())) {
+                groupType = MediaGroup::TYPE_SUBS;
+            } else if (!strcasecmp("audio", val.c_str())) {
+                groupType = MediaGroup::TYPE_AUDIO;
+            } else if (!strcasecmp("video", val.c_str())) {
+                groupType = MediaGroup::TYPE_VIDEO;
+            } else {
+                ALOGE("Invalid media group type '%s'", val.c_str());
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupType = true;
+        } else if (!strcasecmp("group-id", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for GROUP-ID, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupID.setTo(val, 1, val.size() - 2);
+            haveGroupID = true;
+        } else if (!strcasecmp("language", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for LANGUAGE, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupLanguage.setTo(val, 1, val.size() - 2);
+            haveGroupLanguage = true;
+        } else if (!strcasecmp("name", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for NAME, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupName.setTo(val, 1, val.size() - 2);
+            haveGroupName = true;
+        } else if (!strcasecmp("autoselect", key.c_str())) {
+            groupAutoselect = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupAutoselect = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupAutoselect = false;
+            } else {
+                ALOGE("Expected YES or NO for AUTOSELECT attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupAutoselect = true;
+        } else if (!strcasecmp("default", key.c_str())) {
+            groupDefault = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupDefault = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupDefault = false;
+            } else {
+                ALOGE("Expected YES or NO for DEFAULT attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupDefault = true;
+        } else if (!strcasecmp("forced", key.c_str())) {
+            groupForced = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupForced = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupForced = false;
+            } else {
+                ALOGE("Expected YES or NO for FORCED attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupForced = true;
+        } else if (!strcasecmp("uri", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for URI, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            AString tmp(val, 1, val.size() - 2);
+
+            if (!MakeURL(mBaseURI.c_str(), tmp.c_str(), &groupURI)) {
+                ALOGI("Failed to make absolute URI from '%s'.", tmp.c_str());
+            }
+
+            haveGroupURI = true;
+        }
+    }
+
+    if (!haveGroupType || !haveGroupID || !haveGroupName) {
+        ALOGE("Incomplete EXT-X-MEDIA element.");
+        return ERROR_MALFORMED;
+    }
+
+    uint32_t flags = 0;
+    if (haveGroupAutoselect && groupAutoselect) {
+        flags |= MediaGroup::FLAG_AUTOSELECT;
+    }
+    if (haveGroupDefault && groupDefault) {
+        flags |= MediaGroup::FLAG_DEFAULT;
+    }
+    if (haveGroupForced) {
+        if (groupType != MediaGroup::TYPE_SUBS) {
+            ALOGE("The FORCED attribute MUST not be present on anything "
+                  "but SUBS media.");
+
+            return ERROR_MALFORMED;
+        }
+
+        if (groupForced) {
+            flags |= MediaGroup::FLAG_FORCED;
+        }
+    }
+    if (haveGroupLanguage) {
+        flags |= MediaGroup::FLAG_HAS_LANGUAGE;
+    }
+    if (haveGroupURI) {
+        flags |= MediaGroup::FLAG_HAS_URI;
+    }
+
+    ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+    sp<MediaGroup> group;
+
+    if (groupIndex < 0) {
+        group = new MediaGroup(groupType);
+        mMediaGroups.add(groupID, group);
+    } else {
+        group = mMediaGroups.valueAt(groupIndex);
+
+        if (group->type() != groupType) {
+            ALOGE("Attempt to put media item under group of different type "
+                  "(groupType = %d, item type = %d",
+                  group->type(),
+                  groupType);
+
+            return ERROR_MALFORMED;
+        }
+    }
+
+    return group->addMedia(
+            groupName.c_str(),
+            haveGroupURI ? groupURI.c_str() : NULL,
+            haveGroupLanguage ? groupLanguage.c_str() : NULL,
+            flags);
+}
+
 // static
 status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
     char *end;
diff --git a/media/libstagefright/include/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
similarity index 80%
rename from media/libstagefright/include/M3UParser.h
rename to media/libstagefright/httplive/M3UParser.h
index 2d2f50f..abea286 100644
--- a/media/libstagefright/include/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -40,10 +40,18 @@
     size_t size();
     bool itemAt(size_t index, AString *uri, sp<AMessage> *meta = NULL);
 
+    void pickRandomMediaItems();
+
+    bool getAudioURI(size_t index, AString *uri) const;
+    bool getVideoURI(size_t index, AString *uri) const;
+    bool getSubtitleURI(size_t index, AString *uri) const;
+
 protected:
     virtual ~M3UParser();
 
 private:
+    struct MediaGroup;
+
     struct Item {
         AString mURI;
         sp<AMessage> mMeta;
@@ -60,6 +68,9 @@
     sp<AMessage> mMeta;
     Vector<Item> mItems;
 
+    // Media groups keyed by group ID.
+    KeyedVector<AString, sp<MediaGroup> > mMediaGroups;
+
     status_t parse(const void *data, size_t size);
 
     static status_t parseMetaData(
@@ -68,8 +79,8 @@
     static status_t parseMetaDataDuration(
             const AString &line, sp<AMessage> *meta, const char *key);
 
-    static status_t parseStreamInf(
-            const AString &line, sp<AMessage> *meta);
+    status_t parseStreamInf(
+            const AString &line, sp<AMessage> *meta) const;
 
     static status_t parseCipherInfo(
             const AString &line, sp<AMessage> *meta, const AString &baseURI);
@@ -78,6 +89,10 @@
             const AString &line, uint64_t curOffset,
             uint64_t *length, uint64_t *offset);
 
+    status_t parseMedia(const AString &line);
+
+    bool getTypeURI(size_t index, const char *key, AString *uri) const;
+
     static status_t ParseInt32(const char *s, int32_t *x);
     static status_t ParseDouble(const char *s, double *x);
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
new file mode 100644
index 0000000..8ae70b7
--- /dev/null
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PlaylistFetcher"
+#include <utils/Log.h>
+
+#include "PlaylistFetcher.h"
+
+#include "LiveDataSource.h"
+#include "LiveSession.h"
+#include "M3UParser.h"
+
+#include "include/avc_utils.h"
+#include "include/HTTPBase.h"
+#include "include/ID3.h"
+#include "mpeg2ts/AnotherPacketSource.h"
+
+#include <media/IStreamSource.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+#include <ctype.h>
+#include <openssl/aes.h>
+#include <openssl/md5.h>
+
+namespace android {
+
+// static
+const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
+
+PlaylistFetcher::PlaylistFetcher(
+        const sp<AMessage> &notify,
+        const sp<LiveSession> &session,
+        const char *uri)
+    : mNotify(notify),
+      mSession(session),
+      mURI(uri),
+      mStreamTypeMask(0),
+      mStartTimeUs(-1ll),
+      mLastPlaylistFetchTimeUs(-1ll),
+      mSeqNumber(-1),
+      mNumRetries(0),
+      mStartup(true),
+      mNextPTSTimeUs(-1ll),
+      mMonitorQueueGeneration(0),
+      mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
+      mFirstPTSValid(false),
+      mAbsoluteTimeAnchorUs(0ll) {
+    memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
+}
+
+PlaylistFetcher::~PlaylistFetcher() {
+}
+
+int64_t PlaylistFetcher::getSegmentStartTimeUs(int32_t seqNumber) const {
+    CHECK(mPlaylist != NULL);
+
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    int32_t lastSeqNumberInPlaylist =
+        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
+    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
+
+    int64_t segmentStartUs = 0ll;
+    for (int32_t index = 0;
+            index < seqNumber - firstSeqNumberInPlaylist; ++index) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        segmentStartUs += itemDurationUs;
+    }
+
+    return segmentStartUs;
+}
+
+bool PlaylistFetcher::timeToRefreshPlaylist(int64_t nowUs) const {
+    if (mPlaylist == NULL) {
+        CHECK_EQ((int)mRefreshState, (int)INITIAL_MINIMUM_RELOAD_DELAY);
+        return true;
+    }
+
+    int32_t targetDurationSecs;
+    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+
+    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+
+    int64_t minPlaylistAgeUs;
+
+    switch (mRefreshState) {
+        case INITIAL_MINIMUM_RELOAD_DELAY:
+        {
+            size_t n = mPlaylist->size();
+            if (n > 0) {
+                sp<AMessage> itemMeta;
+                CHECK(mPlaylist->itemAt(n - 1, NULL /* uri */, &itemMeta));
+
+                int64_t itemDurationUs;
+                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+                minPlaylistAgeUs = itemDurationUs;
+                break;
+            }
+
+            // fall through
+        }
+
+        case FIRST_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = targetDurationUs / 2;
+            break;
+        }
+
+        case SECOND_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = (targetDurationUs * 3) / 2;
+            break;
+        }
+
+        case THIRD_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = targetDurationUs * 3;
+            break;
+        }
+
+        default:
+            TRESPASS();
+            break;
+    }
+
+    return mLastPlaylistFetchTimeUs + minPlaylistAgeUs <= nowUs;
+}
+
+status_t PlaylistFetcher::decryptBuffer(
+        size_t playlistIndex, const sp<ABuffer> &buffer) {
+    sp<AMessage> itemMeta;
+    bool found = false;
+    AString method;
+
+    for (ssize_t i = playlistIndex; i >= 0; --i) {
+        AString uri;
+        CHECK(mPlaylist->itemAt(i, &uri, &itemMeta));
+
+        if (itemMeta->findString("cipher-method", &method)) {
+            found = true;
+            break;
+        }
+    }
+
+    if (!found) {
+        method = "NONE";
+    }
+
+    if (method == "NONE") {
+        return OK;
+    } else if (!(method == "AES-128")) {
+        ALOGE("Unsupported cipher method '%s'", method.c_str());
+        return ERROR_UNSUPPORTED;
+    }
+
+    AString keyURI;
+    if (!itemMeta->findString("cipher-uri", &keyURI)) {
+        ALOGE("Missing key uri");
+        return ERROR_MALFORMED;
+    }
+
+    ssize_t index = mAESKeyForURI.indexOfKey(keyURI);
+
+    sp<ABuffer> key;
+    if (index >= 0) {
+        key = mAESKeyForURI.valueAt(index);
+    } else {
+        status_t err = mSession->fetchFile(keyURI.c_str(), &key);
+
+        if (err != OK) {
+            ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
+            return ERROR_IO;
+        } else if (key->size() != 16) {
+            ALOGE("key file '%s' wasn't 16 bytes in size.", keyURI.c_str());
+            return ERROR_MALFORMED;
+        }
+
+        mAESKeyForURI.add(keyURI, key);
+    }
+
+    AES_KEY aes_key;
+    if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
+        ALOGE("failed to set AES decryption key.");
+        return UNKNOWN_ERROR;
+    }
+
+    unsigned char aes_ivec[16];
+
+    AString iv;
+    if (itemMeta->findString("cipher-iv", &iv)) {
+        if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
+                || iv.size() != 16 * 2 + 2) {
+            ALOGE("malformed cipher IV '%s'.", iv.c_str());
+            return ERROR_MALFORMED;
+        }
+
+        memset(aes_ivec, 0, sizeof(aes_ivec));
+        for (size_t i = 0; i < 16; ++i) {
+            char c1 = tolower(iv.c_str()[2 + 2 * i]);
+            char c2 = tolower(iv.c_str()[3 + 2 * i]);
+            if (!isxdigit(c1) || !isxdigit(c2)) {
+                ALOGE("malformed cipher IV '%s'.", iv.c_str());
+                return ERROR_MALFORMED;
+            }
+            uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
+            uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
+
+            aes_ivec[i] = nibble1 << 4 | nibble2;
+        }
+    } else {
+        memset(aes_ivec, 0, sizeof(aes_ivec));
+        aes_ivec[15] = mSeqNumber & 0xff;
+        aes_ivec[14] = (mSeqNumber >> 8) & 0xff;
+        aes_ivec[13] = (mSeqNumber >> 16) & 0xff;
+        aes_ivec[12] = (mSeqNumber >> 24) & 0xff;
+    }
+
+    AES_cbc_encrypt(
+            buffer->data(), buffer->data(), buffer->size(),
+            &aes_key, aes_ivec, AES_DECRYPT);
+
+    // hexdump(buffer->data(), buffer->size());
+
+    size_t n = buffer->size();
+    CHECK_GT(n, 0u);
+
+    size_t pad = buffer->data()[n - 1];
+
+    CHECK_GT(pad, 0u);
+    CHECK_LE(pad, 16u);
+    CHECK_GE((size_t)n, pad);
+    for (size_t i = 0; i < pad; ++i) {
+        CHECK_EQ((unsigned)buffer->data()[n - 1 - i], pad);
+    }
+
+    n -= pad;
+
+    buffer->setRange(buffer->offset(), n);
+
+    return OK;
+}
+
+void PlaylistFetcher::postMonitorQueue(int64_t delayUs) {
+    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
+    msg->setInt32("generation", mMonitorQueueGeneration);
+    msg->post(delayUs);
+}
+
+void PlaylistFetcher::cancelMonitorQueue() {
+    ++mMonitorQueueGeneration;
+}
+
+void PlaylistFetcher::startAsync(
+        const sp<AnotherPacketSource> &audioSource,
+        const sp<AnotherPacketSource> &videoSource,
+        const sp<AnotherPacketSource> &subtitleSource,
+        int64_t startTimeUs) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+
+    uint32_t streamTypeMask = 0ul;
+
+    if (audioSource != NULL) {
+        msg->setPointer("audioSource", audioSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_AUDIO;
+    }
+
+    if (videoSource != NULL) {
+        msg->setPointer("videoSource", videoSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_VIDEO;
+    }
+
+    if (subtitleSource != NULL) {
+        msg->setPointer("subtitleSource", subtitleSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_SUBTITLES;
+    }
+
+    msg->setInt32("streamTypeMask", streamTypeMask);
+    msg->setInt64("startTimeUs", startTimeUs);
+    msg->post();
+}
+
+void PlaylistFetcher::pauseAsync() {
+    (new AMessage(kWhatPause, id()))->post();
+}
+
+void PlaylistFetcher::stopAsync() {
+    (new AMessage(kWhatStop, id()))->post();
+}
+
+void PlaylistFetcher::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStart:
+        {
+            status_t err = onStart(msg);
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatStarted);
+            notify->setInt32("err", err);
+            notify->post();
+            break;
+        }
+
+        case kWhatPause:
+        {
+            onPause();
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatPaused);
+            notify->post();
+            break;
+        }
+
+        case kWhatStop:
+        {
+            onStop();
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatStopped);
+            notify->post();
+            break;
+        }
+
+        case kWhatMonitorQueue:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mMonitorQueueGeneration) {
+                // Stale event
+                break;
+            }
+
+            onMonitorQueue();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
+    mPacketSources.clear();
+
+    uint32_t streamTypeMask;
+    CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
+
+    int64_t startTimeUs;
+    CHECK(msg->findInt64("startTimeUs", &startTimeUs));
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
+        void *ptr;
+        CHECK(msg->findPointer("audioSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_AUDIO,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_VIDEO) {
+        void *ptr;
+        CHECK(msg->findPointer("videoSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_VIDEO,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_SUBTITLES) {
+        void *ptr;
+        CHECK(msg->findPointer("subtitleSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_SUBTITLES,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    mStreamTypeMask = streamTypeMask;
+    mStartTimeUs = startTimeUs;
+
+    if (mStartTimeUs >= 0ll) {
+        mSeqNumber = -1;
+        mStartup = true;
+    }
+
+    postMonitorQueue();
+
+    return OK;
+}
+
+void PlaylistFetcher::onPause() {
+    cancelMonitorQueue();
+
+    mPacketSources.clear();
+    mStreamTypeMask = 0;
+}
+
+void PlaylistFetcher::onStop() {
+    cancelMonitorQueue();
+
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        mPacketSources.valueAt(i)->clear();
+    }
+
+    mPacketSources.clear();
+    mStreamTypeMask = 0;
+}
+
+void PlaylistFetcher::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void PlaylistFetcher::queueDiscontinuity(
+        ATSParser::DiscontinuityType type, const sp<AMessage> &extra) {
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        mPacketSources.valueAt(i)->queueDiscontinuity(type, extra);
+    }
+}
+
+void PlaylistFetcher::onMonitorQueue() {
+    bool downloadMore = false;
+
+    status_t finalResult;
+    if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
+        sp<AnotherPacketSource> packetSource =
+            mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
+
+        downloadMore = packetSource->hasBufferAvailable(&finalResult);
+    } else {
+        bool first = true;
+        int64_t minBufferedDurationUs = 0ll;
+
+        for (size_t i = 0; i < mPacketSources.size(); ++i) {
+            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+                continue;
+            }
+
+            int64_t bufferedDurationUs =
+                mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
+
+            if (first || bufferedDurationUs < minBufferedDurationUs) {
+                minBufferedDurationUs = bufferedDurationUs;
+                first = false;
+            }
+        }
+
+        downloadMore =
+            !first && (minBufferedDurationUs < kMinBufferedDurationUs);
+    }
+
+    if (finalResult == OK && downloadMore) {
+        onDownloadNext();
+    } else {
+        // Nothing to do yet, try again in a second.
+
+        sp<AMessage> msg = mNotify->dup();
+        msg->setInt32("what", kWhatTemporarilyDoneFetching);
+        msg->post();
+
+        postMonitorQueue(1000000ll);
+    }
+}
+
+void PlaylistFetcher::onDownloadNext() {
+    int64_t nowUs = ALooper::GetNowUs();
+
+    if (mLastPlaylistFetchTimeUs < 0ll
+            || (!mPlaylist->isComplete() && timeToRefreshPlaylist(nowUs))) {
+        bool unchanged;
+        sp<M3UParser> playlist = mSession->fetchPlaylist(
+                mURI.c_str(), mPlaylistHash, &unchanged);
+
+        if (playlist == NULL) {
+            if (unchanged) {
+                // We succeeded in fetching the playlist, but it was
+                // unchanged from the last time we tried.
+
+                if (mRefreshState != THIRD_UNCHANGED_RELOAD_ATTEMPT) {
+                    mRefreshState = (RefreshState)(mRefreshState + 1);
+                }
+            } else {
+                ALOGE("failed to load playlist at url '%s'", mURI.c_str());
+                notifyError(ERROR_IO);
+                return;
+            }
+        } else {
+            mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+            mPlaylist = playlist;
+
+            if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+                updateDuration();
+            }
+        }
+
+        mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
+    }
+
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    bool seekDiscontinuity = false;
+    bool explicitDiscontinuity = false;
+
+    const int32_t lastSeqNumberInPlaylist =
+        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+    if (mSeqNumber < 0) {
+        CHECK_GE(mStartTimeUs, 0ll);
+
+        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+            mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+        } else {
+            // If this is a live session, start 3 segments from the end.
+            mSeqNumber = lastSeqNumberInPlaylist - 3;
+            if (mSeqNumber < firstSeqNumberInPlaylist) {
+                mSeqNumber = firstSeqNumberInPlaylist;
+            }
+        }
+
+        mStartTimeUs = -1ll;
+    }
+
+    if (mSeqNumber < firstSeqNumberInPlaylist
+            || mSeqNumber > lastSeqNumberInPlaylist) {
+        if (!mPlaylist->isComplete() && mNumRetries < kMaxNumRetries) {
+            ++mNumRetries;
+
+            if (mSeqNumber > lastSeqNumberInPlaylist) {
+                mLastPlaylistFetchTimeUs = -1;
+                postMonitorQueue(3000000ll);
+                return;
+            }
+
+            // we've missed the boat, let's start from the lowest sequence
+            // number available and signal a discontinuity.
+
+            ALOGI("We've missed the boat, restarting playback.");
+            mSeqNumber = lastSeqNumberInPlaylist;
+            explicitDiscontinuity = true;
+
+            // fall through
+        } else {
+            ALOGE("Cannot find sequence number %d in playlist "
+                 "(contains %d - %d)",
+                 mSeqNumber, firstSeqNumberInPlaylist,
+                 firstSeqNumberInPlaylist + mPlaylist->size() - 1);
+
+            notifyError(ERROR_END_OF_STREAM);
+            return;
+        }
+    }
+
+    mNumRetries = 0;
+
+    AString uri;
+    sp<AMessage> itemMeta;
+    CHECK(mPlaylist->itemAt(
+                mSeqNumber - firstSeqNumberInPlaylist,
+                &uri,
+                &itemMeta));
+
+    int32_t val;
+    if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
+        explicitDiscontinuity = true;
+    }
+
+    int64_t range_offset, range_length;
+    if (!itemMeta->findInt64("range-offset", &range_offset)
+            || !itemMeta->findInt64("range-length", &range_length)) {
+        range_offset = 0;
+        range_length = -1;
+    }
+
+    ALOGV("fetching segment %d from (%d .. %d)",
+          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
+
+    ALOGV("fetching '%s'", uri.c_str());
+
+    sp<ABuffer> buffer;
+    status_t err = mSession->fetchFile(
+            uri.c_str(), &buffer, range_offset, range_length);
+
+    if (err != OK) {
+        ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+        notifyError(err);
+        return;
+    }
+
+    CHECK(buffer != NULL);
+
+    err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer);
+
+    if (err != OK) {
+        ALOGE("decryptBuffer failed w/ error %d", err);
+
+        notifyError(err);
+        return;
+    }
+
+    if (mStartup || seekDiscontinuity || explicitDiscontinuity) {
+        // Signal discontinuity.
+
+        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+            // If this was a live event this made no sense since
+            // we don't have access to all the segment before the current
+            // one.
+            mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
+        }
+
+        if (seekDiscontinuity || explicitDiscontinuity) {
+            ALOGI("queueing discontinuity (seek=%d, explicit=%d)",
+                 seekDiscontinuity, explicitDiscontinuity);
+
+            queueDiscontinuity(
+                    explicitDiscontinuity
+                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                        : ATSParser::DISCONTINUITY_SEEK,
+                    NULL /* extra */);
+        }
+    }
+
+    err = extractAndQueueAccessUnits(buffer);
+
+    if (err != OK) {
+        notifyError(err);
+        return;
+    }
+
+    ++mSeqNumber;
+
+    postMonitorQueue();
+
+    mStartup = false;
+}
+
+int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    size_t index = 0;
+    int64_t segmentStartUs = 0;
+    while (index < mPlaylist->size()) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        if (timeUs < segmentStartUs + itemDurationUs) {
+            break;
+        }
+
+        segmentStartUs += itemDurationUs;
+        ++index;
+    }
+
+    if (index >= mPlaylist->size()) {
+        index = mPlaylist->size() - 1;
+    }
+
+    return firstSeqNumberInPlaylist + index;
+}
+
+status_t PlaylistFetcher::extractAndQueueAccessUnits(
+        const sp<ABuffer> &buffer) {
+    if (buffer->size() > 0 && buffer->data()[0] == 0x47) {
+        // Let's assume this is an MPEG2 transport stream.
+
+        if ((buffer->size() % 188) != 0) {
+            ALOGE("MPEG2 transport stream is not an even multiple of 188 "
+                  "bytes in length.");
+            return ERROR_MALFORMED;
+        }
+
+        if (mTSParser == NULL) {
+            mTSParser = new ATSParser;
+        }
+
+        if (mNextPTSTimeUs >= 0ll) {
+            sp<AMessage> extra = new AMessage;
+            extra->setInt64(IStreamListener::kKeyMediaTimeUs, mNextPTSTimeUs);
+
+            mTSParser->signalDiscontinuity(
+                    ATSParser::DISCONTINUITY_SEEK, extra);
+
+            mNextPTSTimeUs = -1ll;
+        }
+
+        size_t offset = 0;
+        while (offset < buffer->size()) {
+            status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
+
+            if (err != OK) {
+                return err;
+            }
+
+            offset += 188;
+        }
+
+        for (size_t i = mPacketSources.size(); i-- > 0;) {
+            sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
+
+            ATSParser::SourceType type;
+            switch (mPacketSources.keyAt(i)) {
+                case LiveSession::STREAMTYPE_VIDEO:
+                    type = ATSParser::VIDEO;
+                    break;
+
+                case LiveSession::STREAMTYPE_AUDIO:
+                    type = ATSParser::AUDIO;
+                    break;
+
+                case LiveSession::STREAMTYPE_SUBTITLES:
+                {
+                    ALOGE("MPEG2 Transport streams do not contain subtitles.");
+                    return ERROR_MALFORMED;
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            sp<AnotherPacketSource> source =
+                static_cast<AnotherPacketSource *>(
+                        mTSParser->getSource(type).get());
+
+            if (source == NULL) {
+                ALOGW("MPEG2 Transport stream does not contain %s data.",
+                      type == ATSParser::VIDEO ? "video" : "audio");
+
+                mStreamTypeMask &= ~mPacketSources.keyAt(i);
+                mPacketSources.removeItemsAt(i);
+                continue;
+            }
+
+            sp<ABuffer> accessUnit;
+            status_t finalResult;
+            while (source->hasBufferAvailable(&finalResult)
+                    && source->dequeueAccessUnit(&accessUnit) == OK) {
+                // Note that we do NOT dequeue any discontinuities.
+
+                packetSource->queueAccessUnit(accessUnit);
+            }
+
+            if (packetSource->getFormat() == NULL) {
+                packetSource->setFormat(source->getFormat());
+            }
+        }
+
+        return OK;
+    } else if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
+        if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES) {
+            ALOGE("This stream only contains subtitles.");
+            return ERROR_MALFORMED;
+        }
+
+        const sp<AnotherPacketSource> packetSource =
+            mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
+
+        buffer->meta()->setInt64("timeUs", 0ll);
+
+        packetSource->queueAccessUnit(buffer);
+        return OK;
+    }
+
+    if (mNextPTSTimeUs >= 0ll) {
+        mFirstPTSValid = false;
+        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
+        mNextPTSTimeUs = -1ll;
+    }
+
+    // This better be an ISO 13818-7 (AAC) or ISO 13818-1 (MPEG) audio
+    // stream prefixed by an ID3 tag.
+
+    bool firstID3Tag = true;
+    uint64_t PTS = 0;
+
+    for (;;) {
+        // Make sure to skip all ID3 tags preceding the audio data.
+        // At least one must be present to provide the PTS timestamp.
+
+        ID3 id3(buffer->data(), buffer->size(), true /* ignoreV1 */);
+        if (!id3.isValid()) {
+            if (firstID3Tag) {
+                ALOGE("Unable to parse ID3 tag.");
+                return ERROR_MALFORMED;
+            } else {
+                break;
+            }
+        }
+
+        if (firstID3Tag) {
+            bool found = false;
+
+            ID3::Iterator it(id3, "PRIV");
+            while (!it.done()) {
+                size_t length;
+                const uint8_t *data = it.getData(&length);
+
+                static const char *kMatchName =
+                    "com.apple.streaming.transportStreamTimestamp";
+                static const size_t kMatchNameLen = strlen(kMatchName);
+
+                if (length == kMatchNameLen + 1 + 8
+                        && !strncmp((const char *)data, kMatchName, kMatchNameLen)) {
+                    found = true;
+                    PTS = U64_AT(&data[kMatchNameLen + 1]);
+                }
+
+                it.next();
+            }
+
+            if (!found) {
+                ALOGE("Unable to extract transportStreamTimestamp from ID3 tag.");
+                return ERROR_MALFORMED;
+            }
+        }
+
+        // skip the ID3 tag
+        buffer->setRange(
+                buffer->offset() + id3.rawSize(), buffer->size() - id3.rawSize());
+
+        firstID3Tag = false;
+    }
+
+    if (!mFirstPTSValid) {
+        mFirstPTSValid = true;
+        mFirstPTS = PTS;
+    }
+    PTS -= mFirstPTS;
+
+    int64_t timeUs = (PTS * 100ll) / 9ll + mAbsoluteTimeAnchorUs;
+
+    if (mStreamTypeMask != LiveSession::STREAMTYPE_AUDIO) {
+        ALOGW("This stream only contains audio data!");
+
+        mStreamTypeMask &= LiveSession::STREAMTYPE_AUDIO;
+
+        if (mStreamTypeMask == 0) {
+            return OK;
+        }
+    }
+
+    sp<AnotherPacketSource> packetSource =
+        mPacketSources.valueFor(LiveSession::STREAMTYPE_AUDIO);
+
+    if (packetSource->getFormat() == NULL && buffer->size() >= 7) {
+        ABitReader bits(buffer->data(), buffer->size());
+
+        // adts_fixed_header
+
+        CHECK_EQ(bits.getBits(12), 0xfffu);
+        bits.skipBits(3);  // ID, layer
+        bool protection_absent = bits.getBits(1) != 0;
+
+        unsigned profile = bits.getBits(2);
+        CHECK_NE(profile, 3u);
+        unsigned sampling_freq_index = bits.getBits(4);
+        bits.getBits(1);  // private_bit
+        unsigned channel_configuration = bits.getBits(3);
+        CHECK_NE(channel_configuration, 0u);
+        bits.skipBits(2);  // original_copy, home
+
+        sp<MetaData> meta = MakeAACCodecSpecificData(
+                profile, sampling_freq_index, channel_configuration);
+
+        meta->setInt32(kKeyIsADTS, true);
+
+        packetSource->setFormat(meta);
+    }
+
+    int64_t numSamples = 0ll;
+    int32_t sampleRate;
+    CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
+
+    size_t offset = 0;
+    while (offset < buffer->size()) {
+        const uint8_t *adtsHeader = buffer->data() + offset;
+        CHECK_LT(offset + 5, buffer->size());
+
+        unsigned aac_frame_length =
+            ((adtsHeader[3] & 3) << 11)
+            | (adtsHeader[4] << 3)
+            | (adtsHeader[5] >> 5);
+
+        CHECK_LE(offset + aac_frame_length, buffer->size());
+
+        sp<ABuffer> unit = new ABuffer(aac_frame_length);
+        memcpy(unit->data(), adtsHeader, aac_frame_length);
+
+        int64_t unitTimeUs = timeUs + numSamples * 1000000ll / sampleRate;
+        unit->meta()->setInt64("timeUs", unitTimeUs);
+
+        // Each AAC frame encodes 1024 samples.
+        numSamples += 1024;
+
+        packetSource->queueAccessUnit(unit);
+
+        offset += aac_frame_length;
+    }
+
+    return OK;
+}
+
+void PlaylistFetcher::updateDuration() {
+    int64_t durationUs = 0ll;
+    for (size_t index = 0; index < mPlaylist->size(); ++index) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        durationUs += itemDurationUs;
+    }
+
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatDurationUpdate);
+    msg->setInt64("durationUs", durationUs);
+    msg->post();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
new file mode 100644
index 0000000..5a2b901
--- /dev/null
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLAYLIST_FETCHER_H_
+
+#define PLAYLIST_FETCHER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include "mpeg2ts/ATSParser.h"
+#include "LiveSession.h"
+
+namespace android {
+
+struct ABuffer;
+struct AnotherPacketSource;
+struct DataSource;
+struct HTTPBase;
+struct LiveDataSource;
+struct M3UParser;
+struct String8;
+
+struct PlaylistFetcher : public AHandler {
+    enum {
+        kWhatStarted,
+        kWhatPaused,
+        kWhatStopped,
+        kWhatError,
+        kWhatDurationUpdate,
+        kWhatTemporarilyDoneFetching,
+        kWhatPrepared,
+        kWhatPreparationFailed,
+    };
+
+    PlaylistFetcher(
+            const sp<AMessage> &notify,
+            const sp<LiveSession> &session,
+            const char *uri);
+
+    sp<DataSource> getDataSource();
+
+    void startAsync(
+            const sp<AnotherPacketSource> &audioSource,
+            const sp<AnotherPacketSource> &videoSource,
+            const sp<AnotherPacketSource> &subtitleSource,
+            int64_t startTimeUs = -1ll);
+
+    void pauseAsync();
+
+    void stopAsync();
+
+protected:
+    virtual ~PlaylistFetcher();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kMaxNumRetries         = 5,
+    };
+
+    enum {
+        kWhatStart          = 'strt',
+        kWhatPause          = 'paus',
+        kWhatStop           = 'stop',
+        kWhatMonitorQueue   = 'moni',
+    };
+
+    static const int64_t kMinBufferedDurationUs;
+
+    sp<AMessage> mNotify;
+    sp<LiveSession> mSession;
+    AString mURI;
+
+    uint32_t mStreamTypeMask;
+    int64_t mStartTimeUs;
+
+    KeyedVector<LiveSession::StreamType, sp<AnotherPacketSource> >
+        mPacketSources;
+
+    KeyedVector<AString, sp<ABuffer> > mAESKeyForURI;
+
+    int64_t mLastPlaylistFetchTimeUs;
+    sp<M3UParser> mPlaylist;
+    int32_t mSeqNumber;
+    int32_t mNumRetries;
+    bool mStartup;
+    int64_t mNextPTSTimeUs;
+
+    int32_t mMonitorQueueGeneration;
+
+    enum RefreshState {
+        INITIAL_MINIMUM_RELOAD_DELAY,
+        FIRST_UNCHANGED_RELOAD_ATTEMPT,
+        SECOND_UNCHANGED_RELOAD_ATTEMPT,
+        THIRD_UNCHANGED_RELOAD_ATTEMPT
+    };
+    RefreshState mRefreshState;
+
+    uint8_t mPlaylistHash[16];
+
+    sp<ATSParser> mTSParser;
+
+    bool mFirstPTSValid;
+    uint64_t mFirstPTS;
+    int64_t mAbsoluteTimeAnchorUs;
+
+    status_t decryptBuffer(
+            size_t playlistIndex, const sp<ABuffer> &buffer);
+
+    void postMonitorQueue(int64_t delayUs = 0);
+    void cancelMonitorQueue();
+
+    bool timeToRefreshPlaylist(int64_t nowUs) const;
+
+    // Returns the media time in us of the segment specified by seqNumber.
+    // This is computed by summing the durations of all segments before it.
+    int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
+
+    status_t onStart(const sp<AMessage> &msg);
+    void onPause();
+    void onStop();
+    void onMonitorQueue();
+    void onDownloadNext();
+
+    status_t extractAndQueueAccessUnits(const sp<ABuffer> &buffer);
+
+    void notifyError(status_t err);
+
+    void queueDiscontinuity(
+            ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
+
+    int32_t getSeqNumberForTime(int64_t timeUs) const;
+
+    void updateDuration();
+
+    DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
+};
+
+}  // namespace android
+
+#endif  // PLAYLIST_FETCHER_H_
+
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index 80a1a3a..bf6f7bb 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -21,7 +21,7 @@
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_id3
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := testid3
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 22c2f5a..34d671a 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -30,12 +30,55 @@
 
 static const size_t kMaxMetadataSize = 3 * 1024 * 1024;
 
+struct MemorySource : public DataSource {
+    MemorySource(const uint8_t *data, size_t size)
+        : mData(data),
+          mSize(size) {
+    }
+
+    virtual status_t initCheck() const {
+        return OK;
+    }
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        off64_t available = (offset >= mSize) ? 0ll : mSize - offset;
+
+        size_t copy = (available > size) ? size : available;
+        memcpy(data, mData + offset, copy);
+
+        return copy;
+    }
+
+private:
+    const uint8_t *mData;
+    size_t mSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MemorySource);
+};
+
 ID3::ID3(const sp<DataSource> &source, bool ignoreV1)
     : mIsValid(false),
       mData(NULL),
       mSize(0),
       mFirstFrameOffset(0),
-      mVersion(ID3_UNKNOWN) {
+      mVersion(ID3_UNKNOWN),
+      mRawSize(0) {
+    mIsValid = parseV2(source);
+
+    if (!mIsValid && !ignoreV1) {
+        mIsValid = parseV1(source);
+    }
+}
+
+ID3::ID3(const uint8_t *data, size_t size, bool ignoreV1)
+    : mIsValid(false),
+      mData(NULL),
+      mSize(0),
+      mFirstFrameOffset(0),
+      mVersion(ID3_UNKNOWN),
+      mRawSize(0) {
+    sp<MemorySource> source = new MemorySource(data, size);
+
     mIsValid = parseV2(source);
 
     if (!mIsValid && !ignoreV1) {
@@ -140,6 +183,7 @@
     }
 
     mSize = size;
+    mRawSize = mSize + sizeof(header);
 
     if (source->readAt(sizeof(header), mData, mSize) != (ssize_t)mSize) {
         free(mData);
@@ -313,17 +357,22 @@
         }
 
         if (flags & 2) {
-            // Unsynchronization added.
+            // This file has "unsynchronization", so we have to replace occurrences
+            // of 0xff 0x00 with just 0xff in order to get the real data.
 
+            size_t readOffset = offset + 11;
+            size_t writeOffset = offset + 11;
             for (size_t i = 0; i + 1 < dataSize; ++i) {
-                if (mData[offset + 10 + i] == 0xff
-                        && mData[offset + 11 + i] == 0x00) {
-                    memmove(&mData[offset + 11 + i], &mData[offset + 12 + i],
-                            mSize - offset - 12 - i);
+                if (mData[readOffset - 1] == 0xff
+                        && mData[readOffset] == 0x00) {
+                    ++readOffset;
                     --mSize;
                     --dataSize;
                 }
+                mData[writeOffset++] = mData[readOffset++];
             }
+            // move the remaining data following this frame
+            memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
 
             flags &= ~2;
         }
@@ -505,7 +554,7 @@
         int32_t i = n - 4;
         while(--i >= 0 && *++frameData != 0) ;
         int skipped = (frameData - mFrameData);
-        if (skipped >= n) {
+        if (skipped >= (int)n) {
             return;
         }
         n -= skipped;
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 2306f31..b001cf4 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -25,6 +25,7 @@
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/TimeSource.h>
+#include <media/stagefright/MetaData.h>
 #include <utils/threads.h>
 #include <drm/DrmManagerClient.h>
 
@@ -100,7 +101,7 @@
 
     void postAudioEOS(int64_t delayUs = 0ll);
     void postAudioSeekComplete();
-
+    void postAudioTearDown();
     status_t dump(int fd, const Vector<String16> &args) const;
 
 private:
@@ -168,9 +169,12 @@
     sp<AwesomeRenderer> mVideoRenderer;
     bool mVideoRenderingStarted;
     bool mVideoRendererIsPreview;
+    int32_t mMediaRenderingStartGeneration;
+    int32_t mStartGeneration;
 
     ssize_t mActiveAudioTrackIndex;
     sp<MediaSource> mAudioTrack;
+    sp<MediaSource> mOmxSource;
     sp<MediaSource> mAudioSource;
     AudioPlayer *mAudioPlayer;
     int64_t mDurationUs;
@@ -211,7 +215,8 @@
     bool mAudioStatusEventPending;
     sp<TimedEventQueue::Event> mVideoLagEvent;
     bool mVideoLagEventPending;
-
+    sp<TimedEventQueue::Event> mAudioTearDownEvent;
+    bool mAudioTearDownEventPending;
     sp<TimedEventQueue::Event> mAsyncPrepareEvent;
     Condition mPreparedCondition;
     bool mIsAsyncPrepare;
@@ -223,6 +228,8 @@
     void postStreamDoneEvent_l(status_t status);
     void postCheckAudioStatusEvent(int64_t delayUs);
     void postVideoLagEvent_l();
+    void postAudioTearDownEvent(int64_t delayUs);
+
     status_t play_l();
 
     MediaBuffer *mVideoBuffer;
@@ -257,6 +264,7 @@
     void setAudioSource(sp<MediaSource> source);
     status_t initAudioDecoder();
 
+
     void setVideoSource(sp<MediaSource> source);
     status_t initVideoDecoder(uint32_t flags = 0);
 
@@ -273,6 +281,9 @@
     void abortPrepare(status_t err);
     void finishAsyncPrepare_l();
     void onVideoLagUpdate();
+    void onAudioTearDownEvent();
+
+    void beginPrepareAsync_l();
 
     bool getCachedDuration_l(int64_t *durationUs, bool *eos);
 
@@ -285,6 +296,8 @@
     void finishSeekIfNecessary(int64_t videoTimeUs);
     void ensureCacheIsFetching_l();
 
+    void notifyIfMediaStarted_l();
+    void createAudioPlayer_l();
     status_t startAudioPlayer_l(bool sendErrorNotification = true);
 
     void shutdownVideoDecoder_l();
@@ -327,6 +340,9 @@
         Vector<TrackStat> mTracks;
     } mStats;
 
+    bool    mOffloadAudio;
+    bool    mAudioTearDown;
+
     status_t setVideoScalingMode(int32_t mode);
     status_t setVideoScalingMode_l(int32_t mode);
     status_t getTrackInfo(Parcel* reply) const;
diff --git a/media/libstagefright/include/ESDS.h b/media/libstagefright/include/ESDS.h
index 3a79951..2f40dae 100644
--- a/media/libstagefright/include/ESDS.h
+++ b/media/libstagefright/include/ESDS.h
@@ -33,6 +33,9 @@
 
     status_t getObjectTypeIndication(uint8_t *objectTypeIndication) const;
     status_t getCodecSpecificInfo(const void **data, size_t *size) const;
+    status_t getCodecSpecificOffset(size_t *offset, size_t *size) const;
+    status_t getBitRate(uint32_t *brateMax, uint32_t *brateAvg) const;
+    status_t getStreamType(uint8_t *streamType) const;
 
 private:
     enum {
@@ -49,6 +52,9 @@
     size_t mDecoderSpecificOffset;
     size_t mDecoderSpecificLength;
     uint8_t mObjectTypeIndication;
+    uint8_t mStreamType;
+    uint32_t mBitRateMax;
+    uint32_t mBitRateAvg;
 
     status_t skipDescriptorHeader(
             size_t offset, size_t size,
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index c2dc351..d4b7f9f 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -59,6 +59,9 @@
     static void RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag);
     static void UnRegisterSocketUserTag(int sockfd);
 
+    static void RegisterSocketUserMark(int sockfd, uid_t uid);
+    static void UnRegisterSocketUserMark(int sockfd);
+
 protected:
     void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
 
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 3028f56..cca83ab 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -36,6 +36,7 @@
     };
 
     ID3(const sp<DataSource> &source, bool ignoreV1 = false);
+    ID3(const uint8_t *data, size_t size, bool ignoreV1 = false);
     ~ID3();
 
     bool isValid() const;
@@ -71,6 +72,8 @@
         Iterator &operator=(const Iterator &);
     };
 
+    size_t rawSize() const { return mRawSize; }
+
 private:
     bool mIsValid;
     uint8_t *mData;
@@ -78,6 +81,10 @@
     size_t mFirstFrameOffset;
     Version mVersion;
 
+    // size of the ID3 tag including header before any unsynchronization.
+    // only valid for IDV2+
+    size_t mRawSize;
+
     bool parseV1(const sp<DataSource> &source);
     bool parseV2(const sp<DataSource> &source);
     void removeUnsynchronization();
diff --git a/media/libstagefright/include/LiveSession.h b/media/libstagefright/include/LiveSession.h
deleted file mode 100644
index db44a33..0000000
--- a/media/libstagefright/include/LiveSession.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIVE_SESSION_H_
-
-#define LIVE_SESSION_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-
-#include <utils/String8.h>
-
-namespace android {
-
-struct ABuffer;
-struct DataSource;
-struct LiveDataSource;
-struct M3UParser;
-struct HTTPBase;
-
-struct LiveSession : public AHandler {
-    enum Flags {
-        // Don't log any URLs.
-        kFlagIncognito = 1,
-    };
-    LiveSession(
-            const sp<AMessage> &notify,
-            uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
-
-    sp<DataSource> getDataSource();
-
-    void connect(
-            const char *url,
-            const KeyedVector<String8, String8> *headers = NULL);
-
-    void disconnect();
-
-    // Blocks until seek is complete.
-    void seekTo(int64_t timeUs);
-
-    status_t getDuration(int64_t *durationUs) const;
-
-    bool isSeekable() const;
-    bool hasDynamicDuration() const;
-
-    // Posted notification's "what" field will carry one of the following:
-    enum {
-        kWhatPrepared,
-        kWhatPreparationFailed,
-    };
-
-protected:
-    virtual ~LiveSession();
-
-    virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
-    enum {
-        kMaxNumQueuedFragments = 3,
-        kMaxNumRetries         = 5,
-    };
-
-    enum {
-        kWhatConnect        = 'conn',
-        kWhatDisconnect     = 'disc',
-        kWhatMonitorQueue   = 'moni',
-        kWhatSeek           = 'seek',
-    };
-
-    struct BandwidthItem {
-        AString mURI;
-        unsigned long mBandwidth;
-    };
-
-    sp<AMessage> mNotify;
-    uint32_t mFlags;
-    bool mUIDValid;
-    uid_t mUID;
-
-    bool mInPreparationPhase;
-
-    sp<LiveDataSource> mDataSource;
-
-    sp<HTTPBase> mHTTPDataSource;
-
-    AString mMasterURL;
-    KeyedVector<String8, String8> mExtraHeaders;
-
-    Vector<BandwidthItem> mBandwidthItems;
-
-    KeyedVector<AString, sp<ABuffer> > mAESKeyForURI;
-
-    ssize_t mPrevBandwidthIndex;
-    int64_t mLastPlaylistFetchTimeUs;
-    sp<M3UParser> mPlaylist;
-    int32_t mSeqNumber;
-    int64_t mSeekTimeUs;
-    int32_t mNumRetries;
-    bool mStartOfPlayback;
-
-    mutable Mutex mLock;
-    Condition mCondition;
-    int64_t mDurationUs;
-    bool mDurationFixed;  // Duration has been determined once and for all.
-    bool mSeekDone;
-    bool mDisconnectPending;
-
-    int32_t mMonitorQueueGeneration;
-
-    enum RefreshState {
-        INITIAL_MINIMUM_RELOAD_DELAY,
-        FIRST_UNCHANGED_RELOAD_ATTEMPT,
-        SECOND_UNCHANGED_RELOAD_ATTEMPT,
-        THIRD_UNCHANGED_RELOAD_ATTEMPT
-    };
-    RefreshState mRefreshState;
-
-    uint8_t mPlaylistHash[16];
-
-    void onConnect(const sp<AMessage> &msg);
-    void onDisconnect();
-    void onDownloadNext();
-    void onMonitorQueue();
-    void onSeek(const sp<AMessage> &msg);
-
-    status_t fetchFile(
-            const char *url, sp<ABuffer> *out,
-            int64_t range_offset = 0, int64_t range_length = -1);
-
-    sp<M3UParser> fetchPlaylist(const char *url, bool *unchanged);
-    size_t getBandwidthIndex();
-
-    status_t decryptBuffer(
-            size_t playlistIndex, const sp<ABuffer> &buffer);
-
-    void postMonitorQueue(int64_t delayUs = 0);
-
-    bool timeToRefreshPlaylist(int64_t nowUs) const;
-
-    static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
-
-    // Returns the media time in us of the segment specified by seqNumber.
-    // This is computed by summing the durations of all segments before it.
-    int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
-
-    void signalEOS(status_t err);
-
-    DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
-};
-
-}  // namespace android
-
-#endif  // LIVE_SESSION_H_
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index fe74a42..c5e86a6 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -31,7 +31,6 @@
 struct DataSource;
 struct MPEG2TSSource;
 struct String8;
-struct LiveSession;
 
 struct MPEG2TSExtractor : public MediaExtractor {
     MPEG2TSExtractor(const sp<DataSource> &source);
@@ -44,16 +43,12 @@
 
     virtual uint32_t flags() const;
 
-    void setLiveSession(const sp<LiveSession> &liveSession);
-    void seekTo(int64_t seekTimeUs);
-
 private:
     friend struct MPEG2TSSource;
 
     mutable Mutex mLock;
 
     sp<DataSource> mDataSource;
-    sp<LiveSession> mLiveSession;
 
     sp<ATSParser> mParser;
 
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 35eff96..bbec1c4 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -82,6 +82,7 @@
     sp<DataSource> mDataSource;
     status_t mInitCheck;
     bool mHasVideo;
+    uint32_t mHeaderTimescale;
 
     Track *mFirstTrack, *mLastTrack;
 
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 24b8d98..7e53af3 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -79,6 +79,10 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
 
+    virtual status_t updateGraphicBufferInMeta(
+            node_id node, OMX_U32 port_index,
+            const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
+
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer);
@@ -109,6 +113,13 @@
             const char *parameter_name,
             OMX_INDEXTYPE *index);
 
+    virtual status_t setInternalOption(
+            node_id node,
+            OMX_U32 port_index,
+            InternalOptionType type,
+            const void *data,
+            size_t size);
+
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
     OMX_ERRORTYPE OnEvent(
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 67aba6b..ae498b4 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -66,6 +66,10 @@
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
             OMX::buffer_id *buffer);
 
+    status_t updateGraphicBufferInMeta(
+            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+            OMX::buffer_id buffer);
+
     status_t createInputSurface(
             OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer);
 
@@ -96,6 +100,12 @@
     status_t getExtensionIndex(
             const char *parameterName, OMX_INDEXTYPE *index);
 
+    status_t setInternalOption(
+            OMX_U32 portIndex,
+            IOMX::InternalOptionType type,
+            const void *data,
+            size_t size);
+
     void onMessage(const omx_message &msg);
     void onObserverDied(OMXMaster *master);
     void onGetHandleFailed();
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
new file mode 100644
index 0000000..d050fa6
--- /dev/null
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
+
+#define SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/IOMX.h>
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
+
+namespace android {
+
+struct SoftVideoDecoderOMXComponent : public SimpleSoftOMXComponent {
+    SoftVideoDecoderOMXComponent(
+            const char *name,
+            const char *componentRole,
+            OMX_VIDEO_CODINGTYPE codingType,
+            const CodecProfileLevel *profileLevels,
+            size_t numProfileLevels,
+            int32_t width,
+            int32_t height,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual OMX_ERRORTYPE getConfig(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    void initPorts(OMX_U32 numInputBuffers,
+            OMX_U32 inputBufferSize,
+            OMX_U32 numOutputBuffers,
+            const char *mimeType);
+
+    virtual void updatePortDefinitions();
+
+    enum {
+        kInputPortIndex  = 0,
+        kOutputPortIndex = 1,
+        kMaxPortIndex = 1,
+    };
+
+    uint32_t mWidth, mHeight;
+    uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
+
+    enum {
+        NONE,
+        AWAITING_DISABLED,
+        AWAITING_ENABLED
+    } mOutputPortSettingsChange;
+
+private:
+    const char *mComponentRole;
+    OMX_VIDEO_CODINGTYPE mCodingType;
+    const CodecProfileLevel *mProfileLevels;
+    size_t mNumProfileLevels;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVideoDecoderOMXComponent);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index b304749..d260d0f 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -870,7 +870,9 @@
                         continue;
                     }
                 } else if (!strcmp("V_VP8", codecID)) {
-                    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VPX);
+                    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
+                } else if (!strcmp("V_VP9", codecID)) {
+                    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP9);
                 } else {
                     ALOGW("%s is not supported.", codecID);
                     continue;
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 3de3a61..3153c8b 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -32,9 +32,22 @@
 
 AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
     : mIsAudio(false),
-      mFormat(meta),
+      mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK) {
+    setFormat(meta);
+}
+
+void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
+    CHECK(mFormat == NULL);
+
+    mIsAudio = false;
+
+    if (meta == NULL) {
+        return;
+    }
+
+    mFormat = meta;
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
 
@@ -45,11 +58,6 @@
     }
 }
 
-void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
-    CHECK(mFormat == NULL);
-    mFormat = meta;
-}
-
 AnotherPacketSource::~AnotherPacketSource() {
 }
 
@@ -152,6 +160,15 @@
     mCondition.signal();
 }
 
+void AnotherPacketSource::clear() {
+    Mutex::Autolock autoLock(mLock);
+
+    mBuffers.clear();
+    mEOSResult = OK;
+
+    mFormat = NULL;
+}
+
 void AnotherPacketSource::queueDiscontinuity(
         ATSParser::DiscontinuityType type,
         const sp<AMessage> &extra) {
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 1db4068..e16cf78 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -41,6 +41,8 @@
     virtual status_t read(
             MediaBuffer **buffer, const ReadOptions *options = NULL);
 
+    void clear();
+
     bool hasBufferAvailable(status_t *finalResult);
 
     // Returns the difference between the last and the first queued
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index e1589b4..d449c34 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -19,7 +19,6 @@
 #include <utils/Log.h>
 
 #include "include/MPEG2TSExtractor.h"
-#include "include/LiveSession.h"
 #include "include/NuCachedSource2.h"
 
 #include <media/stagefright/foundation/ADebug.h>
@@ -79,15 +78,7 @@
 }
 
 sp<MetaData> MPEG2TSSource::getFormat() {
-    sp<MetaData> meta = mImpl->getFormat();
-
-    int64_t durationUs;
-    if (mExtractor->mLiveSession != NULL
-            && mExtractor->mLiveSession->getDuration(&durationUs) == OK) {
-        meta->setInt64(kKeyDuration, durationUs);
-    }
-
-    return meta;
+    return mImpl->getFormat();
 }
 
 status_t MPEG2TSSource::read(
@@ -97,7 +88,7 @@
     int64_t seekTimeUs;
     ReadOptions::SeekMode seekMode;
     if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        mExtractor->seekTo(seekTimeUs);
+        return ERROR_UNSUPPORTED;
     }
 
     status_t finalResult;
@@ -216,32 +207,8 @@
     return mParser->feedTSPacket(packet, kTSPacketSize);
 }
 
-void MPEG2TSExtractor::setLiveSession(const sp<LiveSession> &liveSession) {
-    Mutex::Autolock autoLock(mLock);
-
-    mLiveSession = liveSession;
-}
-
-void MPEG2TSExtractor::seekTo(int64_t seekTimeUs) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mLiveSession == NULL) {
-        return;
-    }
-
-    mLiveSession->seekTo(seekTimeUs);
-}
-
 uint32_t MPEG2TSExtractor::flags() const {
-    Mutex::Autolock autoLock(mLock);
-
-    uint32_t flags = CAN_PAUSE;
-
-    if (mLiveSession != NULL && mLiveSession->isSeekable()) {
-        flags |= CAN_SEEK_FORWARD | CAN_SEEK_BACKWARD | CAN_SEEK;
-    }
-
-    return flags;
+    return CAN_PAUSE;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index a8b4939..cd912e7 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -9,6 +9,7 @@
         SimpleSoftOMXComponent.cpp    \
         SoftOMXComponent.cpp          \
         SoftOMXPlugin.cpp             \
+        SoftVideoDecoderOMXComponent.cpp \
 
 LOCAL_C_INCLUDES += \
         $(TOP)/frameworks/av/media/libstagefright \
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index ef27879..cf43e94 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -18,12 +18,13 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
-#include <GraphicBufferSource.h>
+#include "GraphicBufferSource.h"
 
 #include <OMX_Core.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
 
 namespace android {
@@ -36,9 +37,16 @@
     mInitCheck(UNKNOWN_ERROR),
     mNodeInstance(nodeInstance),
     mExecuting(false),
+    mSuspended(false),
     mNumFramesAvailable(0),
     mEndOfStream(false),
-    mEndOfStreamSent(false) {
+    mEndOfStreamSent(false),
+    mRepeatAfterUs(-1ll),
+    mRepeatLastFrameGeneration(0),
+    mLatestSubmittedBufferId(-1),
+    mLatestSubmittedBufferFrameNum(0),
+    mLatestSubmittedBufferUseCount(0),
+    mRepeatBufferDeferred(false) {
 
     ALOGV("GraphicBufferSource w=%u h=%u c=%u",
             bufferWidth, bufferHeight, bufferCount);
@@ -51,10 +59,9 @@
 
     String8 name("GraphicBufferSource");
 
-    mBufferQueue = new BufferQueue(true);
+    mBufferQueue = new BufferQueue();
     mBufferQueue->setConsumerName(name);
     mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mBufferQueue->setSynchronousMode(true);
     mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
             GRALLOC_USAGE_HW_TEXTURE);
 
@@ -69,13 +76,10 @@
     // reference once the ctor ends, as that would cause the refcount of 'this'
     // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
     // that's what we create.
-    wp<BufferQueue::ConsumerListener> listener;
-    listener = static_cast<BufferQueue::ConsumerListener*>(this);
+    wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
+    sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
 
-    sp<BufferQueue::ConsumerListener> proxy;
-    proxy = new BufferQueue::ProxyConsumerListener(listener);
-
-    mInitCheck = mBufferQueue->consumerConnect(proxy);
+    mInitCheck = mBufferQueue->consumerConnect(proxy, false);
     if (mInitCheck != NO_ERROR) {
         ALOGE("Error connecting to BufferQueue: %s (%d)",
                 strerror(-mInitCheck), mInitCheck);
@@ -126,14 +130,40 @@
     if (mEndOfStream && mNumFramesAvailable == 0) {
         submitEndOfInputStream_l();
     }
+
+    if (mRepeatAfterUs > 0ll && mLooper == NULL) {
+        mReflector = new AHandlerReflector<GraphicBufferSource>(this);
+
+        mLooper = new ALooper;
+        mLooper->registerHandler(mReflector);
+        mLooper->start();
+
+        if (mLatestSubmittedBufferId >= 0) {
+            sp<AMessage> msg =
+                new AMessage(kWhatRepeatLastFrame, mReflector->id());
+
+            msg->setInt32("generation", ++mRepeatLastFrameGeneration);
+            msg->post(mRepeatAfterUs);
+        }
+    }
 }
 
 void GraphicBufferSource::omxLoaded(){
     Mutex::Autolock autoLock(mMutex);
-    ALOGV("--> loaded");
-    CHECK(mExecuting);
+    if (!mExecuting) {
+        // This can happen if something failed very early.
+        ALOGW("Dropped back down to Loaded without Executing");
+    }
 
-    ALOGV("Dropped down to loaded, avail=%d eos=%d eosSent=%d",
+    if (mLooper != NULL) {
+        mLooper->unregisterHandler(mReflector->id());
+        mReflector.clear();
+
+        mLooper->stop();
+        mLooper.clear();
+    }
+
+    ALOGV("--> loaded; avail=%d eos=%d eosSent=%d",
             mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
 
     // Codec is no longer executing.  Discard all codec-related state.
@@ -206,24 +236,19 @@
     // Find matching entry in our cached copy of the BufferQueue slots.
     // If we find a match, release that slot.  If we don't, the BufferQueue
     // has dropped that GraphicBuffer, and there's nothing for us to release.
-    //
-    // (We could store "id" in CodecBuffer and avoid the slot search.)
-    int id;
-    for (id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) {
-        if (mBufferSlot[id] == NULL) {
-            continue;
-        }
+    int id = codecBuffer.mBuf;
+    if (mBufferSlot[id] != NULL &&
+        mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
+        ALOGV("cbi %d matches bq slot %d, handle=%p",
+                cbi, id, mBufferSlot[id]->handle);
 
-        if (mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
-            ALOGV("cbi %d matches bq slot %d, handle=%p",
-                    cbi, id, mBufferSlot[id]->handle);
-
-            mBufferQueue->releaseBuffer(id, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
-                    Fence::NO_FENCE);
-            break;
+        if (id == mLatestSubmittedBufferId) {
+            CHECK_GT(mLatestSubmittedBufferUseCount--, 0);
+        } else {
+            mBufferQueue->releaseBuffer(id, codecBuffer.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
         }
-    }
-    if (id == BufferQueue::NUM_BUFFER_SLOTS) {
+    } else {
         ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
                 cbi);
     }
@@ -242,13 +267,66 @@
         // send that.
         ALOGV("buffer freed, EOS pending");
         submitEndOfInputStream_l();
+    } else if (mRepeatBufferDeferred) {
+        bool success = repeatLatestSubmittedBuffer_l();
+        if (success) {
+            ALOGV("deferred repeatLatestSubmittedBuffer_l SUCCESS");
+        } else {
+            ALOGV("deferred repeatLatestSubmittedBuffer_l FAILURE");
+        }
+        mRepeatBufferDeferred = false;
     }
+
     return;
 }
 
+void GraphicBufferSource::suspend(bool suspend) {
+    Mutex::Autolock autoLock(mMutex);
+
+    if (suspend) {
+        mSuspended = true;
+
+        while (mNumFramesAvailable > 0) {
+            BufferQueue::BufferItem item;
+            status_t err = mBufferQueue->acquireBuffer(&item, 0);
+
+            if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+                // shouldn't happen.
+                ALOGW("suspend: frame was not available");
+                break;
+            } else if (err != OK) {
+                ALOGW("suspend: acquireBuffer returned err=%d", err);
+                break;
+            }
+
+            --mNumFramesAvailable;
+
+            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+        }
+        return;
+    }
+
+    mSuspended = false;
+
+    if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
+        if (repeatLatestSubmittedBuffer_l()) {
+            ALOGV("suspend/deferred repeatLatestSubmittedBuffer_l SUCCESS");
+
+            mRepeatBufferDeferred = false;
+        } else {
+            ALOGV("suspend/deferred repeatLatestSubmittedBuffer_l FAILURE");
+        }
+    }
+}
+
 bool GraphicBufferSource::fillCodecBuffer_l() {
     CHECK(mExecuting && mNumFramesAvailable > 0);
 
+    if (mSuspended) {
+        return false;
+    }
+
     int cbi = findAvailableCodecBuffer_l();
     if (cbi < 0) {
         // No buffers available, bail.
@@ -260,7 +338,7 @@
     ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
             mNumFramesAvailable);
     BufferQueue::BufferItem item;
-    status_t err = mBufferQueue->acquireBuffer(&item);
+    status_t err = mBufferQueue->acquireBuffer(&item, 0);
     if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
         // shouldn't happen
         ALOGW("fillCodecBuffer_l: frame was not available");
@@ -287,18 +365,75 @@
         mBufferSlot[item.mBuf] = item.mGraphicBuffer;
     }
 
-    err = submitBuffer_l(mBufferSlot[item.mBuf], item.mTimestamp / 1000, cbi);
+    err = submitBuffer_l(item, cbi);
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
-                EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
     } else {
         ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
+        setLatestSubmittedBuffer_l(item);
     }
 
     return true;
 }
 
+bool GraphicBufferSource::repeatLatestSubmittedBuffer_l() {
+    CHECK(mExecuting && mNumFramesAvailable == 0);
+
+    if (mLatestSubmittedBufferId < 0 || mSuspended) {
+        return false;
+    }
+
+    int cbi = findAvailableCodecBuffer_l();
+    if (cbi < 0) {
+        // No buffers available, bail.
+        ALOGV("repeatLatestSubmittedBuffer_l: no codec buffers.");
+        return false;
+    }
+
+    BufferQueue::BufferItem item;
+    item.mBuf = mLatestSubmittedBufferId;
+    item.mFrameNumber = mLatestSubmittedBufferFrameNum;
+
+    status_t err = submitBuffer_l(item, cbi);
+
+    if (err != OK) {
+        return false;
+    }
+
+    ++mLatestSubmittedBufferUseCount;
+
+    return true;
+}
+
+void GraphicBufferSource::setLatestSubmittedBuffer_l(
+        const BufferQueue::BufferItem &item) {
+    ALOGV("setLatestSubmittedBuffer_l");
+
+    if (mLatestSubmittedBufferId >= 0) {
+        if (mLatestSubmittedBufferUseCount == 0) {
+            mBufferQueue->releaseBuffer(
+                    mLatestSubmittedBufferId,
+                    mLatestSubmittedBufferFrameNum,
+                    EGL_NO_DISPLAY,
+                    EGL_NO_SYNC_KHR,
+                    Fence::NO_FENCE);
+        }
+    }
+
+    mLatestSubmittedBufferId = item.mBuf;
+    mLatestSubmittedBufferFrameNum = item.mFrameNumber;
+    mLatestSubmittedBufferUseCount = 1;
+    mRepeatBufferDeferred = false;
+
+    if (mReflector != NULL) {
+        sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector->id());
+        msg->setInt32("generation", ++mRepeatLastFrameGeneration);
+        msg->post(mRepeatAfterUs);
+    }
+}
+
 status_t GraphicBufferSource::signalEndOfInputStream() {
     Mutex::Autolock autoLock(mMutex);
     ALOGV("signalEndOfInputStream: exec=%d avail=%d eos=%d",
@@ -326,11 +461,13 @@
     return OK;
 }
 
-status_t GraphicBufferSource::submitBuffer_l(sp<GraphicBuffer>& graphicBuffer,
-        int64_t timestampUsec, int cbi) {
+status_t GraphicBufferSource::submitBuffer_l(
+        const BufferQueue::BufferItem &item, int cbi) {
     ALOGV("submitBuffer_l cbi=%d", cbi);
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
-    codecBuffer.mGraphicBuffer = graphicBuffer;
+    codecBuffer.mGraphicBuffer = mBufferSlot[item.mBuf];
+    codecBuffer.mBuf = item.mBuf;
+    codecBuffer.mFrameNumber = item.mFrameNumber;
 
     OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
     CHECK(header->nAllocLen >= 4 + sizeof(buffer_handle_t));
@@ -342,7 +479,7 @@
 
     status_t err = mNodeInstance->emptyDirectBuffer(header, 0,
             4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME,
-            timestampUsec);
+            item.mTimestamp / 1000);
     if (err != OK) {
         ALOGW("WARNING: emptyDirectBuffer failed: 0x%x", err);
         codecBuffer.mGraphicBuffer = NULL;
@@ -423,22 +560,30 @@
     ALOGV("onFrameAvailable exec=%d avail=%d",
             mExecuting, mNumFramesAvailable);
 
-    if (mEndOfStream) {
-        // This should only be possible if a new buffer was queued after
-        // EOS was signaled, i.e. the app is misbehaving.
-        ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+    if (mEndOfStream || mSuspended) {
+        if (mEndOfStream) {
+            // This should only be possible if a new buffer was queued after
+            // EOS was signaled, i.e. the app is misbehaving.
+
+            ALOGW("onFrameAvailable: EOS is set, ignoring frame");
+        } else {
+            ALOGV("onFrameAvailable: suspended, ignoring frame");
+        }
 
         BufferQueue::BufferItem item;
-        status_t err = mBufferQueue->acquireBuffer(&item);
+        status_t err = mBufferQueue->acquireBuffer(&item, 0);
         if (err == OK) {
-            mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
-                EGL_NO_SYNC_KHR, item.mFence);
+            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
         }
         return;
     }
 
     mNumFramesAvailable++;
 
+    mRepeatBufferDeferred = false;
+    ++mRepeatLastFrameGeneration;
+
     if (mExecuting) {
         fillCodecBuffer_l();
     }
@@ -464,4 +609,51 @@
     }
 }
 
+status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
+        int64_t repeatAfterUs) {
+    Mutex::Autolock autoLock(mMutex);
+
+    if (mExecuting || repeatAfterUs <= 0ll) {
+        return INVALID_OPERATION;
+    }
+
+    mRepeatAfterUs = repeatAfterUs;
+
+    return OK;
+}
+
+void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatRepeatLastFrame:
+        {
+            Mutex::Autolock autoLock(mMutex);
+
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mRepeatLastFrameGeneration) {
+                // stale
+                break;
+            }
+
+            if (!mExecuting || mNumFramesAvailable > 0) {
+                break;
+            }
+
+            bool success = repeatLatestSubmittedBuffer_l();
+
+            if (success) {
+                ALOGV("repeatLatestSubmittedBuffer_l SUCCESS");
+            } else {
+                ALOGV("repeatLatestSubmittedBuffer_l FAILURE");
+                mRepeatBufferDeferred = true;
+            }
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
 }  // namespace android
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 562d342..244a843 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -25,6 +25,8 @@
 #include <OMX_Core.h>
 #include "../include/OMXNodeInstance.h"
 #include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ALooper.h>
 
 namespace android {
 
@@ -85,6 +87,19 @@
     // have a codec buffer ready, we just set the mEndOfStream flag.
     status_t signalEndOfInputStream();
 
+    // If suspend is true, all incoming buffers (including those currently
+    // in the BufferQueue) will be discarded until the suspension is lifted.
+    void suspend(bool suspend);
+
+    // Specifies the interval after which we requeue the buffer previously
+    // queued to the encoder. This is useful in the case of surface flinger
+    // providing the input surface if the resulting encoded stream is to
+    // be displayed "live". If we were not to push through the extra frame
+    // the decoder on the remote end would be unable to decode the latest frame.
+    // This API must be called before transitioning the encoder to "executing"
+    // state and once this behaviour is specified it cannot be reset.
+    status_t setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs);
+
 protected:
     // BufferQueue::ConsumerListener interface, called when a new frame of
     // data is available.  If we're executing and a codec buffer is
@@ -104,6 +119,13 @@
     // (mGraphicBuffer == NULL) or in use by the codec.
     struct CodecBuffer {
         OMX_BUFFERHEADERTYPE* mHeader;
+
+        // buffer producer's frame-number for buffer
+        uint64_t mFrameNumber;
+
+        // buffer producer's buffer slot for buffer
+        int mBuf;
+
         sp<GraphicBuffer> mGraphicBuffer;
     };
 
@@ -130,13 +152,15 @@
 
     // Marks the mCodecBuffers entry as in-use, copies the GraphicBuffer
     // reference into the codec buffer, and submits the data to the codec.
-    status_t submitBuffer_l(sp<GraphicBuffer>& graphicBuffer,
-            int64_t timestampUsec, int cbi);
+    status_t submitBuffer_l(const BufferQueue::BufferItem &item, int cbi);
 
     // Submits an empty buffer, with the EOS flag set.   Returns without
     // doing anything if we don't have a codec buffer available.
     void submitEndOfInputStream_l();
 
+    void setLatestSubmittedBuffer_l(const BufferQueue::BufferItem &item);
+    bool repeatLatestSubmittedBuffer_l();
+
     // Lock, covers all member variables.
     mutable Mutex mMutex;
 
@@ -149,6 +173,8 @@
     // Set by omxExecuting() / omxIdling().
     bool mExecuting;
 
+    bool mSuspended;
+
     // We consume graphic buffers from this.
     sp<BufferQueue> mBufferQueue;
 
@@ -169,6 +195,30 @@
     // Tracks codec buffers.
     Vector<CodecBuffer> mCodecBuffers;
 
+    ////
+    friend class AHandlerReflector<GraphicBufferSource>;
+
+    enum {
+        kWhatRepeatLastFrame,
+    };
+
+    int64_t mRepeatAfterUs;
+
+    sp<ALooper> mLooper;
+    sp<AHandlerReflector<GraphicBufferSource> > mReflector;
+
+    int32_t mRepeatLastFrameGeneration;
+
+    int mLatestSubmittedBufferId;
+    uint64_t mLatestSubmittedBufferFrameNum;
+    int32_t mLatestSubmittedBufferUseCount;
+
+    // The previously submitted buffer should've been repeated but
+    // no codec buffer was available at the time.
+    bool mRepeatBufferDeferred;
+
+    void onMessageReceived(const sp<AMessage> &msg);
+
     DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
 };
 
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 3987ead..aaa9f89 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -345,6 +345,13 @@
             port_index, graphicBuffer, buffer);
 }
 
+status_t OMX::updateGraphicBufferInMeta(
+        node_id node, OMX_U32 port_index,
+        const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
+    return findInstance(node)->updateGraphicBufferInMeta(
+            port_index, graphicBuffer, buffer);
+}
+
 status_t OMX::createInputSurface(
         node_id node, OMX_U32 port_index,
         sp<IGraphicBufferProducer> *bufferProducer) {
@@ -396,6 +403,15 @@
             parameter_name, index);
 }
 
+status_t OMX::setInternalOption(
+        node_id node,
+        OMX_U32 port_index,
+        InternalOptionType type,
+        const void *data,
+        size_t size) {
+    return findInstance(node)->setInternalOption(port_index, type, data, size);
+}
+
 OMX_ERRORTYPE OMX::OnEvent(
         node_id node,
         OMX_IN OMX_EVENTTYPE eEvent,
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index a9eb94f..ef683a0 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -70,6 +70,10 @@
                header->nFilledLen);
     }
 
+    void setGraphicBuffer(const sp<GraphicBuffer> &graphicBuffer) {
+        mGraphicBuffer = graphicBuffer;
+    }
+
 private:
     sp<GraphicBuffer> mGraphicBuffer;
     sp<IMemory> mMem;
@@ -238,6 +242,18 @@
 
 status_t OMXNodeInstance::sendCommand(
         OMX_COMMANDTYPE cmd, OMX_S32 param) {
+    const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
+    if (bufferSource != NULL
+            && cmd == OMX_CommandStateSet
+            && param == OMX_StateLoaded) {
+        // Initiating transition from Executing -> Loaded
+        // Buffers are about to be freed.
+        bufferSource->omxLoaded();
+        setGraphicBufferSource(NULL);
+
+        // fall through
+    }
+
     Mutex::Autolock autoLock(mLock);
 
     OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL);
@@ -554,6 +570,22 @@
     return OK;
 }
 
+status_t OMXNodeInstance::updateGraphicBufferInMeta(
+        OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
+        OMX::buffer_id buffer) {
+    Mutex::Autolock autoLock(mLock);
+
+    OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)(buffer);
+    VideoDecoderOutputMetaData *metadata =
+        (VideoDecoderOutputMetaData *)(header->pBuffer);
+    BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+    bufferMeta->setGraphicBuffer(graphicBuffer);
+    metadata->eType = kMetadataBufferTypeGrallocSource;
+    metadata->pHandle = graphicBuffer->handle;
+
+    return OK;
+}
+
 status_t OMXNodeInstance::createInputSurface(
         OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer) {
     Mutex::Autolock autolock(mLock);
@@ -584,7 +616,8 @@
     CHECK(oerr == OMX_ErrorNone);
 
     if (def.format.video.eColorFormat != OMX_COLOR_FormatAndroidOpaque) {
-        ALOGE("createInputSurface requires AndroidOpaque color format");
+        ALOGE("createInputSurface requires COLOR_FormatSurface "
+              "(AndroidOpaque) color format");
         return INVALID_OPERATION;
     }
 
@@ -769,6 +802,47 @@
     return StatusFromOMXError(err);
 }
 
+status_t OMXNodeInstance::setInternalOption(
+        OMX_U32 portIndex,
+        IOMX::InternalOptionType type,
+        const void *data,
+        size_t size) {
+    switch (type) {
+        case IOMX::INTERNAL_OPTION_SUSPEND:
+        case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
+        {
+            const sp<GraphicBufferSource> &bufferSource =
+                getGraphicBufferSource();
+
+            if (bufferSource == NULL || portIndex != kPortIndexInput) {
+                return ERROR_UNSUPPORTED;
+            }
+
+            if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
+                if (size != sizeof(bool)) {
+                    return INVALID_OPERATION;
+                }
+
+                bool suspend = *(bool *)data;
+                bufferSource->suspend(suspend);
+            } else {
+                if (size != sizeof(int64_t)) {
+                    return INVALID_OPERATION;
+                }
+
+                int64_t delayUs = *(int64_t *)data;
+
+                return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
+            }
+
+            return OK;
+        }
+
+        default:
+            return ERROR_UNSUPPORTED;
+    }
+}
+
 void OMXNodeInstance::onMessage(const omx_message &msg) {
     if (msg.type == omx_message::FILL_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
@@ -818,16 +892,11 @@
         OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
     const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
 
-    if (bufferSource != NULL && event == OMX_EventCmdComplete &&
-            arg1 == OMX_CommandStateSet) {
-        if (arg2 == OMX_StateExecuting) {
-            bufferSource->omxExecuting();
-        } else if (arg2 == OMX_StateLoaded) {
-            // Must be shutting down -- won't have a GraphicBufferSource
-            // on the way up.
-            bufferSource->omxLoaded();
-            setGraphicBufferSource(NULL);
-        }
+    if (bufferSource != NULL
+            && event == OMX_EventCmdComplete
+            && arg1 == OMX_CommandStateSet
+            && arg2 == OMX_StateExecuting) {
+        bufferSource->omxExecuting();
     }
 }
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index b3fe98e..d6cde73 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -50,8 +50,9 @@
     { "OMX.google.mpeg4.encoder", "mpeg4enc", "video_encoder.mpeg4" },
     { "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
     { "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
-    { "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" },
-    { "OMX.google.vpx.encoder", "vpxenc", "video_encoder.vpx" },
+    { "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
+    { "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
+    { "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
     { "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
     { "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
     { "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
new file mode 100644
index 0000000..08a3d42
--- /dev/null
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftVideoDecoderOMXComponent"
+#include <utils/Log.h>
+
+#include "include/SoftVideoDecoderOMXComponent.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftVideoDecoderOMXComponent::SoftVideoDecoderOMXComponent(
+        const char *name,
+        const char *componentRole,
+        OMX_VIDEO_CODINGTYPE codingType,
+        const CodecProfileLevel *profileLevels,
+        size_t numProfileLevels,
+        int32_t width,
+        int32_t height,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+        : SimpleSoftOMXComponent(name, callbacks, appData, component),
+        mWidth(width),
+        mHeight(height),
+        mCropLeft(0),
+        mCropTop(0),
+        mCropWidth(width),
+        mCropHeight(height),
+        mOutputPortSettingsChange(NONE),
+        mComponentRole(componentRole),
+        mCodingType(codingType),
+        mProfileLevels(profileLevels),
+        mNumProfileLevels(numProfileLevels) {
+}
+
+void SoftVideoDecoderOMXComponent::initPorts(
+        OMX_U32 numInputBuffers,
+        OMX_U32 inputBufferSize,
+        OMX_U32 numOutputBuffers,
+        const char *mimeType) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = kInputPortIndex;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = numInputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = inputBufferSize;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.video.cMIMEType = const_cast<char *>(mimeType);
+    def.format.video.pNativeRender = NULL;
+    /* size is initialized in updatePortDefinitions() */
+    def.format.video.nBitrate = 0;
+    def.format.video.xFramerate = 0;
+    def.format.video.bFlagErrorConcealment = OMX_FALSE;
+    def.format.video.eCompressionFormat = mCodingType;
+    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
+    def.format.video.pNativeWindow = NULL;
+
+    addPort(def);
+
+    def.nPortIndex = kOutputPortIndex;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = numOutputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.video.cMIMEType = const_cast<char *>("video/raw");
+    def.format.video.pNativeRender = NULL;
+    /* size is initialized in updatePortDefinitions() */
+    def.format.video.nBitrate = 0;
+    def.format.video.xFramerate = 0;
+    def.format.video.bFlagErrorConcealment = OMX_FALSE;
+    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
+    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
+    def.format.video.pNativeWindow = NULL;
+
+    addPort(def);
+
+    updatePortDefinitions();
+}
+
+void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
+    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+    def->format.video.nFrameWidth = mWidth;
+    def->format.video.nFrameHeight = mHeight;
+    def->format.video.nStride = def->format.video.nFrameWidth;
+    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+
+    def = &editPortInfo(kOutputPortIndex)->mDef;
+    def->format.video.nFrameWidth = mWidth;
+    def->format.video.nFrameHeight = mHeight;
+    def->format.video.nStride = def->format.video.nFrameWidth;
+    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+
+    def->nBufferSize =
+            (def->format.video.nFrameWidth *
+             def->format.video.nFrameHeight * 3) / 2;
+
+    mCropLeft = 0;
+    mCropTop = 0;
+    mCropWidth = mWidth;
+    mCropHeight = mHeight;
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamVideoPortFormat:
+        {
+            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > kMaxPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex != 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if (formatParams->nPortIndex == kInputPortIndex) {
+                formatParams->eCompressionFormat = mCodingType;
+                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
+                formatParams->xFramerate = 0;
+            } else {
+                CHECK_EQ(formatParams->nPortIndex, 1u);
+
+                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
+                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
+                formatParams->xFramerate = 0;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoProfileLevelQuerySupported:
+        {
+            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
+                  (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
+
+            if (profileLevel->nPortIndex != kInputPortIndex) {
+                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            if (index >= mNumProfileLevels) {
+                return OMX_ErrorNoMore;
+            }
+
+            profileLevel->eProfile = mProfileLevels[index].mProfile;
+            profileLevel->eLevel   = mProfileLevels[index].mLevel;
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        mComponentRole,
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoPortFormat:
+        {
+            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > kMaxPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex != 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getConfig(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexConfigCommonOutputCrop:
+        {
+            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
+
+            if (rectParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            rectParams->nLeft = mCropLeft;
+            rectParams->nTop = mCropTop;
+            rectParams->nWidth = mCropWidth;
+            rectParams->nHeight = mCropHeight;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return OMX_ErrorUnsupportedIndex;
+    }
+}
+
+void SoftVideoDecoderOMXComponent::onReset() {
+    mOutputPortSettingsChange = NONE;
+}
+
+void SoftVideoDecoderOMXComponent::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+    if (portIndex != kOutputPortIndex) {
+        return;
+    }
+
+    switch (mOutputPortSettingsChange) {
+        case NONE:
+            break;
+
+        case AWAITING_DISABLED:
+        {
+            CHECK(!enabled);
+            mOutputPortSettingsChange = AWAITING_ENABLED;
+            break;
+        }
+
+        default:
+        {
+            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+            CHECK(enabled);
+            mOutputPortSettingsChange = NONE;
+            break;
+        }
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 6cca8da..4bee808 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -449,7 +449,8 @@
         { "video_decoder.avc", "video/avc" },
         { "video_decoder.mpeg4", "video/mp4v-es" },
         { "video_decoder.h263", "video/3gpp" },
-        { "video_decoder.vpx", "video/x-vnd.on2.vp8" },
+        { "video_decoder.vp8", "video/x-vnd.on2.vp8" },
+        { "video_decoder.vp9", "video/x-vnd.on2.vp9" },
 
         // we appear to use this as a synonym to amrnb.
         { "audio_decoder.amr", "audio/3gpp" },
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 8b209c4..5116550 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -60,6 +60,7 @@
         ALOGE("Connection is still open, closing the socket.");
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -214,6 +215,7 @@
     if (mState != DISCONNECTED) {
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -266,6 +268,7 @@
     if (mUIDValid) {
         HTTPBase::RegisterSocketUserTag(mSocket, mUID,
                                         (uint32_t)*(uint32_t*) "RTSP");
+        HTTPBase::RegisterSocketUserMark(mSocket, mUID);
     }
 
     MakeSocketBlocking(mSocket, false);
@@ -295,6 +298,7 @@
 
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
@@ -312,6 +316,7 @@
 void ARTSPConnection::performDisconnect() {
     if (mUIDValid) {
         HTTPBase::UnRegisterSocketUserTag(mSocket);
+        HTTPBase::UnRegisterSocketUserMark(mSocket);
     }
     close(mSocket);
     mSocket = -1;
@@ -385,6 +390,7 @@
         mState = DISCONNECTED;
         if (mUIDValid) {
             HTTPBase::UnRegisterSocketUserTag(mSocket);
+            HTTPBase::UnRegisterSocketUserMark(mSocket);
         }
         close(mSocket);
         mSocket = -1;
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 9e2724d..e77c69c 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -51,7 +51,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= rtp_test
 
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index e51d9e3..946f602 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -712,7 +712,9 @@
                             // Clear the tag
                             if (mUIDValid) {
                                 HTTPBase::UnRegisterSocketUserTag(track->mRTPSocket);
+                                HTTPBase::UnRegisterSocketUserMark(track->mRTPSocket);
                                 HTTPBase::UnRegisterSocketUserTag(track->mRTCPSocket);
+                                HTTPBase::UnRegisterSocketUserMark(track->mRTCPSocket);
                             }
 
                             close(track->mRTPSocket);
@@ -843,7 +845,9 @@
                         // Clear the tag
                         if (mUIDValid) {
                             HTTPBase::UnRegisterSocketUserTag(info->mRTPSocket);
+                            HTTPBase::UnRegisterSocketUserMark(info->mRTPSocket);
                             HTTPBase::UnRegisterSocketUserTag(info->mRTCPSocket);
+                            HTTPBase::UnRegisterSocketUserMark(info->mRTCPSocket);
                         }
 
                         close(info->mRTPSocket);
@@ -1599,6 +1603,8 @@
                                                 (uint32_t)*(uint32_t*) "RTP_");
                 HTTPBase::RegisterSocketUserTag(info->mRTCPSocket, mUID,
                                                 (uint32_t)*(uint32_t*) "RTP_");
+                HTTPBase::RegisterSocketUserMark(info->mRTPSocket, mUID);
+                HTTPBase::RegisterSocketUserMark(info->mRTCPSocket, mUID);
             }
 
             request.append("Transport: RTP/AVP/UDP;unicast;client_port=");
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index a5459fe..49ffcd6 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -23,6 +23,8 @@
 #include <fcntl.h>
 #include <unistd.h>
 
+#include <GLES2/gl2.h>
+
 #include <media/stagefright/SurfaceMediaSource.h>
 #include <media/mediarecorder.h>
 
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index 061ae89..c7d107e 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -3,11 +3,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-        ANetworkSession.cpp             \
+        MediaReceiver.cpp               \
         MediaSender.cpp                 \
         Parameters.cpp                  \
-        ParsedMessage.cpp               \
+        rtp/RTPAssembler.cpp            \
+        rtp/RTPReceiver.cpp             \
         rtp/RTPSender.cpp               \
+        sink/DirectRenderer.cpp         \
+        sink/WifiDisplaySink.cpp        \
+        SNTPClient.cpp                  \
+        TimeSyncer.cpp                  \
         source/Converter.cpp            \
         source/MediaPuller.cpp          \
         source/PlaybackSession.cpp      \
@@ -57,6 +62,67 @@
 
 LOCAL_MODULE:= wfd
 
-LOCAL_MODULE_TAGS := debug
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        udptest.cpp                 \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= udptest
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        rtptest.cpp                 \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= rtptest
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        nettest.cpp                     \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= nettest
 
 include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/wifi-display/MediaReceiver.cpp b/media/libstagefright/wifi-display/MediaReceiver.cpp
new file mode 100644
index 0000000..5524235
--- /dev/null
+++ b/media/libstagefright/wifi-display/MediaReceiver.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaReceiver"
+#include <utils/Log.h>
+
+#include "MediaReceiver.h"
+
+#include "AnotherPacketSource.h"
+#include "rtp/RTPReceiver.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+MediaReceiver::MediaReceiver(
+        const sp<ANetworkSession> &netSession,
+        const sp<AMessage> &notify)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mMode(MODE_UNDEFINED),
+      mGeneration(0),
+      mInitStatus(OK),
+      mInitDoneCount(0) {
+}
+
+MediaReceiver::~MediaReceiver() {
+}
+
+ssize_t MediaReceiver::addTrack(
+        RTPReceiver::TransportMode rtpMode,
+        RTPReceiver::TransportMode rtcpMode,
+        int32_t *localRTPPort) {
+    if (mMode != MODE_UNDEFINED) {
+        return INVALID_OPERATION;
+    }
+
+    size_t trackIndex = mTrackInfos.size();
+
+    TrackInfo info;
+
+    sp<AMessage> notify = new AMessage(kWhatReceiverNotify, id());
+    notify->setInt32("generation", mGeneration);
+    notify->setSize("trackIndex", trackIndex);
+
+    info.mReceiver = new RTPReceiver(mNetSession, notify);
+    looper()->registerHandler(info.mReceiver);
+
+    info.mReceiver->registerPacketType(
+            33, RTPReceiver::PACKETIZATION_TRANSPORT_STREAM);
+
+    info.mReceiver->registerPacketType(
+            96, RTPReceiver::PACKETIZATION_AAC);
+
+    info.mReceiver->registerPacketType(
+            97, RTPReceiver::PACKETIZATION_H264);
+
+    status_t err = info.mReceiver->initAsync(
+            rtpMode,
+            rtcpMode,
+            localRTPPort);
+
+    if (err != OK) {
+        looper()->unregisterHandler(info.mReceiver->id());
+        info.mReceiver.clear();
+
+        return err;
+    }
+
+    mTrackInfos.push_back(info);
+
+    return trackIndex;
+}
+
+status_t MediaReceiver::connectTrack(
+        size_t trackIndex,
+        const char *remoteHost,
+        int32_t remoteRTPPort,
+        int32_t remoteRTCPPort) {
+    if (trackIndex >= mTrackInfos.size()) {
+        return -ERANGE;
+    }
+
+    TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
+    return info->mReceiver->connect(remoteHost, remoteRTPPort, remoteRTCPPort);
+}
+
+status_t MediaReceiver::initAsync(Mode mode) {
+    if ((mode == MODE_TRANSPORT_STREAM || mode == MODE_TRANSPORT_STREAM_RAW)
+            && mTrackInfos.size() > 1) {
+        return INVALID_OPERATION;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatInit, id());
+    msg->setInt32("mode", mode);
+    msg->post();
+
+    return OK;
+}
+
+void MediaReceiver::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatInit:
+        {
+            int32_t mode;
+            CHECK(msg->findInt32("mode", &mode));
+
+            CHECK_EQ(mMode, MODE_UNDEFINED);
+            mMode = (Mode)mode;
+
+            if (mInitStatus != OK || mInitDoneCount == mTrackInfos.size()) {
+                notifyInitDone(mInitStatus);
+            }
+
+            mTSParser = new ATSParser(
+                    ATSParser::ALIGNED_VIDEO_DATA
+                        | ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+
+            mFormatKnownMask = 0;
+            break;
+        }
+
+        case kWhatReceiverNotify:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation != mGeneration) {
+                break;
+            }
+
+            onReceiverNotify(msg);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void MediaReceiver::onReceiverNotify(const sp<AMessage> &msg) {
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case RTPReceiver::kWhatInitDone:
+        {
+            ++mInitDoneCount;
+
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            if (err != OK) {
+                mInitStatus = err;
+                ++mGeneration;
+            }
+
+            if (mMode != MODE_UNDEFINED) {
+                if (mInitStatus != OK || mInitDoneCount == mTrackInfos.size()) {
+                    notifyInitDone(mInitStatus);
+                }
+            }
+            break;
+        }
+
+        case RTPReceiver::kWhatError:
+        {
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            notifyError(err);
+            break;
+        }
+
+        case RTPReceiver::kWhatAccessUnit:
+        {
+            size_t trackIndex;
+            CHECK(msg->findSize("trackIndex", &trackIndex));
+
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            int32_t followsDiscontinuity;
+            if (!msg->findInt32(
+                        "followsDiscontinuity", &followsDiscontinuity)) {
+                followsDiscontinuity = 0;
+            }
+
+            if (mMode == MODE_TRANSPORT_STREAM) {
+                if (followsDiscontinuity) {
+                    mTSParser->signalDiscontinuity(
+                            ATSParser::DISCONTINUITY_TIME, NULL /* extra */);
+                }
+
+                for (size_t offset = 0;
+                        offset < accessUnit->size(); offset += 188) {
+                    status_t err = mTSParser->feedTSPacket(
+                             accessUnit->data() + offset, 188);
+
+                    if (err != OK) {
+                        notifyError(err);
+                        break;
+                    }
+                }
+
+                drainPackets(0 /* trackIndex */, ATSParser::VIDEO);
+                drainPackets(1 /* trackIndex */, ATSParser::AUDIO);
+            } else {
+                postAccessUnit(trackIndex, accessUnit, NULL);
+            }
+            break;
+        }
+
+        case RTPReceiver::kWhatPacketLost:
+        {
+            notifyPacketLost();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void MediaReceiver::drainPackets(
+        size_t trackIndex, ATSParser::SourceType type) {
+    sp<AnotherPacketSource> source =
+        static_cast<AnotherPacketSource *>(
+                mTSParser->getSource(type).get());
+
+    if (source == NULL) {
+        return;
+    }
+
+    sp<AMessage> format;
+    if (!(mFormatKnownMask & (1ul << trackIndex))) {
+        sp<MetaData> meta = source->getFormat();
+        CHECK(meta != NULL);
+
+        CHECK_EQ((status_t)OK, convertMetaDataToMessage(meta, &format));
+
+        mFormatKnownMask |= 1ul << trackIndex;
+    }
+
+    status_t finalResult;
+    while (source->hasBufferAvailable(&finalResult)) {
+        sp<ABuffer> accessUnit;
+        status_t err = source->dequeueAccessUnit(&accessUnit);
+        if (err == OK) {
+            postAccessUnit(trackIndex, accessUnit, format);
+            format.clear();
+        } else if (err != INFO_DISCONTINUITY) {
+            notifyError(err);
+        }
+    }
+
+    if (finalResult != OK) {
+        notifyError(finalResult);
+    }
+}
+
+void MediaReceiver::notifyInitDone(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatInitDone);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void MediaReceiver::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void MediaReceiver::notifyPacketLost() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatPacketLost);
+    notify->post();
+}
+
+void MediaReceiver::postAccessUnit(
+        size_t trackIndex,
+        const sp<ABuffer> &accessUnit,
+        const sp<AMessage> &format) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatAccessUnit);
+    notify->setSize("trackIndex", trackIndex);
+    notify->setBuffer("accessUnit", accessUnit);
+
+    if (format != NULL) {
+        notify->setMessage("format", format);
+    }
+
+    notify->post();
+}
+
+status_t MediaReceiver::informSender(
+        size_t trackIndex, const sp<AMessage> &params) {
+    if (trackIndex >= mTrackInfos.size()) {
+        return -ERANGE;
+    }
+
+    TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
+    return info->mReceiver->informSender(params);
+}
+
+}  // namespace android
+
+
diff --git a/media/libstagefright/wifi-display/MediaReceiver.h b/media/libstagefright/wifi-display/MediaReceiver.h
new file mode 100644
index 0000000..afbb407
--- /dev/null
+++ b/media/libstagefright/wifi-display/MediaReceiver.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include "ATSParser.h"
+#include "rtp/RTPReceiver.h"
+
+namespace android {
+
+struct ABuffer;
+struct ANetworkSession;
+struct AMessage;
+struct ATSParser;
+
+// This class facilitates receiving of media data for one or more tracks
+// over RTP. Either a 1:1 track to RTP channel mapping is used or a single
+// RTP channel provides the data for a transport stream that is consequently
+// demuxed and its track's data provided to the observer.
+struct MediaReceiver : public AHandler {
+    enum {
+        kWhatInitDone,
+        kWhatError,
+        kWhatAccessUnit,
+        kWhatPacketLost,
+    };
+
+    MediaReceiver(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify);
+
+    ssize_t addTrack(
+            RTPReceiver::TransportMode rtpMode,
+            RTPReceiver::TransportMode rtcpMode,
+            int32_t *localRTPPort);
+
+    status_t connectTrack(
+            size_t trackIndex,
+            const char *remoteHost,
+            int32_t remoteRTPPort,
+            int32_t remoteRTCPPort);
+
+    enum Mode {
+        MODE_UNDEFINED,
+        MODE_TRANSPORT_STREAM,
+        MODE_TRANSPORT_STREAM_RAW,
+        MODE_ELEMENTARY_STREAMS,
+    };
+    status_t initAsync(Mode mode);
+
+    status_t informSender(size_t trackIndex, const sp<AMessage> &params);
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+    virtual ~MediaReceiver();
+
+private:
+    enum {
+        kWhatInit,
+        kWhatReceiverNotify,
+    };
+
+    struct TrackInfo {
+        sp<RTPReceiver> mReceiver;
+    };
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+
+    Mode mMode;
+    int32_t mGeneration;
+
+    Vector<TrackInfo> mTrackInfos;
+
+    status_t mInitStatus;
+    size_t mInitDoneCount;
+
+    sp<ATSParser> mTSParser;
+    uint32_t mFormatKnownMask;
+
+    void onReceiverNotify(const sp<AMessage> &msg);
+
+    void drainPackets(size_t trackIndex, ATSParser::SourceType type);
+
+    void notifyInitDone(status_t err);
+    void notifyError(status_t err);
+    void notifyPacketLost();
+
+    void postAccessUnit(
+            size_t trackIndex,
+            const sp<ABuffer> &accessUnit,
+            const sp<AMessage> &format);
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaReceiver);
+};
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 8a3566f..b1cdec0 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -20,16 +20,18 @@
 
 #include "MediaSender.h"
 
-#include "ANetworkSession.h"
 #include "rtp/RTPSender.h"
 #include "source/TSPacketizer.h"
 
 #include "include/avc_utils.h"
 
 #include <media/IHDCP.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <ui/GraphicBuffer.h>
 
 namespace android {
 
@@ -341,6 +343,22 @@
             break;
         }
 
+        case kWhatInformSender:
+        {
+            int64_t avgLatencyUs;
+            CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
+
+            int64_t maxLatencyUs;
+            CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatInformSender);
+            notify->setInt64("avgLatencyUs", avgLatencyUs);
+            notify->setInt64("maxLatencyUs", maxLatencyUs);
+            notify->post();
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -392,11 +410,36 @@
                     info.mPacketizerTrackIndex, accessUnit);
         }
 
-        status_t err = mHDCP->encrypt(
-                accessUnit->data(), accessUnit->size(),
-                trackIndex  /* streamCTR */,
-                &inputCTR,
-                accessUnit->data());
+        status_t err;
+        native_handle_t* handle;
+        if (accessUnit->meta()->findPointer("handle", (void**)&handle)
+                && handle != NULL) {
+            int32_t rangeLength, rangeOffset;
+            sp<AMessage> notify;
+            CHECK(accessUnit->meta()->findInt32("rangeOffset", &rangeOffset));
+            CHECK(accessUnit->meta()->findInt32("rangeLength", &rangeLength));
+            CHECK(accessUnit->meta()->findMessage("notify", &notify)
+                    && notify != NULL);
+            CHECK_GE(accessUnit->size(), rangeLength);
+
+            sp<GraphicBuffer> grbuf(new GraphicBuffer(
+                    rangeOffset + rangeLength, 1, HAL_PIXEL_FORMAT_Y8,
+                    GRALLOC_USAGE_HW_VIDEO_ENCODER, rangeOffset + rangeLength,
+                    handle, false));
+
+            err = mHDCP->encryptNative(
+                    grbuf, rangeOffset, rangeLength,
+                    trackIndex  /* streamCTR */,
+                    &inputCTR,
+                    accessUnit->data());
+            notify->post();
+        } else {
+            err = mHDCP->encrypt(
+                    accessUnit->data(), accessUnit->size(),
+                    trackIndex  /* streamCTR */,
+                    &inputCTR,
+                    accessUnit->data());
+        }
 
         if (err != OK) {
             ALOGE("Failed to HDCP-encrypt media data (err %d)",
diff --git a/media/libstagefright/wifi-display/MediaSender.h b/media/libstagefright/wifi-display/MediaSender.h
index 64722c5..04538ea 100644
--- a/media/libstagefright/wifi-display/MediaSender.h
+++ b/media/libstagefright/wifi-display/MediaSender.h
@@ -43,6 +43,7 @@
         kWhatInitDone,
         kWhatError,
         kWhatNetworkStall,
+        kWhatInformSender,
     };
 
     MediaSender(
diff --git a/media/libstagefright/wifi-display/SNTPClient.cpp b/media/libstagefright/wifi-display/SNTPClient.cpp
new file mode 100644
index 0000000..5c0af6a
--- /dev/null
+++ b/media/libstagefright/wifi-display/SNTPClient.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SNTPClient.h"
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+namespace android {
+
+SNTPClient::SNTPClient() {
+}
+
+status_t SNTPClient::requestTime(const char *host) {
+    struct hostent *ent;
+    int64_t requestTimeNTP, requestTimeUs;
+    ssize_t n;
+    int64_t responseTimeUs, responseTimeNTP;
+    int64_t originateTimeNTP, receiveTimeNTP, transmitTimeNTP;
+    int64_t roundTripTimeNTP, clockOffsetNTP;
+
+    status_t err = UNKNOWN_ERROR;
+
+    int s = socket(AF_INET, SOCK_DGRAM, 0);
+
+    if (s < 0) {
+        err = -errno;
+
+        goto bail;
+    }
+
+    ent = gethostbyname(host);
+
+    if (ent == NULL) {
+        err = -ENOENT;
+        goto bail2;
+    }
+
+    struct sockaddr_in hostAddr;
+    memset(hostAddr.sin_zero, 0, sizeof(hostAddr.sin_zero));
+    hostAddr.sin_family = AF_INET;
+    hostAddr.sin_port = htons(kNTPPort);
+    hostAddr.sin_addr.s_addr = *(in_addr_t *)ent->h_addr;
+
+    uint8_t packet[kNTPPacketSize];
+    memset(packet, 0, sizeof(packet));
+
+    packet[0] = kNTPModeClient | (kNTPVersion << 3);
+
+    requestTimeNTP = getNowNTP();
+    requestTimeUs = ALooper::GetNowUs();
+    writeTimeStamp(&packet[kNTPTransmitTimeOffset], requestTimeNTP);
+
+    n = sendto(
+            s, packet, sizeof(packet), 0,
+            (const struct sockaddr *)&hostAddr, sizeof(hostAddr));
+
+    if (n < 0) {
+        err = -errno;
+        goto bail2;
+    }
+
+    memset(packet, 0, sizeof(packet));
+
+    do {
+        n = recv(s, packet, sizeof(packet), 0);
+    } while (n < 0 && errno == EINTR);
+
+    if (n < 0) {
+        err = -errno;
+        goto bail2;
+    }
+
+    responseTimeUs = ALooper::GetNowUs();
+
+    responseTimeNTP = requestTimeNTP + makeNTP(responseTimeUs - requestTimeUs);
+
+    originateTimeNTP = readTimeStamp(&packet[kNTPOriginateTimeOffset]);
+    receiveTimeNTP = readTimeStamp(&packet[kNTPReceiveTimeOffset]);
+    transmitTimeNTP = readTimeStamp(&packet[kNTPTransmitTimeOffset]);
+
+    roundTripTimeNTP =
+        makeNTP(responseTimeUs - requestTimeUs)
+            - (transmitTimeNTP - receiveTimeNTP);
+
+    clockOffsetNTP =
+        ((receiveTimeNTP - originateTimeNTP)
+            + (transmitTimeNTP - responseTimeNTP)) / 2;
+
+    mTimeReferenceNTP = responseTimeNTP + clockOffsetNTP;
+    mTimeReferenceUs = responseTimeUs;
+    mRoundTripTimeNTP = roundTripTimeNTP;
+
+    err = OK;
+
+bail2:
+    close(s);
+    s = -1;
+
+bail:
+    return err;
+}
+
+int64_t SNTPClient::adjustTimeUs(int64_t timeUs) const {
+    uint64_t nowNTP =
+        mTimeReferenceNTP + makeNTP(timeUs - mTimeReferenceUs);
+
+    int64_t nowUs =
+        (nowNTP >> 32) * 1000000ll
+        + ((nowNTP & 0xffffffff) * 1000000ll) / (1ll << 32);
+
+    nowUs -= ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
+
+    return nowUs;
+}
+
+// static
+void SNTPClient::writeTimeStamp(uint8_t *dst, uint64_t ntpTime) {
+    *dst++ = (ntpTime >> 56) & 0xff;
+    *dst++ = (ntpTime >> 48) & 0xff;
+    *dst++ = (ntpTime >> 40) & 0xff;
+    *dst++ = (ntpTime >> 32) & 0xff;
+    *dst++ = (ntpTime >> 24) & 0xff;
+    *dst++ = (ntpTime >> 16) & 0xff;
+    *dst++ = (ntpTime >> 8) & 0xff;
+    *dst++ = ntpTime & 0xff;
+}
+
+// static
+uint64_t SNTPClient::readTimeStamp(const uint8_t *dst) {
+    return U64_AT(dst);
+}
+
+// static
+uint64_t SNTPClient::getNowNTP() {
+    struct timeval tv;
+    gettimeofday(&tv, NULL /* time zone */);
+
+    uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
+
+    nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
+
+    return makeNTP(nowUs);
+}
+
+// static
+uint64_t SNTPClient::makeNTP(uint64_t deltaUs) {
+    uint64_t hi = deltaUs / 1000000ll;
+    uint64_t lo = ((1ll << 32) * (deltaUs % 1000000ll)) / 1000000ll;
+
+    return (hi << 32) | lo;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/SNTPClient.h b/media/libstagefright/wifi-display/SNTPClient.h
new file mode 100644
index 0000000..967d1fc
--- /dev/null
+++ b/media/libstagefright/wifi-display/SNTPClient.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SNTP_CLIENT_H_
+
+#define SNTP_CLIENT_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+// Implementation of the SNTP (Simple Network Time Protocol)
+struct SNTPClient {
+    SNTPClient();
+
+    status_t requestTime(const char *host);
+
+    // given a time obtained from ALooper::GetNowUs()
+    // return the number of us elapsed since Jan 1 1970 00:00:00 (UTC).
+    int64_t adjustTimeUs(int64_t timeUs) const;
+
+private:
+    enum {
+        kNTPPort = 123,
+        kNTPPacketSize = 48,
+        kNTPModeClient = 3,
+        kNTPVersion = 3,
+        kNTPTransmitTimeOffset = 40,
+        kNTPOriginateTimeOffset = 24,
+        kNTPReceiveTimeOffset = 32,
+    };
+
+    uint64_t mTimeReferenceNTP;
+    int64_t mTimeReferenceUs;
+    int64_t mRoundTripTimeNTP;
+
+    static void writeTimeStamp(uint8_t *dst, uint64_t ntpTime);
+    static uint64_t readTimeStamp(const uint8_t *dst);
+
+    static uint64_t getNowNTP();
+    static uint64_t makeNTP(uint64_t deltaUs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(SNTPClient);
+};
+
+}  // namespace android
+
+#endif  // SNTP_CLIENT_H_
diff --git a/media/libstagefright/wifi-display/TimeSyncer.cpp b/media/libstagefright/wifi-display/TimeSyncer.cpp
new file mode 100644
index 0000000..0f4d93a
--- /dev/null
+++ b/media/libstagefright/wifi-display/TimeSyncer.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NEBUG 0
+#define LOG_TAG "TimeSyncer"
+#include <utils/Log.h>
+
+#include "TimeSyncer.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+TimeSyncer::TimeSyncer(
+        const sp<ANetworkSession> &netSession, const sp<AMessage> &notify)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mIsServer(false),
+      mConnected(false),
+      mUDPSession(0),
+      mSeqNo(0),
+      mTotalTimeUs(0.0),
+      mPendingT1(0ll),
+      mTimeoutGeneration(0) {
+}
+
+TimeSyncer::~TimeSyncer() {
+}
+
+void TimeSyncer::startServer(unsigned localPort) {
+    sp<AMessage> msg = new AMessage(kWhatStartServer, id());
+    msg->setInt32("localPort", localPort);
+    msg->post();
+}
+
+void TimeSyncer::startClient(const char *remoteHost, unsigned remotePort) {
+    sp<AMessage> msg = new AMessage(kWhatStartClient, id());
+    msg->setString("remoteHost", remoteHost);
+    msg->setInt32("remotePort", remotePort);
+    msg->post();
+}
+
+void TimeSyncer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStartClient:
+        {
+            AString remoteHost;
+            CHECK(msg->findString("remoteHost", &remoteHost));
+
+            int32_t remotePort;
+            CHECK(msg->findInt32("remotePort", &remotePort));
+
+            sp<AMessage> notify = new AMessage(kWhatUDPNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createUDPSession(
+                         0 /* localPort */,
+                         remoteHost.c_str(),
+                         remotePort,
+                         notify,
+                         &mUDPSession));
+
+            postSendPacket();
+            break;
+        }
+
+        case kWhatStartServer:
+        {
+            mIsServer = true;
+
+            int32_t localPort;
+            CHECK(msg->findInt32("localPort", &localPort));
+
+            sp<AMessage> notify = new AMessage(kWhatUDPNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createUDPSession(
+                         localPort, notify, &mUDPSession));
+
+            break;
+        }
+
+        case kWhatSendPacket:
+        {
+            if (mHistory.size() == 0) {
+                ALOGI("starting batch");
+            }
+
+            TimeInfo ti;
+            memset(&ti, 0, sizeof(ti));
+
+            ti.mT1 = ALooper::GetNowUs();
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->sendRequest(
+                         mUDPSession, &ti, sizeof(ti)));
+
+            mPendingT1 = ti.mT1;
+            postTimeout();
+            break;
+        }
+
+        case kWhatTimedOut:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mTimeoutGeneration) {
+                break;
+            }
+
+            ALOGI("timed out, sending another request");
+            postSendPacket();
+            break;
+        }
+
+        case kWhatUDPNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatError:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    AString detail;
+                    CHECK(msg->findString("detail", &detail));
+
+                    ALOGE("An error occurred in session %d (%d, '%s/%s').",
+                          sessionID,
+                          err,
+                          detail.c_str(),
+                          strerror(-err));
+
+                    mNetSession->destroySession(sessionID);
+
+                    cancelTimeout();
+
+                    notifyError(err);
+                    break;
+                }
+
+                case ANetworkSession::kWhatDatagram:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    sp<ABuffer> packet;
+                    CHECK(msg->findBuffer("data", &packet));
+
+                    int64_t arrivalTimeUs;
+                    CHECK(packet->meta()->findInt64(
+                                "arrivalTimeUs", &arrivalTimeUs));
+
+                    CHECK_EQ(packet->size(), sizeof(TimeInfo));
+
+                    TimeInfo *ti = (TimeInfo *)packet->data();
+
+                    if (mIsServer) {
+                        if (!mConnected) {
+                            AString fromAddr;
+                            CHECK(msg->findString("fromAddr", &fromAddr));
+
+                            int32_t fromPort;
+                            CHECK(msg->findInt32("fromPort", &fromPort));
+
+                            CHECK_EQ((status_t)OK,
+                                     mNetSession->connectUDPSession(
+                                         mUDPSession, fromAddr.c_str(), fromPort));
+
+                            mConnected = true;
+                        }
+
+                        ti->mT2 = arrivalTimeUs;
+                        ti->mT3 = ALooper::GetNowUs();
+
+                        CHECK_EQ((status_t)OK,
+                                 mNetSession->sendRequest(
+                                     mUDPSession, ti, sizeof(*ti)));
+                    } else {
+                        if (ti->mT1 != mPendingT1) {
+                            break;
+                        }
+
+                        cancelTimeout();
+                        mPendingT1 = 0;
+
+                        ti->mT4 = arrivalTimeUs;
+
+                        // One way delay for a packet to travel from client
+                        // to server or back (assumed to be the same either way).
+                        int64_t delay =
+                            (ti->mT2 - ti->mT1 + ti->mT4 - ti->mT3) / 2;
+
+                        // Offset between the client clock (T1, T4) and the
+                        // server clock (T2, T3) timestamps.
+                        int64_t offset =
+                            (ti->mT2 - ti->mT1 - ti->mT4 + ti->mT3) / 2;
+
+                        mHistory.push_back(*ti);
+
+                        ALOGV("delay = %lld us,\toffset %lld us",
+                               delay,
+                               offset);
+
+                        if (mHistory.size() < kNumPacketsPerBatch) {
+                            postSendPacket(1000000ll / 30);
+                        } else {
+                            notifyOffset();
+
+                            ALOGI("batch done");
+
+                            mHistory.clear();
+                            postSendPacket(kBatchDelayUs);
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void TimeSyncer::postSendPacket(int64_t delayUs) {
+    (new AMessage(kWhatSendPacket, id()))->post(delayUs);
+}
+
+void TimeSyncer::postTimeout() {
+    sp<AMessage> msg = new AMessage(kWhatTimedOut, id());
+    msg->setInt32("generation", mTimeoutGeneration);
+    msg->post(kTimeoutDelayUs);
+}
+
+void TimeSyncer::cancelTimeout() {
+    ++mTimeoutGeneration;
+}
+
+void TimeSyncer::notifyError(status_t err) {
+    if (mNotify == NULL) {
+        looper()->stop();
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+// static
+int TimeSyncer::CompareRountripTime(const TimeInfo *ti1, const TimeInfo *ti2) {
+    int64_t rt1 = ti1->mT4 - ti1->mT1;
+    int64_t rt2 = ti2->mT4 - ti2->mT1;
+
+    if (rt1 < rt2) {
+        return -1;
+    } else if (rt1 > rt2) {
+        return 1;
+    }
+
+    return 0;
+}
+
+void TimeSyncer::notifyOffset() {
+    mHistory.sort(CompareRountripTime);
+
+    int64_t sum = 0ll;
+    size_t count = 0;
+
+    // Only consider the third of the information associated with the best
+    // (smallest) roundtrip times.
+    for (size_t i = 0; i < mHistory.size() / 3; ++i) {
+        const TimeInfo *ti = &mHistory[i];
+
+#if 0
+        // One way delay for a packet to travel from client
+        // to server or back (assumed to be the same either way).
+        int64_t delay =
+            (ti->mT2 - ti->mT1 + ti->mT4 - ti->mT3) / 2;
+#endif
+
+        // Offset between the client clock (T1, T4) and the
+        // server clock (T2, T3) timestamps.
+        int64_t offset =
+            (ti->mT2 - ti->mT1 - ti->mT4 + ti->mT3) / 2;
+
+        ALOGV("(%d) RT: %lld us, offset: %lld us",
+              i, ti->mT4 - ti->mT1, offset);
+
+        sum += offset;
+        ++count;
+    }
+
+    if (mNotify == NULL) {
+        ALOGI("avg. offset is %lld", sum / count);
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatTimeOffset);
+    notify->setInt64("offset", sum / count);
+    notify->post();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/wifi-display/TimeSyncer.h b/media/libstagefright/wifi-display/TimeSyncer.h
new file mode 100644
index 0000000..4e7571f
--- /dev/null
+++ b/media/libstagefright/wifi-display/TimeSyncer.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIME_SYNCER_H_
+
+#define TIME_SYNCER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ANetworkSession;
+
+/*
+   TimeSyncer allows us to synchronize time between a client and a server.
+   The client sends a UDP packet containing its send-time to the server,
+   the server sends that packet back to the client amended with information
+   about when it was received as well as the time the reply was sent back.
+   Finally the client receives the reply and has now enough information to
+   compute the clock offset between client and server assuming that packet
+   exchange is symmetric, i.e. time for a packet client->server and
+   server->client is roughly equal.
+   This exchange is repeated a number of times and the average offset computed
+   over the 30% of packets that had the lowest roundtrip times.
+   The offset is determined every 10 secs to account for slight differences in
+   clock frequency.
+*/
+struct TimeSyncer : public AHandler {
+    enum {
+        kWhatError,
+        kWhatTimeOffset,
+    };
+    TimeSyncer(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify);
+
+    void startServer(unsigned localPort);
+    void startClient(const char *remoteHost, unsigned remotePort);
+
+protected:
+    virtual ~TimeSyncer();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatStartServer,
+        kWhatStartClient,
+        kWhatUDPNotify,
+        kWhatSendPacket,
+        kWhatTimedOut,
+    };
+
+    struct TimeInfo {
+        int64_t mT1;  // client timestamp at send
+        int64_t mT2;  // server timestamp at receive
+        int64_t mT3;  // server timestamp at send
+        int64_t mT4;  // client timestamp at receive
+    };
+
+    enum {
+        kNumPacketsPerBatch = 30,
+    };
+    static const int64_t kTimeoutDelayUs = 500000ll;
+    static const int64_t kBatchDelayUs = 60000000ll;  // every minute
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+
+    bool mIsServer;
+    bool mConnected;
+    int32_t mUDPSession;
+    uint32_t mSeqNo;
+    double mTotalTimeUs;
+
+    Vector<TimeInfo> mHistory;
+
+    int64_t mPendingT1;
+    int32_t mTimeoutGeneration;
+
+    void postSendPacket(int64_t delayUs = 0ll);
+
+    void postTimeout();
+    void cancelTimeout();
+
+    void notifyError(status_t err);
+    void notifyOffset();
+
+    static int CompareRountripTime(const TimeInfo *ti1, const TimeInfo *ti2);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TimeSyncer);
+};
+
+}  // namespace android
+
+#endif  // TIME_SYNCER_H_
diff --git a/media/libstagefright/wifi-display/VideoFormats.cpp b/media/libstagefright/wifi-display/VideoFormats.cpp
index 458b163..04e02c1 100644
--- a/media/libstagefright/wifi-display/VideoFormats.cpp
+++ b/media/libstagefright/wifi-display/VideoFormats.cpp
@@ -24,7 +24,8 @@
 
 namespace android {
 
-VideoFormats::config_t VideoFormats::mConfigs[][32] = {
+// static
+const VideoFormats::config_t VideoFormats::mResolutionTable[][32] = {
     {
         // CEA Resolutions
         { 640, 480, 60, false, 0, 0},
@@ -133,6 +134,8 @@
 };
 
 VideoFormats::VideoFormats() {
+    memcpy(mConfigs, mResolutionTable, sizeof(mConfigs));
+
     for (size_t i = 0; i < kNumResolutionTypes; ++i) {
         mResolutionEnabled[i] = 0;
     }
@@ -175,6 +178,29 @@
     }
 }
 
+void VideoFormats::enableResolutionUpto(
+        ResolutionType type, size_t index,
+        ProfileType profile, LevelType level) {
+    size_t width, height, fps, score;
+    bool interlaced;
+    if (!GetConfiguration(type, index, &width, &height,
+            &fps, &interlaced)) {
+        ALOGE("Maximum resolution not found!");
+        return;
+    }
+    score = width * height * fps * (!interlaced + 1);
+    for (size_t i = 0; i < kNumResolutionTypes; ++i) {
+        for (size_t j = 0; j < 32; j++) {
+            if (GetConfiguration((ResolutionType)i, j,
+                    &width, &height, &fps, &interlaced)
+                    && score >= width * height * fps * (!interlaced + 1)) {
+                setResolutionEnabled((ResolutionType)i, j);
+                setProfileLevel((ResolutionType)i, j, profile, level);
+            }
+        }
+    }
+}
+
 void VideoFormats::setResolutionEnabled(
         ResolutionType type, size_t index, bool enabled) {
     CHECK_LT(type, kNumResolutionTypes);
@@ -182,11 +208,56 @@
 
     if (enabled) {
         mResolutionEnabled[type] |= (1ul << index);
+        mConfigs[type][index].profile = (1ul << PROFILE_CBP);
+        mConfigs[type][index].level = (1ul << LEVEL_31);
     } else {
         mResolutionEnabled[type] &= ~(1ul << index);
+        mConfigs[type][index].profile = 0;
+        mConfigs[type][index].level = 0;
     }
 }
 
+void VideoFormats::setProfileLevel(
+        ResolutionType type, size_t index,
+        ProfileType profile, LevelType level) {
+    CHECK_LT(type, kNumResolutionTypes);
+    CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
+
+    mConfigs[type][index].profile = (1ul << profile);
+    mConfigs[type][index].level = (1ul << level);
+}
+
+void VideoFormats::getProfileLevel(
+        ResolutionType type, size_t index,
+        ProfileType *profile, LevelType *level) const{
+    CHECK_LT(type, kNumResolutionTypes);
+    CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
+
+    int i, bestProfile = -1, bestLevel = -1;
+
+    for (i = 0; i < kNumProfileTypes; ++i) {
+        if (mConfigs[type][index].profile & (1ul << i)) {
+            bestProfile = i;
+        }
+    }
+
+    for (i = 0; i < kNumLevelTypes; ++i) {
+        if (mConfigs[type][index].level & (1ul << i)) {
+            bestLevel = i;
+        }
+    }
+
+    if (bestProfile == -1 || bestLevel == -1) {
+        ALOGE("Profile or level not set for resolution type %d, index %d",
+              type, index);
+        bestProfile = PROFILE_CBP;
+        bestLevel = LEVEL_31;
+    }
+
+    *profile = (ProfileType) bestProfile;
+    *level = (LevelType) bestLevel;
+}
+
 bool VideoFormats::isResolutionEnabled(
         ResolutionType type, size_t index) const {
     CHECK_LT(type, kNumResolutionTypes);
@@ -207,7 +278,7 @@
         return false;
     }
 
-    const config_t *config = &mConfigs[type][index];
+    const config_t *config = &mResolutionTable[type][index];
 
     if (config->width == 0) {
         return false;
@@ -251,9 +322,12 @@
             if (res[i] & (1ul << j)){
                 mResolutionEnabled[i] |= (1ul << j);
                 if (profile > mConfigs[i][j].profile) {
+                    // prefer higher profile (even if level is lower)
                     mConfigs[i][j].profile = profile;
-                    if (level > mConfigs[i][j].level)
-                        mConfigs[i][j].level = level;
+                    mConfigs[i][j].level = level;
+                } else if (profile == mConfigs[i][j].profile &&
+                           level > mConfigs[i][j].level) {
+                    mConfigs[i][j].level = level;
                 }
             }
         }
@@ -262,9 +336,51 @@
     return true;
 }
 
+// static
+bool VideoFormats::GetProfileLevel(
+        ProfileType profile, LevelType level, unsigned *profileIdc,
+        unsigned *levelIdc, unsigned *constraintSet) {
+    CHECK_LT(profile, kNumProfileTypes);
+    CHECK_LT(level, kNumLevelTypes);
+
+    static const unsigned kProfileIDC[kNumProfileTypes] = {
+        66,     // PROFILE_CBP
+        100,    // PROFILE_CHP
+    };
+
+    static const unsigned kLevelIDC[kNumLevelTypes] = {
+        31,     // LEVEL_31
+        32,     // LEVEL_32
+        40,     // LEVEL_40
+        41,     // LEVEL_41
+        42,     // LEVEL_42
+    };
+
+    static const unsigned kConstraintSet[kNumProfileTypes] = {
+        0xc0,   // PROFILE_CBP
+        0x0c,   // PROFILE_CHP
+    };
+
+    if (profileIdc) {
+        *profileIdc = kProfileIDC[profile];
+    }
+
+    if (levelIdc) {
+        *levelIdc = kLevelIDC[level];
+    }
+
+    if (constraintSet) {
+        *constraintSet = kConstraintSet[profile];
+    }
+
+    return true;
+}
+
 bool VideoFormats::parseFormatSpec(const char *spec) {
     CHECK_EQ(kNumResolutionTypes, 3);
 
+    disableAll();
+
     unsigned native, dummy;
     unsigned res[3];
     size_t size = strlen(spec);
@@ -320,8 +436,10 @@
     //   max-vres (none or 2 byte)
 
     return StringPrintf(
-            "%02x 00 02 02 %08x %08x %08x 00 0000 0000 00 none none",
+            "%02x 00 %02x %02x %08x %08x %08x 00 0000 0000 00 none none",
             forM4Message ? 0x00 : ((mNativeIndex << 3) | mNativeType),
+            mConfigs[mNativeType][mNativeIndex].profile,
+            mConfigs[mNativeType][mNativeIndex].level,
             mResolutionEnabled[0],
             mResolutionEnabled[1],
             mResolutionEnabled[2]);
@@ -332,7 +450,9 @@
         const VideoFormats &sinkSupported,
         const VideoFormats &sourceSupported,
         ResolutionType *chosenType,
-        size_t *chosenIndex) {
+        size_t *chosenIndex,
+        ProfileType *chosenProfile,
+        LevelType *chosenLevel) {
 #if 0
     // Support for the native format is a great idea, the spec includes
     // these features, but nobody supports it and the tests don't validate it.
@@ -412,6 +532,18 @@
     *chosenType = (ResolutionType)bestType;
     *chosenIndex = bestIndex;
 
+    // Pick the best profile/level supported by both sink and source.
+    ProfileType srcProfile, sinkProfile;
+    LevelType srcLevel, sinkLevel;
+    sourceSupported.getProfileLevel(
+                        (ResolutionType)bestType, bestIndex,
+                        &srcProfile, &srcLevel);
+    sinkSupported.getProfileLevel(
+                        (ResolutionType)bestType, bestIndex,
+                        &sinkProfile, &sinkLevel);
+    *chosenProfile = srcProfile < sinkProfile ? srcProfile : sinkProfile;
+    *chosenLevel = srcLevel < sinkLevel ? srcLevel : sinkLevel;
+
     return true;
 }
 
diff --git a/media/libstagefright/wifi-display/VideoFormats.h b/media/libstagefright/wifi-display/VideoFormats.h
index 01de246..fd38fd1 100644
--- a/media/libstagefright/wifi-display/VideoFormats.h
+++ b/media/libstagefright/wifi-display/VideoFormats.h
@@ -69,17 +69,33 @@
 
     void disableAll();
     void enableAll();
+    void enableResolutionUpto(
+            ResolutionType type, size_t index,
+            ProfileType profile, LevelType level);
 
     void setResolutionEnabled(
             ResolutionType type, size_t index, bool enabled = true);
 
     bool isResolutionEnabled(ResolutionType type, size_t index) const;
 
+    void setProfileLevel(
+            ResolutionType type, size_t index,
+            ProfileType profile, LevelType level);
+
+    void getProfileLevel(
+            ResolutionType type, size_t index,
+            ProfileType *profile, LevelType *level) const;
+
     static bool GetConfiguration(
             ResolutionType type, size_t index,
             size_t *width, size_t *height, size_t *framesPerSecond,
             bool *interlaced);
 
+    static bool GetProfileLevel(
+            ProfileType profile, LevelType level,
+            unsigned *profileIdc, unsigned *levelIdc,
+            unsigned *constraintSet);
+
     bool parseFormatSpec(const char *spec);
     AString getFormatSpec(bool forM4Message = false) const;
 
@@ -87,7 +103,9 @@
             const VideoFormats &sinkSupported,
             const VideoFormats &sourceSupported,
             ResolutionType *chosenType,
-            size_t *chosenIndex);
+            size_t *chosenIndex,
+            ProfileType *chosenProfile,
+            LevelType *chosenLevel);
 
 private:
     bool parseH264Codec(const char *spec);
@@ -95,7 +113,8 @@
     size_t mNativeIndex;
 
     uint32_t mResolutionEnabled[kNumResolutionTypes];
-    static config_t mConfigs[kNumResolutionTypes][32];
+    static const config_t mResolutionTable[kNumResolutionTypes][32];
+    config_t mConfigs[kNumResolutionTypes][32];
 
     DISALLOW_EVIL_CONSTRUCTORS(VideoFormats);
 };
diff --git a/media/libstagefright/wifi-display/nettest.cpp b/media/libstagefright/wifi-display/nettest.cpp
new file mode 100644
index 0000000..73c0d80
--- /dev/null
+++ b/media/libstagefright/wifi-display/nettest.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "nettest"
+#include <utils/Log.h>
+
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+struct TestHandler : public AHandler {
+    TestHandler(const sp<ANetworkSession> &netSession);
+
+    void listen(int32_t port);
+    void connect(const char *host, int32_t port);
+
+protected:
+    virtual ~TestHandler();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kTimeSyncerPort = 8123,
+    };
+
+    enum {
+        kWhatListen,
+        kWhatConnect,
+        kWhatTimeSyncerNotify,
+        kWhatNetNotify,
+        kWhatSendMore,
+        kWhatStop,
+    };
+
+    sp<ANetworkSession> mNetSession;
+    sp<TimeSyncer> mTimeSyncer;
+
+    int32_t mServerSessionID;
+    int32_t mSessionID;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    int32_t mCounter;
+
+    int64_t mMaxDelayMs;
+
+    void dumpDelay(int32_t counter, int64_t delayMs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TestHandler);
+};
+
+TestHandler::TestHandler(const sp<ANetworkSession> &netSession)
+    : mNetSession(netSession),
+      mServerSessionID(0),
+      mSessionID(0),
+      mTimeOffsetUs(-1ll),
+      mTimeOffsetValid(false),
+      mCounter(0),
+      mMaxDelayMs(-1ll) {
+}
+
+TestHandler::~TestHandler() {
+}
+
+void TestHandler::listen(int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatListen, id());
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+void TestHandler::connect(const char *host, int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    msg->setString("host", host);
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+void TestHandler::dumpDelay(int32_t counter, int64_t delayMs) {
+    static const int64_t kMinDelayMs = 0;
+    static const int64_t kMaxDelayMs = 300;
+
+    const char *kPattern = "########################################";
+    size_t kPatternSize = strlen(kPattern);
+
+    int n = (kPatternSize * (delayMs - kMinDelayMs))
+                / (kMaxDelayMs - kMinDelayMs);
+
+    if (n < 0) {
+        n = 0;
+    } else if ((size_t)n > kPatternSize) {
+        n = kPatternSize;
+    }
+
+    if (delayMs > mMaxDelayMs) {
+        mMaxDelayMs = delayMs;
+    }
+
+    ALOGI("[%d] (%4lld ms / %4lld ms) %s",
+          counter,
+          delayMs,
+          mMaxDelayMs,
+          kPattern + kPatternSize - n);
+}
+
+void TestHandler::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatListen:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+
+            notify = new AMessage(kWhatNetNotify, id());
+
+            int32_t port;
+            CHECK(msg->findInt32("port", &port));
+
+            struct in_addr ifaceAddr;
+            ifaceAddr.s_addr = INADDR_ANY;
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createTCPDatagramSession(
+                         ifaceAddr,
+                         port,
+                         notify,
+                         &mServerSessionID));
+            break;
+        }
+
+        case kWhatConnect:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+            mTimeSyncer->startServer(kTimeSyncerPort);
+
+            AString host;
+            CHECK(msg->findString("host", &host));
+
+            int32_t port;
+            CHECK(msg->findInt32("port", &port));
+
+            notify = new AMessage(kWhatNetNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createTCPDatagramSession(
+                         0 /* localPort */,
+                         host.c_str(),
+                         port,
+                         notify,
+                         &mSessionID));
+            break;
+        }
+
+        case kWhatNetNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatConnected:
+                {
+                    ALOGI("kWhatConnected");
+
+                    (new AMessage(kWhatSendMore, id()))->post();
+                    break;
+                }
+
+                case ANetworkSession::kWhatClientConnected:
+                {
+                    ALOGI("kWhatClientConnected");
+
+                    CHECK_EQ(mSessionID, 0);
+                    CHECK(msg->findInt32("sessionID", &mSessionID));
+
+                    AString clientIP;
+                    CHECK(msg->findString("client-ip", &clientIP));
+
+                    mTimeSyncer->startClient(clientIP.c_str(), kTimeSyncerPort);
+                    break;
+                }
+
+                case ANetworkSession::kWhatDatagram:
+                {
+                    sp<ABuffer> packet;
+                    CHECK(msg->findBuffer("data", &packet));
+
+                    CHECK_EQ(packet->size(), 12u);
+
+                    int32_t counter = U32_AT(packet->data());
+                    int64_t timeUs = U64_AT(packet->data() + 4);
+
+                    if (mTimeOffsetValid) {
+                        timeUs -= mTimeOffsetUs;
+                        int64_t nowUs = ALooper::GetNowUs();
+                        int64_t delayMs = (nowUs - timeUs) / 1000ll;
+
+                        dumpDelay(counter, delayMs);
+                    } else {
+                        ALOGI("received %d", counter);
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatError:
+                {
+                    ALOGE("kWhatError");
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+            mTimeOffsetValid = true;
+            break;
+        }
+
+        case kWhatSendMore:
+        {
+            uint8_t buffer[4 + 8];
+            buffer[0] = mCounter >> 24;
+            buffer[1] = (mCounter >> 16) & 0xff;
+            buffer[2] = (mCounter >> 8) & 0xff;
+            buffer[3] = mCounter & 0xff;
+
+            int64_t nowUs = ALooper::GetNowUs();
+
+            buffer[4] = nowUs >> 56;
+            buffer[5] = (nowUs >> 48) & 0xff;
+            buffer[6] = (nowUs >> 40) & 0xff;
+            buffer[7] = (nowUs >> 32) & 0xff;
+            buffer[8] = (nowUs >> 24) & 0xff;
+            buffer[9] = (nowUs >> 16) & 0xff;
+            buffer[10] = (nowUs >> 8) & 0xff;
+            buffer[11] = nowUs & 0xff;
+
+            ++mCounter;
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->sendRequest(
+                         mSessionID,
+                         buffer,
+                         sizeof(buffer),
+                         true /* timeValid */,
+                         nowUs));
+
+            msg->post(100000ll);
+            break;
+        }
+
+        case kWhatStop:
+        {
+            if (mSessionID != 0) {
+                mNetSession->destroySession(mSessionID);
+                mSessionID = 0;
+            }
+
+            if (mServerSessionID != 0) {
+                mNetSession->destroySession(mServerSessionID);
+                mServerSessionID = 0;
+            }
+
+            looper()->stop();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host:port\tconnect to remote host\n"
+            "               -l port   \tlisten\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    // srand(time(NULL));
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int32_t listenOnPort = -1;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l:")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    usage(argv[0]);
+                    exit(1);
+                }
+
+                connectToHost.setTo(optarg, colonPos - optarg);
+
+                char *end;
+                connectToPort = strtol(colonPos + 1, &end, 10);
+
+                if (*end != '\0' || end == colonPos + 1
+                        || connectToPort < 0 || connectToPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                char *end;
+                listenOnPort = strtol(optarg, &end, 10);
+
+                if (*end != '\0' || end == optarg
+                        || listenOnPort < 0 || listenOnPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if ((listenOnPort < 0 && connectToPort < 0)
+            || (listenOnPort >= 0 && connectToPort >= 0)) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TestHandler> handler = new TestHandler(netSession);
+    looper->registerHandler(handler);
+
+    if (listenOnPort >= 0) {
+        handler->listen(listenOnPort);
+    }
+
+    if (connectToPort >= 0) {
+        handler->connect(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
diff --git a/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp b/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp
new file mode 100644
index 0000000..7a96081
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPAssembler"
+#include <utils/Log.h>
+
+#include "RTPAssembler.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+namespace android {
+
+RTPReceiver::Assembler::Assembler(const sp<AMessage> &notify)
+    : mNotify(notify) {
+}
+
+void RTPReceiver::Assembler::postAccessUnit(
+        const sp<ABuffer> &accessUnit, bool followsDiscontinuity) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", RTPReceiver::kWhatAccessUnit);
+    notify->setBuffer("accessUnit", accessUnit);
+    notify->setInt32("followsDiscontinuity", followsDiscontinuity);
+    notify->post();
+}
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::TSAssembler::TSAssembler(const sp<AMessage> &notify)
+    : Assembler(notify),
+      mSawDiscontinuity(false) {
+}
+
+void RTPReceiver::TSAssembler::signalDiscontinuity() {
+    mSawDiscontinuity = true;
+}
+
+status_t RTPReceiver::TSAssembler::processPacket(const sp<ABuffer> &packet) {
+    int32_t rtpTime;
+    CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+    packet->meta()->setInt64("timeUs", (rtpTime * 100ll) / 9);
+
+    postAccessUnit(packet, mSawDiscontinuity);
+
+    if (mSawDiscontinuity) {
+        mSawDiscontinuity = false;
+    }
+
+    return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::H264Assembler::H264Assembler(const sp<AMessage> &notify)
+    : Assembler(notify),
+      mState(0),
+      mIndicator(0),
+      mNALType(0),
+      mAccessUnitRTPTime(0) {
+}
+
+void RTPReceiver::H264Assembler::signalDiscontinuity() {
+    reset();
+}
+
+status_t RTPReceiver::H264Assembler::processPacket(const sp<ABuffer> &packet) {
+    status_t err = internalProcessPacket(packet);
+
+    if (err != OK) {
+        reset();
+    }
+
+    return err;
+}
+
+status_t RTPReceiver::H264Assembler::internalProcessPacket(
+        const sp<ABuffer> &packet) {
+    const uint8_t *data = packet->data();
+    size_t size = packet->size();
+
+    switch (mState) {
+        case 0:
+        {
+            if (size < 1 || (data[0] & 0x80)) {
+                ALOGV("Malformed H264 RTP packet (empty or F-bit set)");
+                return ERROR_MALFORMED;
+            }
+
+            unsigned nalType = data[0] & 0x1f;
+            if (nalType >= 1 && nalType <= 23) {
+                addSingleNALUnit(packet);
+                ALOGV("added single NAL packet");
+            } else if (nalType == 28) {
+                // FU-A
+                unsigned indicator = data[0];
+                CHECK((indicator & 0x1f) == 28);
+
+                if (size < 2) {
+                    ALOGV("Malformed H264 FU-A packet (single byte)");
+                    return ERROR_MALFORMED;
+                }
+
+                if (!(data[1] & 0x80)) {
+                    ALOGV("Malformed H264 FU-A packet (no start bit)");
+                    return ERROR_MALFORMED;
+                }
+
+                mIndicator = data[0];
+                mNALType = data[1] & 0x1f;
+                uint32_t nri = (data[0] >> 5) & 3;
+
+                clearAccumulator();
+
+                uint8_t byte = mNALType | (nri << 5);
+                appendToAccumulator(&byte, 1);
+                appendToAccumulator(data + 2, size - 2);
+
+                int32_t rtpTime;
+                CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+                mAccumulator->meta()->setInt32("rtp-time", rtpTime);
+
+                if (data[1] & 0x40) {
+                    // Huh? End bit also set on the first buffer.
+                    addSingleNALUnit(mAccumulator);
+                    clearAccumulator();
+
+                    ALOGV("added FU-A");
+                    break;
+                }
+
+                mState = 1;
+            } else if (nalType == 24) {
+                // STAP-A
+
+                status_t err = addSingleTimeAggregationPacket(packet);
+                if (err != OK) {
+                    return err;
+                }
+            } else {
+                ALOGV("Malformed H264 packet (unknown type %d)", nalType);
+                return ERROR_UNSUPPORTED;
+            }
+            break;
+        }
+
+        case 1:
+        {
+            if (size < 2
+                    || data[0] != mIndicator
+                    || (data[1] & 0x1f) != mNALType
+                    || (data[1] & 0x80)) {
+                ALOGV("Malformed H264 FU-A packet (indicator, "
+                      "type or start bit mismatch)");
+
+                return ERROR_MALFORMED;
+            }
+
+            appendToAccumulator(data + 2, size - 2);
+
+            if (data[1] & 0x40) {
+                addSingleNALUnit(mAccumulator);
+
+                clearAccumulator();
+                mState = 0;
+
+                ALOGV("added FU-A");
+            }
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+
+    int32_t marker;
+    CHECK(packet->meta()->findInt32("M", &marker));
+
+    if (marker) {
+        flushAccessUnit();
+    }
+
+    return OK;
+}
+
+void RTPReceiver::H264Assembler::reset() {
+    mNALUnits.clear();
+
+    clearAccumulator();
+    mState = 0;
+}
+
+void RTPReceiver::H264Assembler::clearAccumulator() {
+    if (mAccumulator != NULL) {
+        // XXX Too expensive.
+        mAccumulator.clear();
+    }
+}
+
+void RTPReceiver::H264Assembler::appendToAccumulator(
+        const void *data, size_t size) {
+    if (mAccumulator == NULL) {
+        mAccumulator = new ABuffer(size);
+        memcpy(mAccumulator->data(), data, size);
+        return;
+    }
+
+    if (mAccumulator->size() + size > mAccumulator->capacity()) {
+        sp<ABuffer> buf = new ABuffer(mAccumulator->size() + size);
+        memcpy(buf->data(), mAccumulator->data(), mAccumulator->size());
+        buf->setRange(0, mAccumulator->size());
+
+        int32_t rtpTime;
+        if (mAccumulator->meta()->findInt32("rtp-time", &rtpTime)) {
+            buf->meta()->setInt32("rtp-time", rtpTime);
+        }
+
+        mAccumulator = buf;
+    }
+
+    memcpy(mAccumulator->data() + mAccumulator->size(), data, size);
+    mAccumulator->setRange(0, mAccumulator->size() + size);
+}
+
+void RTPReceiver::H264Assembler::addSingleNALUnit(const sp<ABuffer> &packet) {
+    if (mNALUnits.empty()) {
+        int32_t rtpTime;
+        CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+        mAccessUnitRTPTime = rtpTime;
+    }
+
+    mNALUnits.push_back(packet);
+}
+
+void RTPReceiver::H264Assembler::flushAccessUnit() {
+    if (mNALUnits.empty()) {
+        return;
+    }
+
+    size_t totalSize = 0;
+    for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+            it != mNALUnits.end(); ++it) {
+        totalSize += 4 + (*it)->size();
+    }
+
+    sp<ABuffer> accessUnit = new ABuffer(totalSize);
+    size_t offset = 0;
+    for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+            it != mNALUnits.end(); ++it) {
+        const sp<ABuffer> nalUnit = *it;
+
+        memcpy(accessUnit->data() + offset, "\x00\x00\x00\x01", 4);
+
+        memcpy(accessUnit->data() + offset + 4,
+               nalUnit->data(),
+               nalUnit->size());
+
+        offset += 4 + nalUnit->size();
+    }
+
+    mNALUnits.clear();
+
+    accessUnit->meta()->setInt64("timeUs", mAccessUnitRTPTime * 100ll / 9ll);
+    postAccessUnit(accessUnit, false /* followsDiscontinuity */);
+}
+
+status_t RTPReceiver::H264Assembler::addSingleTimeAggregationPacket(
+        const sp<ABuffer> &packet) {
+    const uint8_t *data = packet->data();
+    size_t size = packet->size();
+
+    if (size < 3) {
+        ALOGV("Malformed H264 STAP-A packet (too small)");
+        return ERROR_MALFORMED;
+    }
+
+    int32_t rtpTime;
+    CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+    ++data;
+    --size;
+    while (size >= 2) {
+        size_t nalSize = (data[0] << 8) | data[1];
+
+        if (size < nalSize + 2) {
+            ALOGV("Malformed H264 STAP-A packet (incomplete NAL unit)");
+            return ERROR_MALFORMED;
+        }
+
+        sp<ABuffer> unit = new ABuffer(nalSize);
+        memcpy(unit->data(), &data[2], nalSize);
+
+        unit->meta()->setInt32("rtp-time", rtpTime);
+
+        addSingleNALUnit(unit);
+
+        data += 2 + nalSize;
+        size -= 2 + nalSize;
+    }
+
+    if (size != 0) {
+        ALOGV("Unexpected padding at end of STAP-A packet.");
+    }
+
+    ALOGV("added STAP-A");
+
+    return OK;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPAssembler.h b/media/libstagefright/wifi-display/rtp/RTPAssembler.h
new file mode 100644
index 0000000..e456d32
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPAssembler.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_ASSEMBLER_H_
+
+#define RTP_ASSEMBLER_H_
+
+#include "RTPReceiver.h"
+
+namespace android {
+
+// A helper class to reassemble the payload of RTP packets into access
+// units depending on the packetization scheme.
+struct RTPReceiver::Assembler : public RefBase {
+    Assembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity() = 0;
+    virtual status_t processPacket(const sp<ABuffer> &packet) = 0;
+
+protected:
+    virtual ~Assembler() {}
+
+    void postAccessUnit(
+            const sp<ABuffer> &accessUnit, bool followsDiscontinuity);
+
+private:
+    sp<AMessage> mNotify;
+
+    DISALLOW_EVIL_CONSTRUCTORS(Assembler);
+};
+
+struct RTPReceiver::TSAssembler : public RTPReceiver::Assembler {
+    TSAssembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity();
+    virtual status_t processPacket(const sp<ABuffer> &packet);
+
+private:
+    bool mSawDiscontinuity;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TSAssembler);
+};
+
+struct RTPReceiver::H264Assembler : public RTPReceiver::Assembler {
+    H264Assembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity();
+    virtual status_t processPacket(const sp<ABuffer> &packet);
+
+private:
+    int32_t mState;
+
+    uint8_t mIndicator;
+    uint8_t mNALType;
+
+    sp<ABuffer> mAccumulator;
+
+    List<sp<ABuffer> > mNALUnits;
+    int32_t mAccessUnitRTPTime;
+
+    status_t internalProcessPacket(const sp<ABuffer> &packet);
+
+    void addSingleNALUnit(const sp<ABuffer> &packet);
+    status_t addSingleTimeAggregationPacket(const sp<ABuffer> &packet);
+
+    void flushAccessUnit();
+
+    void clearAccumulator();
+    void appendToAccumulator(const void *data, size_t size);
+
+    void reset();
+
+    DISALLOW_EVIL_CONSTRUCTORS(H264Assembler);
+};
+
+}  // namespace android
+
+#endif  // RTP_ASSEMBLER_H_
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
new file mode 100644
index 0000000..3b3bd63
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
@@ -0,0 +1,1152 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPReceiver"
+#include <utils/Log.h>
+
+#include "RTPAssembler.h"
+#include "RTPReceiver.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+#define TRACK_PACKET_LOSS       0
+
+namespace android {
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Per-SSRC receive state: tracks sequence numbers (RFC 3550 Appendix A.1
+// style, simplified), reorders incoming packets, requests retransmissions,
+// declares packets lost, and feeds in-order packets to a payload-specific
+// Assembler. Lives on the receiver's looper as its own AHandler so it can
+// post timer messages to itself.
+struct RTPReceiver::Source : public AHandler {
+    Source(RTPReceiver *receiver, uint32_t ssrc);
+
+    // Ingests one RTP packet; "seq" is the raw 16-bit RTP sequence number.
+    void onPacketReceived(uint16_t seq, const sp<ABuffer> &buffer);
+
+    // Appends a 24-byte RTCP receiver report block for this source to "buf".
+    void addReportBlock(uint32_t ssrc, const sp<ABuffer> &buf);
+
+protected:
+    virtual ~Source();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatRetransmit,
+        kWhatDeclareLost,
+    };
+
+    static const uint32_t kMinSequential = 2;
+    static const uint32_t kMaxDropout = 3000;
+    static const uint32_t kMaxMisorder = 100;
+    static const uint32_t kRTPSeqMod = 1u << 16;
+    // How often dequeueMore() logs a loss summary / statistics interval.
+    static const int64_t kReportIntervalUs = 10000000ll;
+
+    RTPReceiver *mReceiver;
+    uint32_t mSSRC;
+    bool mFirst;            // true until the first packet arrives
+    uint16_t mMaxSeq;       // highest 16-bit seq seen
+    uint32_t mCycles;       // wraparound count, in units of kRTPSeqMod
+    uint32_t mBaseSeq;      // first seq received
+    uint32_t mReceived;     // packets received (for RR statistics)
+    uint32_t mExpectedPrior;
+    uint32_t mReceivedPrior;
+
+    // Arrival/RTP time of the first packet, used to estimate when a
+    // not-yet-arrived packet should be considered overdue.
+    int64_t mFirstArrivalTimeUs;
+    int64_t mFirstRTPTimeUs;
+
+    // Ordered by extended seq number.
+    List<sp<ABuffer> > mPackets;
+
+    enum StatusBits {
+        STATUS_DECLARED_LOST            = 1,
+        STATUS_REQUESTED_RETRANSMISSION = 2,
+        STATUS_ARRIVED_LATE             = 4,
+    };
+#if TRACK_PACKET_LOSS
+    KeyedVector<int32_t, uint32_t> mLostPackets;
+#endif
+
+    void modifyPacketStatus(int32_t extSeqNo, uint32_t mask);
+
+    // Extended seq number of the next packet to hand to the assembler,
+    // or -1 before the first dequeue.
+    int32_t mAwaitingExtSeqNo;
+    bool mRequestedRetransmission;
+
+    // Payload type of mActiveAssembler; a new assembler is created when
+    // the payload type changes.
+    int32_t mActivePacketType;
+    sp<Assembler> mActiveAssembler;
+
+    int64_t mNextReportTimeUs;
+
+    int32_t mNumDeclaredLost;
+    int32_t mNumDeclaredLostPrior;
+
+    // Generation counters invalidate timer messages already in flight
+    // when cancelTimers() is called.
+    int32_t mRetransmitGeneration;
+    int32_t mDeclareLostGeneration;
+    bool mDeclareLostTimerPending;
+
+    void queuePacket(const sp<ABuffer> &packet);
+    void dequeueMore();
+
+    sp<ABuffer> getNextPacket();
+    void resync();
+
+    void postRetransmitTimer(int64_t delayUs);
+    void postDeclareLostTimer(int64_t delayUs);
+    void cancelTimers();
+
+    DISALLOW_EVIL_CONSTRUCTORS(Source);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Negative sentinels (-1) mean "not yet initialized": timestamps are set on
+// the first packet's arrival, mAwaitingExtSeqNo syncs on the first dequeue.
+RTPReceiver::Source::Source(RTPReceiver *receiver, uint32_t ssrc)
+    : mReceiver(receiver),
+      mSSRC(ssrc),
+      mFirst(true),
+      mMaxSeq(0),
+      mCycles(0),
+      mBaseSeq(0),
+      mReceived(0),
+      mExpectedPrior(0),
+      mReceivedPrior(0),
+      mFirstArrivalTimeUs(-1ll),
+      mFirstRTPTimeUs(-1ll),
+      mAwaitingExtSeqNo(-1),
+      mRequestedRetransmission(false),
+      mActivePacketType(-1),
+      mNextReportTimeUs(-1ll),
+      mNumDeclaredLost(0),
+      mNumDeclaredLostPrior(0),
+      mRetransmitGeneration(0),
+      mDeclareLostGeneration(0),
+      mDeclareLostTimerPending(false) {
+}
+
+RTPReceiver::Source::~Source() {
+}
+
+// Handles the two self-posted timers. Each message carries the generation
+// current at post time; a mismatch means cancelTimers() ran in the interim
+// and the timer is stale, so it is ignored.
+void RTPReceiver::Source::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatRetransmit:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mRetransmitGeneration) {
+                break;
+            }
+
+            // The awaited packet is overdue; ask the sender to resend it
+            // but keep waiting until the declare-lost timer fires.
+            mRequestedRetransmission = true;
+            mReceiver->requestRetransmission(mSSRC, mAwaitingExtSeqNo);
+
+            modifyPacketStatus(
+                    mAwaitingExtSeqNo, STATUS_REQUESTED_RETRANSMISSION);
+            break;
+        }
+
+        case kWhatDeclareLost:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mDeclareLostGeneration) {
+                break;
+            }
+
+            cancelTimers();
+
+            ALOGV("Lost packet extSeqNo %d %s",
+                  mAwaitingExtSeqNo,
+                  mRequestedRetransmission ? "*" : "");
+
+            mRequestedRetransmission = false;
+            if (mActiveAssembler != NULL) {
+                mActiveAssembler->signalDiscontinuity();
+            }
+
+            modifyPacketStatus(mAwaitingExtSeqNo, STATUS_DECLARED_LOST);
+
+            // Give up on the awaited packet and move on to the next
+            // sequence number rather than resyncing from scratch.
+            // resync();
+            ++mAwaitingExtSeqNo;
+            ++mNumDeclaredLost;
+
+            mReceiver->notifyPacketLost();
+
+            // Packets queued behind the lost one may now be deliverable.
+            dequeueMore();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Sequence-number bookkeeping modeled on RFC 3550 Appendix A.1 (simplified:
+// no probation period). Wraparound is tracked via mCycles, incremented in
+// units of 1 << 16, so each packet gets a 32-bit extended sequence number
+// (stored in the buffer's int32Data) used for ordering.
+void RTPReceiver::Source::onPacketReceived(
+        uint16_t seq, const sp<ABuffer> &buffer) {
+    if (mFirst) {
+        // Very first packet from this source: accept unconditionally and
+        // initialize the sequence state from it.
+        buffer->setInt32Data(mCycles | seq);
+        queuePacket(buffer);
+
+        mFirst = false;
+        mBaseSeq = seq;
+        mMaxSeq = seq;
+        ++mReceived;
+        return;
+    }
+
+    // Unsigned 16-bit difference handles wraparound naturally.
+    uint16_t udelta = seq - mMaxSeq;
+
+    if (udelta < kMaxDropout) {
+        // In order, with permissible gap.
+
+        if (seq < mMaxSeq) {
+            // Sequence number wrapped - count another 64K cycle
+            mCycles += kRTPSeqMod;
+        }
+
+        mMaxSeq = seq;
+
+        ++mReceived;
+    } else if (udelta <= kRTPSeqMod - kMaxMisorder) {
+        // The sequence number made a very large jump; drop the packet.
+        return;
+    } else {
+        // Duplicate or reordered packet: fall through and let queuePacket
+        // discard duplicates / slot reordered packets into place.
+    }
+
+    buffer->setInt32Data(mCycles | seq);
+    queuePacket(buffer);
+}
+
+// Inserts the packet into mPackets, keeping the list ordered by extended
+// sequence number, discarding duplicates and packets older than the one we
+// are currently awaiting, then attempts to drain deliverable packets.
+void RTPReceiver::Source::queuePacket(const sp<ABuffer> &packet) {
+    int32_t newExtendedSeqNo = packet->int32Data();
+
+    if (mFirstArrivalTimeUs < 0ll) {
+        // Record the (arrival time, RTP time) pair of the first packet so
+        // later packets' expected arrival times can be estimated.
+        mFirstArrivalTimeUs = ALooper::GetNowUs();
+
+        uint32_t rtpTime;
+        CHECK(packet->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+
+        // 90 kHz RTP clock -> microseconds.
+        mFirstRTPTimeUs = (rtpTime * 100ll) / 9ll;
+    }
+
+    if (mAwaitingExtSeqNo >= 0 && newExtendedSeqNo < mAwaitingExtSeqNo) {
+        // We're no longer interested in these. They're old.
+        ALOGV("dropping stale extSeqNo %d", newExtendedSeqNo);
+
+        modifyPacketStatus(newExtendedSeqNo, STATUS_ARRIVED_LATE);
+        return;
+    }
+
+    if (mPackets.empty()) {
+        mPackets.push_back(packet);
+        dequeueMore();
+        return;
+    }
+
+    // Scan backwards from the tail (packets usually arrive nearly in
+    // order, so the insertion point is typically near the end).
+    List<sp<ABuffer> >::iterator firstIt = mPackets.begin();
+    List<sp<ABuffer> >::iterator it = --mPackets.end();
+    for (;;) {
+        int32_t extendedSeqNo = (*it)->int32Data();
+
+        if (extendedSeqNo == newExtendedSeqNo) {
+            // Duplicate packet.
+            return;
+        }
+
+        if (extendedSeqNo < newExtendedSeqNo) {
+            // Insert new packet after the one at "it".
+            mPackets.insert(++it, packet);
+            break;
+        }
+
+        if (it == firstIt) {
+            // Insert new packet before the first existing one.
+            mPackets.insert(it, packet);
+            break;
+        }
+
+        --it;
+    }
+
+    dequeueMore();
+}
+
+// Drains all consecutively-sequenced packets at the head of mPackets into
+// the active assembler. If a gap remains, schedules a declare-lost timer
+// (and optionally a retransmit-request timer) keyed to when the missing
+// packet "should" have arrived, based on the first packet's arrival/RTP
+// time pair. Also emits a periodic loss-statistics log.
+void RTPReceiver::Source::dequeueMore() {
+    int64_t nowUs = ALooper::GetNowUs();
+    // Periodic statistics report, every kReportIntervalUs.
+    if (mNextReportTimeUs < 0ll || nowUs >= mNextReportTimeUs) {
+        if (mNextReportTimeUs >= 0ll) {
+            // Interval loss computation in the style of RFC 3550 A.3.
+            uint32_t expected = (mMaxSeq | mCycles) - mBaseSeq + 1;
+
+            uint32_t expectedInterval = expected - mExpectedPrior;
+            mExpectedPrior = expected;
+
+            uint32_t receivedInterval = mReceived - mReceivedPrior;
+            mReceivedPrior = mReceived;
+
+            int64_t lostInterval =
+                (int64_t)expectedInterval - (int64_t)receivedInterval;
+
+            int32_t declaredLostInterval =
+                mNumDeclaredLost - mNumDeclaredLostPrior;
+
+            mNumDeclaredLostPrior = mNumDeclaredLost;
+
+            if (declaredLostInterval > 0) {
+                ALOGI("lost %lld packets (%.2f %%), declared %d lost\n",
+                      lostInterval,
+                      100.0f * lostInterval / expectedInterval,
+                      declaredLostInterval);
+            }
+        }
+
+        mNextReportTimeUs = nowUs + kReportIntervalUs;
+
+#if TRACK_PACKET_LOSS
+        for (size_t i = 0; i < mLostPackets.size(); ++i) {
+            int32_t key = mLostPackets.keyAt(i);
+            uint32_t value = mLostPackets.valueAt(i);
+
+            AString status;
+            if (value & STATUS_REQUESTED_RETRANSMISSION) {
+                status.append("retrans ");
+            }
+            if (value & STATUS_ARRIVED_LATE) {
+                status.append("arrived-late ");
+            }
+            ALOGI("Packet %d declared lost %s", key, status.c_str());
+        }
+#endif
+    }
+
+    // Feed every packet that is next-in-sequence to the assembler.
+    sp<ABuffer> packet;
+    while ((packet = getNextPacket()) != NULL) {
+        if (mDeclareLostTimerPending) {
+            // The awaited packet arrived after all; stop the timers.
+            cancelTimers();
+        }
+
+        CHECK_GE(mAwaitingExtSeqNo, 0);
+#if TRACK_PACKET_LOSS
+        mLostPackets.removeItem(mAwaitingExtSeqNo);
+#endif
+
+        int32_t packetType;
+        CHECK(packet->meta()->findInt32("PT", &packetType));
+
+        if (packetType != mActivePacketType) {
+            // Payload type changed; switch to a matching assembler.
+            mActiveAssembler = mReceiver->makeAssembler(packetType);
+            mActivePacketType = packetType;
+        }
+
+        if (mActiveAssembler != NULL) {
+            status_t err = mActiveAssembler->processPacket(packet);
+            if (err != OK) {
+                ALOGV("assembler returned error %d", err);
+            }
+        }
+
+        ++mAwaitingExtSeqNo;
+    }
+
+    if (mDeclareLostTimerPending) {
+        // A timer is already ticking for the current gap.
+        return;
+    }
+
+    if (mPackets.empty()) {
+        return;
+    }
+
+    // There's a gap: the head of the queue is beyond mAwaitingExtSeqNo.
+    // Estimate when the missing packet should have arrived from the head
+    // packet's RTP timestamp, relative to the first packet's arrival.
+    CHECK_GE(mAwaitingExtSeqNo, 0);
+
+    const sp<ABuffer> &firstPacket = *mPackets.begin();
+
+    uint32_t rtpTime;
+    CHECK(firstPacket->meta()->findInt32(
+                "rtp-time", (int32_t *)&rtpTime));
+
+
+    int64_t rtpUs = (rtpTime * 100ll) / 9ll;
+
+    int64_t maxArrivalTimeUs =
+        mFirstArrivalTimeUs + rtpUs - mFirstRTPTimeUs;
+
+    nowUs = ALooper::GetNowUs();
+
+    CHECK_LT(mAwaitingExtSeqNo, firstPacket->int32Data());
+
+    ALOGV("waiting for %d, comparing against %d, %lld us left",
+          mAwaitingExtSeqNo,
+          firstPacket->int32Data(),
+          maxArrivalTimeUs - nowUs);
+
+    // Declare the packet lost kPacketLostAfterUs past its expected
+    // arrival; optionally request retransmission sooner.
+    postDeclareLostTimer(maxArrivalTimeUs + kPacketLostAfterUs);
+
+    if (kRequestRetransmissionAfterUs > 0ll) {
+        postRetransmitTimer(
+                maxArrivalTimeUs + kRequestRetransmissionAfterUs);
+    }
+}
+
+// Pops and returns the head of the packet queue if it carries the extended
+// sequence number we're waiting for; returns NULL when the queue is empty
+// or its head is further ahead. On the very first call (mAwaitingExtSeqNo
+// still -1) we synchronize on whatever packet is at the head.
+sp<ABuffer> RTPReceiver::Source::getNextPacket() {
+    if (mPackets.empty()) {
+        return NULL;
+    }
+
+    List<sp<ABuffer> >::iterator front = mPackets.begin();
+    const int32_t headExtSeqNo = (*front)->int32Data();
+
+    if (mAwaitingExtSeqNo < 0) {
+        // First dequeue ever: sync to the head packet.
+        mAwaitingExtSeqNo = headExtSeqNo;
+    } else if (headExtSeqNo != mAwaitingExtSeqNo) {
+        // The packet we need hasn't arrived yet.
+        return NULL;
+    }
+
+    sp<ABuffer> result = *front;
+    mPackets.erase(front);
+
+    return result;
+}
+
+// Forgets which sequence number we were waiting for; the next dequeue will
+// re-synchronize on whatever packet is at the head of the queue.
+void RTPReceiver::Source::resync() {
+    mAwaitingExtSeqNo = -1;
+}
+
+// Appends a 24-byte RTCP receiver report block (RFC 3550 sec. 6.4.1) for
+// this source to "buf" and extends the buffer's range accordingly. The
+// caller is responsible for ensuring 24 bytes of spare capacity.
+// Jitter, LSR and DLSR fields are not yet computed and are sent as zero.
+void RTPReceiver::Source::addReportBlock(
+        uint32_t ssrc, const sp<ABuffer> &buf) {
+    uint32_t extMaxSeq = mMaxSeq | mCycles;
+    uint32_t expected = extMaxSeq - mBaseSeq + 1;
+
+    // Cumulative number lost, clamped to the signed 24-bit field.
+    int64_t lost = (int64_t)expected - (int64_t)mReceived;
+    if (lost > 0x7fffff) {
+        lost = 0x7fffff;
+    } else if (lost < -0x800000) {
+        lost = -0x800000;
+    }
+
+    uint32_t expectedInterval = expected - mExpectedPrior;
+    mExpectedPrior = expected;
+
+    uint32_t receivedInterval = mReceived - mReceivedPrior;
+    mReceivedPrior = mReceived;
+
+    int64_t lostInterval = expectedInterval - receivedInterval;
+
+    // Fixed-point 8.8 fraction of packets lost in the last interval.
+    uint8_t fractionLost;
+    if (expectedInterval == 0 || lostInterval <=0) {
+        fractionLost = 0;
+    } else {
+        fractionLost = (lostInterval << 8) / expectedInterval;
+    }
+
+    uint8_t *ptr = buf->data() + buf->size();
+
+    ptr[0] = ssrc >> 24;
+    ptr[1] = (ssrc >> 16) & 0xff;
+    ptr[2] = (ssrc >> 8) & 0xff;
+    ptr[3] = ssrc & 0xff;
+
+    ptr[4] = fractionLost;
+
+    ptr[5] = (lost >> 16) & 0xff;
+    ptr[6] = (lost >> 8) & 0xff;
+    ptr[7] = lost & 0xff;
+
+    ptr[8] = extMaxSeq >> 24;
+    ptr[9] = (extMaxSeq >> 16) & 0xff;
+    ptr[10] = (extMaxSeq >> 8) & 0xff;
+    ptr[11] = extMaxSeq & 0xff;
+
+    // XXX TODO:
+
+    ptr[12] = 0x00;  // interarrival jitter
+    ptr[13] = 0x00;
+    ptr[14] = 0x00;
+    ptr[15] = 0x00;
+
+    ptr[16] = 0x00;  // last SR
+    ptr[17] = 0x00;
+    ptr[18] = 0x00;
+    ptr[19] = 0x00;
+
+    ptr[20] = 0x00;  // delay since last SR
+    ptr[21] = 0x00;
+    ptr[22] = 0x00;
+    ptr[23] = 0x00;
+
+    buf->setRange(buf->offset(), buf->size() + 24);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Session IDs of 0 mean "no session"; they're created lazily in initAsync
+// (listener sessions) and onNetNotify (accepted TCP client sessions).
+RTPReceiver::RTPReceiver(
+        const sp<ANetworkSession> &netSession,
+        const sp<AMessage> &notify,
+        uint32_t flags)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mFlags(flags),
+      mRTPMode(TRANSPORT_UNDEFINED),
+      mRTCPMode(TRANSPORT_UNDEFINED),
+      mRTPSessionID(0),
+      mRTCPSessionID(0),
+      mRTPConnected(false),
+      mRTCPConnected(false),
+      mRTPClientSessionID(0),
+      mRTCPClientSessionID(0) {
+}
+
+// Tears down accepted client sessions before the listener/local sessions.
+RTPReceiver::~RTPReceiver() {
+    if (mRTCPClientSessionID != 0) {
+        mNetSession->destroySession(mRTCPClientSessionID);
+        mRTCPClientSessionID = 0;
+    }
+
+    if (mRTPClientSessionID != 0) {
+        mNetSession->destroySession(mRTPClientSessionID);
+        mRTPClientSessionID = 0;
+    }
+
+    if (mRTCPSessionID != 0) {
+        mNetSession->destroySession(mRTCPSessionID);
+        mRTCPSessionID = 0;
+    }
+
+    if (mRTPSessionID != 0) {
+        mNetSession->destroySession(mRTPSessionID);
+        mRTPSessionID = 0;
+    }
+}
+
+// Creates the local RTP (and, unless rtcpMode is TRANSPORT_NONE, RTCP)
+// sessions. Ports are picked at random until both the RTP port and the
+// adjacent RTP+1 RTCP port can be bound; on success *outLocalRTPPort
+// receives the chosen RTP port. TCP modes create listening datagram
+// sessions instead of UDP sockets. Returns INVALID_OPERATION if already
+// initialized or the requested modes are inconsistent.
+status_t RTPReceiver::initAsync(
+        TransportMode rtpMode,
+        TransportMode rtcpMode,
+        int32_t *outLocalRTPPort) {
+    if (mRTPMode != TRANSPORT_UNDEFINED
+            || rtpMode == TRANSPORT_UNDEFINED
+            || rtpMode == TRANSPORT_NONE
+            || rtcpMode == TRANSPORT_UNDEFINED) {
+        return INVALID_OPERATION;
+    }
+
+    // Interleaved (in-band) transport is not handled by this receiver.
+    CHECK_NE(rtpMode, TRANSPORT_TCP_INTERLEAVED);
+    CHECK_NE(rtcpMode, TRANSPORT_TCP_INTERLEAVED);
+
+    sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id());
+
+    sp<AMessage> rtcpNotify;
+    if (rtcpMode != TRANSPORT_NONE) {
+        rtcpNotify = new AMessage(kWhatRTCPNotify, id());
+    }
+
+    CHECK_EQ(mRTPSessionID, 0);
+    CHECK_EQ(mRTCPSessionID, 0);
+
+    int32_t localRTPPort;
+
+    struct in_addr ifaceAddr;
+    ifaceAddr.s_addr = INADDR_ANY;
+
+    for (;;) {
+        localRTPPort = PickRandomRTPPort();
+
+        status_t err;
+        if (rtpMode == TRANSPORT_UDP) {
+            err = mNetSession->createUDPSession(
+                    localRTPPort,
+                    rtpNotify,
+                    &mRTPSessionID);
+        } else {
+            CHECK_EQ(rtpMode, TRANSPORT_TCP);
+            err = mNetSession->createTCPDatagramSession(
+                    ifaceAddr,
+                    localRTPPort,
+                    rtpNotify,
+                    &mRTPSessionID);
+        }
+
+        if (err != OK) {
+            // Port presumably in use; try another random port.
+            continue;
+        }
+
+        if (rtcpMode == TRANSPORT_NONE) {
+            break;
+        } else if (rtcpMode == TRANSPORT_UDP) {
+            err = mNetSession->createUDPSession(
+                    localRTPPort + 1,
+                    rtcpNotify,
+                    &mRTCPSessionID);
+        } else {
+            // Bug fix: this branch previously asserted on rtpMode, but it
+            // is the RTCP transport that is being validated here.
+            CHECK_EQ(rtcpMode, TRANSPORT_TCP);
+            err = mNetSession->createTCPDatagramSession(
+                    ifaceAddr,
+                    localRTPPort + 1,
+                    rtcpNotify,
+                    &mRTCPSessionID);
+        }
+
+        if (err == OK) {
+            break;
+        }
+
+        // Couldn't bind the adjacent RTCP port; release the RTP session
+        // and retry with a different port pair.
+        mNetSession->destroySession(mRTPSessionID);
+        mRTPSessionID = 0;
+    }
+
+    mRTPMode = rtpMode;
+    mRTCPMode = rtcpMode;
+    *outLocalRTPPort = localRTPPort;
+
+    return OK;
+}
+
+// Connects the UDP RTP/RTCP sessions to the remote endpoint. TCP-mode
+// sessions are not handled here -- for TCP the remote connects to us and
+// the sessions become "connected" in onNetNotify instead. Posts the
+// kWhatInitDone notification (with an error on failure, or OK once every
+// required channel is connected).
+status_t RTPReceiver::connect(
+        const char *remoteHost, int32_t remoteRTPPort, int32_t remoteRTCPPort) {
+    status_t err;
+
+    if (mRTPMode == TRANSPORT_UDP) {
+        CHECK(!mRTPConnected);
+
+        err = mNetSession->connectUDPSession(
+                mRTPSessionID, remoteHost, remoteRTPPort);
+
+        if (err != OK) {
+            notifyInitDone(err);
+            return err;
+        }
+
+        ALOGI("connectUDPSession RTP successful.");
+
+        mRTPConnected = true;
+    }
+
+    if (mRTCPMode == TRANSPORT_UDP) {
+        CHECK(!mRTCPConnected);
+
+        err = mNetSession->connectUDPSession(
+                mRTCPSessionID, remoteHost, remoteRTCPPort);
+
+        if (err != OK) {
+            notifyInitDone(err);
+            return err;
+        }
+
+        // Start the periodic receiver-report cycle now that RTCP is up.
+        scheduleSendRR();
+
+        ALOGI("connectUDPSession RTCP successful.");
+
+        mRTCPConnected = true;
+    }
+
+    if (mRTPConnected
+            && (mRTCPConnected || mRTCPMode == TRANSPORT_NONE)) {
+        notifyInitDone(OK);
+    }
+
+    return OK;
+}
+
+// Sends a 28-byte RTCP APP packet (name "late") carrying the receiver's
+// average and maximum latency (both in microseconds, big-endian 64-bit) so
+// the sender can adapt. Requires a connected RTCP channel.
+status_t RTPReceiver::informSender(const sp<AMessage> &params) {
+    if (!mRTCPConnected) {
+        return INVALID_OPERATION;
+    }
+
+    int64_t avgLatencyUs;
+    CHECK(params->findInt64("avgLatencyUs", &avgLatencyUs));
+
+    int64_t maxLatencyUs;
+    CHECK(params->findInt64("maxLatencyUs", &maxLatencyUs));
+
+    sp<ABuffer> buf = new ABuffer(28);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 0;  // V=2, subtype 0
+    ptr[1] = 204;  // APP
+    ptr[2] = 0;
+
+    // Length field: packet size in 32-bit words minus one (28 bytes -> 6).
+    CHECK((buf->size() % 4) == 0u);
+    ptr[3] = (buf->size() / 4) - 1;
+
+    ptr[4] = kSourceID >> 24;  // SSRC
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = kSourceID & 0xff;
+    ptr[8] = 'l';  // 4-char APP name
+    ptr[9] = 'a';
+    ptr[10] = 't';
+    ptr[11] = 'e';
+
+    ptr[12] = avgLatencyUs >> 56;
+    ptr[13] = (avgLatencyUs >> 48) & 0xff;
+    ptr[14] = (avgLatencyUs >> 40) & 0xff;
+    ptr[15] = (avgLatencyUs >> 32) & 0xff;
+    ptr[16] = (avgLatencyUs >> 24) & 0xff;
+    ptr[17] = (avgLatencyUs >> 16) & 0xff;
+    ptr[18] = (avgLatencyUs >> 8) & 0xff;
+    ptr[19] = avgLatencyUs & 0xff;
+
+    ptr[20] = maxLatencyUs >> 56;
+    ptr[21] = (maxLatencyUs >> 48) & 0xff;
+    ptr[22] = (maxLatencyUs >> 40) & 0xff;
+    ptr[23] = (maxLatencyUs >> 32) & 0xff;
+    ptr[24] = (maxLatencyUs >> 24) & 0xff;
+    ptr[25] = (maxLatencyUs >> 16) & 0xff;
+    ptr[26] = (maxLatencyUs >> 8) & 0xff;
+    ptr[27] = maxLatencyUs & 0xff;
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+
+    return OK;
+}
+
+// Dispatches looper messages: network events for either channel are routed
+// to onNetNotify (tagged RTP vs. RTCP), and the periodic RR timer fires
+// onSendRR.
+void RTPReceiver::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatRTPNotify:
+        case kWhatRTCPNotify:
+            onNetNotify(msg->what() == kWhatRTPNotify, msg);
+            break;
+
+        case kWhatSendRR:
+        {
+            onSendRR();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Handles ANetworkSession events for the RTP or RTCP channel: errors tear
+// down the offending session, datagrams are parsed as RTP/RTCP, and TCP
+// client connections mark the corresponding channel as connected. Events
+// other than the three cases below are silently ignored (no default case).
+void RTPReceiver::onNetNotify(bool isRTP, const sp<AMessage> &msg) {
+    int32_t reason;
+    CHECK(msg->findInt32("reason", &reason));
+
+    switch (reason) {
+        case ANetworkSession::kWhatError:
+        {
+            int32_t sessionID;
+            CHECK(msg->findInt32("sessionID", &sessionID));
+
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            int32_t errorOccuredDuringSend;
+            CHECK(msg->findInt32("send", &errorOccuredDuringSend));
+
+            AString detail;
+            CHECK(msg->findString("detail", &detail));
+
+            ALOGE("An error occurred during %s in session %d "
+                  "(%d, '%s' (%s)).",
+                  errorOccuredDuringSend ? "send" : "receive",
+                  sessionID,
+                  err,
+                  detail.c_str(),
+                  strerror(-err));
+
+            mNetSession->destroySession(sessionID);
+
+            // Zero out whichever of our session IDs just died.
+            if (sessionID == mRTPSessionID) {
+                mRTPSessionID = 0;
+            } else if (sessionID == mRTCPSessionID) {
+                mRTCPSessionID = 0;
+            } else if (sessionID == mRTPClientSessionID) {
+                mRTPClientSessionID = 0;
+            } else if (sessionID == mRTCPClientSessionID) {
+                mRTCPClientSessionID = 0;
+            }
+
+            // If setup hadn't completed yet, report this as an init
+            // failure rather than a runtime error.
+            if (!mRTPConnected
+                    || (mRTCPMode != TRANSPORT_NONE && !mRTCPConnected)) {
+                notifyInitDone(err);
+                break;
+            }
+
+            notifyError(err);
+            break;
+        }
+
+        case ANetworkSession::kWhatDatagram:
+        {
+            sp<ABuffer> data;
+            CHECK(msg->findBuffer("data", &data));
+
+            if (isRTP) {
+                if (mFlags & FLAG_AUTO_CONNECT) {
+                    // First RTP datagram: connect back to its origin
+                    // (RTCP assumed on the adjacent port), one time only.
+                    AString fromAddr;
+                    CHECK(msg->findString("fromAddr", &fromAddr));
+
+                    int32_t fromPort;
+                    CHECK(msg->findInt32("fromPort", &fromPort));
+
+                    CHECK_EQ((status_t)OK,
+                             connect(
+                                 fromAddr.c_str(), fromPort, fromPort + 1));
+
+                    mFlags &= ~FLAG_AUTO_CONNECT;
+                }
+
+                onRTPData(data);
+            } else {
+                onRTCPData(data);
+            }
+            break;
+        }
+
+        case ANetworkSession::kWhatClientConnected:
+        {
+            int32_t sessionID;
+            CHECK(msg->findInt32("sessionID", &sessionID));
+
+            if (isRTP) {
+                CHECK_EQ(mRTPMode, TRANSPORT_TCP);
+
+                if (mRTPClientSessionID != 0) {
+                    // We only allow a single client connection.
+                    mNetSession->destroySession(sessionID);
+                    sessionID = 0;
+                    break;
+                }
+
+                mRTPClientSessionID = sessionID;
+                mRTPConnected = true;
+            } else {
+                CHECK_EQ(mRTCPMode, TRANSPORT_TCP);
+
+                if (mRTCPClientSessionID != 0) {
+                    // We only allow a single client connection.
+                    mNetSession->destroySession(sessionID);
+                    sessionID = 0;
+                    break;
+                }
+
+                mRTCPClientSessionID = sessionID;
+                mRTCPConnected = true;
+            }
+
+            // Init completes once all required channels are connected.
+            if (mRTPConnected
+                    && (mRTCPConnected || mRTCPMode == TRANSPORT_NONE)) {
+                notifyInitDone(OK);
+            }
+            break;
+        }
+    }
+}
+
+// Posts a kWhatInitDone notification to the client with the given status.
+void RTPReceiver::notifyInitDone(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatInitDone);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+// Posts a kWhatError notification for failures after init completed.
+void RTPReceiver::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+// Posts a kWhatPacketLost notification whenever a Source gives up on a
+// missing packet.
+void RTPReceiver::notifyPacketLost() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatPacketLost);
+    notify->post();
+}
+
+// Parses the fixed RTP header (RFC 3550 sec. 5.1), strips padding, CSRC
+// entries and any header extension, annotates the buffer's meta with
+// ssrc / rtp-time / payload type / marker bit, narrows the buffer to the
+// payload, and routes it to the per-SSRC Source (created on first sight).
+status_t RTPReceiver::onRTPData(const sp<ABuffer> &buffer) {
+    size_t size = buffer->size();
+    if (size < 12) {
+        // Too short to be a valid RTP header.
+        return ERROR_MALFORMED;
+    }
+
+    const uint8_t *data = buffer->data();
+
+    if ((data[0] >> 6) != 2) {
+        // Unsupported version.
+        return ERROR_UNSUPPORTED;
+    }
+
+    if (data[0] & 0x20) {
+        // Padding present.
+
+        // Last byte holds the padding length, which includes itself.
+        size_t paddingLength = data[size - 1];
+
+        if (paddingLength + 12 > size) {
+            // If we removed this much padding we'd end up with something
+            // that's too short to be a valid RTP header.
+            return ERROR_MALFORMED;
+        }
+
+        size -= paddingLength;
+    }
+
+    int numCSRCs = data[0] & 0x0f;
+
+    size_t payloadOffset = 12 + 4 * numCSRCs;
+
+    if (size < payloadOffset) {
+        // Not enough data to fit the basic header and all the CSRC entries.
+        return ERROR_MALFORMED;
+    }
+
+    if (data[0] & 0x10) {
+        // Header eXtension present.
+
+        if (size < payloadOffset + 4) {
+            // Not enough data to fit the basic header, all CSRC entries
+            // and the first 4 bytes of the extension header.
+
+            return ERROR_MALFORMED;
+        }
+
+        const uint8_t *extensionData = &data[payloadOffset];
+
+        // Extension length field counts 32-bit words following the
+        // 4-byte extension header.
+        size_t extensionLength =
+            4 * (extensionData[2] << 8 | extensionData[3]);
+
+        if (size < payloadOffset + 4 + extensionLength) {
+            return ERROR_MALFORMED;
+        }
+
+        payloadOffset += 4 + extensionLength;
+    }
+
+    uint32_t srcId = U32_AT(&data[8]);
+    uint32_t rtpTime = U32_AT(&data[4]);
+    uint16_t seqNo = U16_AT(&data[2]);
+
+    sp<AMessage> meta = buffer->meta();
+    meta->setInt32("ssrc", srcId);
+    meta->setInt32("rtp-time", rtpTime);
+    meta->setInt32("PT", data[1] & 0x7f);
+    meta->setInt32("M", data[1] >> 7);
+
+    // Narrow the buffer to just the payload.
+    buffer->setRange(payloadOffset, size - payloadOffset);
+
+    // Look up (or lazily create) the state for this SSRC.
+    ssize_t index = mSources.indexOfKey(srcId);
+    sp<Source> source;
+    if (index < 0) {
+        source = new Source(this, srcId);
+        looper()->registerHandler(source);
+
+        mSources.add(srcId, source);
+    } else {
+        source = mSources.valueAt(index);
+    }
+
+    source->onPacketReceived(seqNo, buffer);
+
+    return OK;
+}
+
+// Incoming RTCP packets are currently ignored (only logged); parsing of
+// sender reports etc. is not implemented yet.
+status_t RTPReceiver::onRTCPData(const sp<ABuffer> &data) {
+    ALOGI("onRTCPData");
+    return OK;
+}
+
+// Appends an RTCP SDES packet (RFC 3550 sec. 6.5) with CNAME and TOOL
+// items for our SSRC to "buffer" and extends its range. Assumes the caller
+// left enough spare capacity -- TODO confirm callers guarantee this.
+void RTPReceiver::addSDES(const sp<ABuffer> &buffer) {
+    uint8_t *data = buffer->data() + buffer->size();
+    data[0] = 0x80 | 1;  // V=2, one chunk
+    data[1] = 202;  // SDES
+    data[4] = kSourceID >> 24;  // SSRC
+    data[5] = (kSourceID >> 16) & 0xff;
+    data[6] = (kSourceID >> 8) & 0xff;
+    data[7] = kSourceID & 0xff;
+
+    size_t offset = 8;
+
+    data[offset++] = 1;  // CNAME
+
+    AString cname = "stagefright@somewhere";
+    data[offset++] = cname.size();
+
+    memcpy(&data[offset], cname.c_str(), cname.size());
+    offset += cname.size();
+
+    data[offset++] = 6;  // TOOL
+
+    AString tool = "stagefright/1.0";
+    data[offset++] = tool.size();
+
+    memcpy(&data[offset], tool.c_str(), tool.size());
+    offset += tool.size();
+
+    data[offset++] = 0;  // end-of-items marker
+
+    // Pad the chunk to a 32-bit boundary with zero bytes; the case
+    // fallthroughs below are intentional.
+    if ((offset % 4) > 0) {
+        size_t count = 4 - (offset % 4);
+        switch (count) {
+            case 3:
+                data[offset++] = 0;
+            case 2:
+                data[offset++] = 0;
+            case 1:
+                data[offset++] = 0;
+        }
+    }
+
+    // Backpatch the length field: size in 32-bit words minus one.
+    size_t numWords = (offset / 4) - 1;
+    data[2] = numWords >> 8;
+    data[3] = numWords & 0xff;
+
+    buffer->setRange(buffer->offset(), buffer->size() + offset);
+}
+
+// Arms the next periodic receiver-report, 5 seconds out.
+void RTPReceiver::scheduleSendRR() {
+    (new AMessage(kWhatSendRR, id()))->post(5000000ll);
+}
+
+// Builds and sends one RTCP compound packet: an RR (RFC 3550 sec. 6.4.2)
+// with up to 31 report blocks (one per known source, capacity permitting)
+// followed by an SDES packet, then re-arms the 5-second timer.
+void RTPReceiver::onSendRR() {
+    sp<ABuffer> buf = new ABuffer(kMaxUDPPacketSize);
+    buf->setRange(0, 0);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 0;  // V=2; report count patched in below
+    ptr[1] = 201;  // RR
+    ptr[2] = 0;
+    ptr[3] = 1;
+    ptr[4] = kSourceID >> 24;  // SSRC
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = kSourceID & 0xff;
+
+    buf->setRange(0, 8);
+
+    size_t numReportBlocks = 0;
+    for (size_t i = 0; i < mSources.size(); ++i) {
+        uint32_t ssrc = mSources.keyAt(i);
+        sp<Source> source = mSources.valueAt(i);
+
+        if (numReportBlocks > 31 || buf->size() + 24 > buf->capacity()) {
+            // Cannot fit another report block.
+            break;
+        }
+
+        source->addReportBlock(ssrc, buf);
+        ++numReportBlocks;
+    }
+
+    ptr[0] |= numReportBlocks;  // 5 bit
+
+    // Length field: size in 32-bit words minus one (header word + 6 words
+    // per report block).
+    size_t sizeInWordsMinus1 = 1 + 6 * numReportBlocks;
+    ptr[2] = sizeInWordsMinus1 >> 8;
+    ptr[3] = sizeInWordsMinus1 & 0xff;
+
+    buf->setRange(0, (sizeInWordsMinus1 + 1) * 4);
+
+    addSDES(buf);
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+
+    scheduleSendRR();
+}
+
+// Maps an RTP payload type number to a packetization scheme; makeAssembler
+// consults this table when a packet with that payload type arrives.
+status_t RTPReceiver::registerPacketType(
+        uint8_t packetType, PacketizationMode mode) {
+    mPacketTypes.add(packetType, mode);
+
+    return OK;
+}
+
+// Instantiates the reassembler matching a registered payload type, or NULL
+// if the type was never registered (or its mode is unrecognized). "None"
+// and transport-stream payloads share the TS assembler.
+sp<RTPReceiver::Assembler> RTPReceiver::makeAssembler(uint8_t packetType) {
+    const ssize_t index = mPacketTypes.indexOfKey(packetType);
+    if (index < 0) {
+        return NULL;
+    }
+
+    const PacketizationMode mode = mPacketTypes.valueAt(index);
+
+    if (mode == PACKETIZATION_NONE || mode == PACKETIZATION_TRANSPORT_STREAM) {
+        return new TSAssembler(mNotify);
+    }
+
+    if (mode == PACKETIZATION_H264) {
+        return new H264Assembler(mNotify);
+    }
+
+    return NULL;
+}
+
+// Sends an RTCP transport-layer feedback "Generic NACK" (RFC 4585
+// sec. 6.2.1) asking the media sender to retransmit the packet with the
+// given extended sequence number. The bitmask of following lost packets
+// (BLP) is always zero: we NACK one packet at a time.
+void RTPReceiver::requestRetransmission(uint32_t senderSSRC, int32_t extSeqNo) {
+    int32_t blp = 0;
+
+    sp<ABuffer> buf = new ABuffer(16);
+    buf->setRange(0, 0);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 1;  // V=2, FMT=1: generic NACK
+    ptr[1] = 205;  // TSFB
+    ptr[2] = 0;
+    ptr[3] = 3;  // length in 32-bit words minus one (16 bytes)
+    // Bug fix: bytes 4..7 (SSRC of packet sender, i.e. us) were never
+    // written -- both SSRCs were written to bytes 8..11, with kSourceID
+    // overwriting senderSSRC -- so 4 uninitialized bytes went on the wire.
+    // Per RFC 4585 the packet-sender SSRC precedes the media-source SSRC.
+    ptr[4] = (kSourceID >> 24) & 0xff;
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = (kSourceID & 0xff);
+    ptr[8] = (senderSSRC >> 24) & 0xff;
+    ptr[9] = (senderSSRC >> 16) & 0xff;
+    ptr[10] = (senderSSRC >> 8) & 0xff;
+    ptr[11] = (senderSSRC & 0xff);
+    ptr[12] = (extSeqNo >> 8) & 0xff;  // PID: 16-bit RTP sequence number
+    ptr[13] = (extSeqNo & 0xff);
+    ptr[14] = (blp >> 8) & 0xff;
+    ptr[15] = (blp & 0xff);
+
+    buf->setRange(0, 16);
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+}
+
+// Records status bits for a (lost/late/retransmission-requested) packet.
+// No-op unless TRACK_PACKET_LOSS diagnostics are compiled in.
+void RTPReceiver::Source::modifyPacketStatus(int32_t extSeqNo, uint32_t mask) {
+#if TRACK_PACKET_LOSS
+    ssize_t index = mLostPackets.indexOfKey(extSeqNo);
+    if (index < 0) {
+        mLostPackets.add(extSeqNo, mask);
+    } else {
+        mLostPackets.editValueAt(index) |= mask;
+    }
+#endif
+}
+
+// Schedules a retransmission request at the given absolute time; stamped
+// with the current generation so cancelTimers() can void it.
+void RTPReceiver::Source::postRetransmitTimer(int64_t timeUs) {
+    int64_t delayUs = timeUs - ALooper::GetNowUs();
+    sp<AMessage> msg = new AMessage(kWhatRetransmit, id());
+    msg->setInt32("generation", mRetransmitGeneration);
+    msg->post(delayUs);
+}
+
+// Schedules the declare-lost deadline at the given absolute time; at most
+// one may be pending at a time.
+void RTPReceiver::Source::postDeclareLostTimer(int64_t timeUs) {
+    CHECK(!mDeclareLostTimerPending);
+    mDeclareLostTimerPending = true;
+
+    int64_t delayUs = timeUs - ALooper::GetNowUs();
+    sp<AMessage> msg = new AMessage(kWhatDeclareLost, id());
+    msg->setInt32("generation", mDeclareLostGeneration);
+    msg->post(delayUs);
+}
+
+// Invalidates any in-flight timer messages by bumping both generation
+// counters (messages carrying stale generations are ignored on receipt).
+void RTPReceiver::Source::cancelTimers() {
+    ++mRetransmitGeneration;
+    ++mDeclareLostGeneration;
+    mDeclareLostTimerPending = false;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPReceiver.h b/media/libstagefright/wifi-display/rtp/RTPReceiver.h
new file mode 100644
index 0000000..240ab2e
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPReceiver.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_RECEIVER_H_
+
+#define RTP_RECEIVER_H_
+
+#include "RTPBase.h"
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct ANetworkSession;
+
+// An object of this class facilitates receiving of media data on an RTP
+// channel. The channel is established over a UDP or TCP connection depending
+// on which "TransportMode" was chosen. In addition different RTP packetization
+// schemes are supported such as "Transport Stream Packets over RTP",
+// or "AVC/H.264 encapsulation as specified in RFC 3984 (non-interleaved mode)"
+struct RTPReceiver : public RTPBase, public AHandler {
+    enum {
+        kWhatInitDone,
+        kWhatError,
+        kWhatAccessUnit,
+        kWhatPacketLost,
+    };
+
+    enum Flags {
+        FLAG_AUTO_CONNECT = 1,
+    };
+    RTPReceiver(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify,
+            uint32_t flags = 0);
+
+    status_t registerPacketType(
+            uint8_t packetType, PacketizationMode mode);
+
+    status_t initAsync(
+            TransportMode rtpMode,
+            TransportMode rtcpMode,
+            int32_t *outLocalRTPPort);
+
+    status_t connect(
+            const char *remoteHost,
+            int32_t remoteRTPPort,
+            int32_t remoteRTCPPort);
+
+    status_t informSender(const sp<AMessage> &params);
+
+protected:
+    virtual ~RTPReceiver();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatRTPNotify,
+        kWhatRTCPNotify,
+        kWhatSendRR,
+    };
+
+    enum {
+        kSourceID                       = 0xdeadbeef,
+        kPacketLostAfterUs              = 100000,
+        kRequestRetransmissionAfterUs   = -1,
+    };
+
+    struct Assembler;
+    struct H264Assembler;
+    struct Source;
+    struct TSAssembler;
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+    uint32_t mFlags;
+    TransportMode mRTPMode;
+    TransportMode mRTCPMode;
+    int32_t mRTPSessionID;
+    int32_t mRTCPSessionID;
+    bool mRTPConnected;
+    bool mRTCPConnected;
+
+    int32_t mRTPClientSessionID;  // in TRANSPORT_TCP mode.
+    int32_t mRTCPClientSessionID;  // in TRANSPORT_TCP mode.
+
+    KeyedVector<uint8_t, PacketizationMode> mPacketTypes;
+    KeyedVector<uint32_t, sp<Source> > mSources;
+
+    void onNetNotify(bool isRTP, const sp<AMessage> &msg);
+    status_t onRTPData(const sp<ABuffer> &data);
+    status_t onRTCPData(const sp<ABuffer> &data);
+    void onSendRR();
+
+    void scheduleSendRR();
+    void addSDES(const sp<ABuffer> &buffer);
+
+    void notifyInitDone(status_t err);
+    void notifyError(status_t err);
+    void notifyPacketLost();
+
+    sp<Assembler> makeAssembler(uint8_t packetType);
+
+    void requestRetransmission(uint32_t senderSSRC, int32_t extSeqNo);
+
+    DISALLOW_EVIL_CONSTRUCTORS(RTPReceiver);
+};
+
+}  // namespace android
+
+#endif  // RTP_RECEIVER_H_
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index 095fd97..1887b8b 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -20,11 +20,10 @@
 
 #include "RTPSender.h"
 
-#include "ANetworkSession.h"
-
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
@@ -767,6 +766,17 @@
 }
 
 status_t RTPSender::parseAPP(const uint8_t *data, size_t size) {
+    if (!memcmp("late", &data[8], 4)) {
+        int64_t avgLatencyUs = (int64_t)U64_AT(&data[12]);
+        int64_t maxLatencyUs = (int64_t)U64_AT(&data[20]);
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatInformSender);
+        notify->setInt64("avgLatencyUs", avgLatencyUs);
+        notify->setInt64("maxLatencyUs", maxLatencyUs);
+        notify->post();
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.h b/media/libstagefright/wifi-display/rtp/RTPSender.h
index 7dc138a..fefcab7 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.h
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.h
@@ -37,6 +37,7 @@
         kWhatInitDone,
         kWhatError,
         kWhatNetworkStall,
+        kWhatInformSender,
     };
     RTPSender(
             const sp<ANetworkSession> &netSession,
diff --git a/media/libstagefright/wifi-display/rtptest.cpp b/media/libstagefright/wifi-display/rtptest.cpp
new file mode 100644
index 0000000..b902f29
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtptest.cpp
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "rtptest"
+#include <utils/Log.h>
+
+#include "rtp/RTPSender.h"
+#include "rtp/RTPReceiver.h"
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/Utils.h>
+
+#define MEDIA_FILENAME "/sdcard/Frame Counter HD 30FPS_1080p.mp4"
+
+namespace android {
+
+struct PacketSource : public RefBase {
+    PacketSource() {}
+
+    virtual sp<ABuffer> getNextAccessUnit() = 0;
+
+protected:
+    virtual ~PacketSource() {}
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(PacketSource);
+};
+
+struct MediaPacketSource : public PacketSource {
+    MediaPacketSource()
+        : mMaxSampleSize(1024 * 1024) {
+        mExtractor = new NuMediaExtractor;
+        CHECK_EQ((status_t)OK,
+                 mExtractor->setDataSource(MEDIA_FILENAME));
+
+        bool haveVideo = false;
+        for (size_t i = 0; i < mExtractor->countTracks(); ++i) {
+            sp<AMessage> format;
+            CHECK_EQ((status_t)OK, mExtractor->getTrackFormat(i, &format));
+
+            AString mime;
+            CHECK(format->findString("mime", &mime));
+
+            if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str())) {
+                mExtractor->selectTrack(i);
+                haveVideo = true;
+                break;
+            }
+        }
+
+        CHECK(haveVideo);
+    }
+
+    virtual sp<ABuffer> getNextAccessUnit() {
+        int64_t timeUs;
+        status_t err = mExtractor->getSampleTime(&timeUs);
+
+        if (err != OK) {
+            return NULL;
+        }
+
+        sp<ABuffer> accessUnit = new ABuffer(mMaxSampleSize);
+        CHECK_EQ((status_t)OK, mExtractor->readSampleData(accessUnit));
+
+        accessUnit->meta()->setInt64("timeUs", timeUs);
+
+        CHECK_EQ((status_t)OK, mExtractor->advance());
+
+        return accessUnit;
+    }
+
+protected:
+    virtual ~MediaPacketSource() {
+    }
+
+private:
+    sp<NuMediaExtractor> mExtractor;
+    size_t mMaxSampleSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaPacketSource);
+};
+
+struct SimplePacketSource : public PacketSource {
+    SimplePacketSource()
+        : mCounter(0) {
+    }
+
+    virtual sp<ABuffer> getNextAccessUnit() {
+        sp<ABuffer> buffer = new ABuffer(4);
+        uint8_t *dst = buffer->data();
+        dst[0] = mCounter >> 24;
+        dst[1] = (mCounter >> 16) & 0xff;
+        dst[2] = (mCounter >> 8) & 0xff;
+        dst[3] = mCounter & 0xff;
+
+        buffer->meta()->setInt64("timeUs", mCounter * 1000000ll / kFrameRate);
+
+        ++mCounter;
+
+        return buffer;
+    }
+
+protected:
+    virtual ~SimplePacketSource() {
+    }
+
+private:
+    enum {
+        kFrameRate = 30
+    };
+
+    uint32_t mCounter;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SimplePacketSource);
+};
+
+struct TestHandler : public AHandler {
+    TestHandler(const sp<ANetworkSession> &netSession);
+
+    void listen();
+    void connect(const char *host, int32_t port);
+
+protected:
+    virtual ~TestHandler();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatListen,
+        kWhatConnect,
+        kWhatReceiverNotify,
+        kWhatSenderNotify,
+        kWhatSendMore,
+        kWhatStop,
+        kWhatTimeSyncerNotify,
+    };
+
+#if 1
+    static const RTPBase::TransportMode kRTPMode = RTPBase::TRANSPORT_UDP;
+    static const RTPBase::TransportMode kRTCPMode = RTPBase::TRANSPORT_UDP;
+#else
+    static const RTPBase::TransportMode kRTPMode = RTPBase::TRANSPORT_TCP;
+    static const RTPBase::TransportMode kRTCPMode = RTPBase::TRANSPORT_NONE;
+#endif
+
+#if 1
+    static const RTPBase::PacketizationMode kPacketizationMode
+        = RTPBase::PACKETIZATION_H264;
+#else
+    static const RTPBase::PacketizationMode kPacketizationMode
+        = RTPBase::PACKETIZATION_NONE;
+#endif
+
+    sp<ANetworkSession> mNetSession;
+    sp<PacketSource> mSource;
+    sp<RTPSender> mSender;
+    sp<RTPReceiver> mReceiver;
+
+    sp<TimeSyncer> mTimeSyncer;
+    bool mTimeSyncerStarted;
+
+    int64_t mFirstTimeRealUs;
+    int64_t mFirstTimeMediaUs;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    status_t readMore();
+
+    DISALLOW_EVIL_CONSTRUCTORS(TestHandler);
+};
+
+TestHandler::TestHandler(const sp<ANetworkSession> &netSession)
+    : mNetSession(netSession),
+      mTimeSyncerStarted(false),
+      mFirstTimeRealUs(-1ll),
+      mFirstTimeMediaUs(-1ll),
+      mTimeOffsetUs(-1ll),
+      mTimeOffsetValid(false) {
+}
+
+TestHandler::~TestHandler() {
+}
+
+void TestHandler::listen() {
+    sp<AMessage> msg = new AMessage(kWhatListen, id());
+    msg->post();
+}
+
+void TestHandler::connect(const char *host, int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    msg->setString("host", host);
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+static void dumpDelay(int64_t delayMs) {
+    static const int64_t kMinDelayMs = 0;
+    static const int64_t kMaxDelayMs = 300;
+
+    const char *kPattern = "########################################";
+    size_t kPatternSize = strlen(kPattern);
+
+    int n = (kPatternSize * (delayMs - kMinDelayMs))
+                / (kMaxDelayMs - kMinDelayMs);
+
+    if (n < 0) {
+        n = 0;
+    } else if ((size_t)n > kPatternSize) {
+        n = kPatternSize;
+    }
+
+    ALOGI("(%4lld ms) %s\n",
+          delayMs,
+          kPattern + kPatternSize - n);
+}
+
+void TestHandler::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatListen:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+
+            notify = new AMessage(kWhatReceiverNotify, id());
+            mReceiver = new RTPReceiver(
+                    mNetSession, notify, RTPReceiver::FLAG_AUTO_CONNECT);
+            looper()->registerHandler(mReceiver);
+
+            CHECK_EQ((status_t)OK,
+                     mReceiver->registerPacketType(33, kPacketizationMode));
+
+            int32_t receiverRTPPort;
+            CHECK_EQ((status_t)OK,
+                     mReceiver->initAsync(
+                         kRTPMode,
+                         kRTCPMode,
+                         &receiverRTPPort));
+
+            printf("picked receiverRTPPort %d\n", receiverRTPPort);
+
+#if 0
+            CHECK_EQ((status_t)OK,
+                     mReceiver->connect(
+                         "127.0.0.1", senderRTPPort, senderRTPPort + 1));
+#endif
+            break;
+        }
+
+        case kWhatConnect:
+        {
+            AString host;
+            CHECK(msg->findString("host", &host));
+
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+            mTimeSyncer->startServer(8123);
+
+            int32_t receiverRTPPort;
+            CHECK(msg->findInt32("port", &receiverRTPPort));
+
+#if 1
+            mSource = new MediaPacketSource;
+#else
+            mSource = new SimplePacketSource;
+#endif
+
+            notify = new AMessage(kWhatSenderNotify, id());
+            mSender = new RTPSender(mNetSession, notify);
+
+            looper()->registerHandler(mSender);
+
+            int32_t senderRTPPort;
+            CHECK_EQ((status_t)OK,
+                     mSender->initAsync(
+                         host.c_str(),
+                         receiverRTPPort,
+                         kRTPMode,
+                         kRTCPMode == RTPBase::TRANSPORT_NONE
+                            ? -1 : receiverRTPPort + 1,
+                         kRTCPMode,
+                         &senderRTPPort));
+
+            printf("picked senderRTPPort %d\n", senderRTPPort);
+            break;
+        }
+
+        case kWhatSenderNotify:
+        {
+            ALOGI("kWhatSenderNotify");
+
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case RTPSender::kWhatInitDone:
+                {
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGI("RTPSender::initAsync completed w/ err %d", err);
+
+                    if (err == OK) {
+                        err = readMore();
+
+                        if (err != OK) {
+                            (new AMessage(kWhatStop, id()))->post();
+                        }
+                    }
+                    break;
+                }
+
+                case RTPSender::kWhatError:
+                    break;
+            }
+            break;
+        }
+
+        case kWhatReceiverNotify:
+        {
+            ALOGV("kWhatReceiverNotify");
+
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case RTPReceiver::kWhatInitDone:
+                {
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGI("RTPReceiver::initAsync completed w/ err %d", err);
+                    break;
+                }
+
+                case RTPReceiver::kWhatError:
+                    break;
+
+                case RTPReceiver::kWhatAccessUnit:
+                {
+#if 0
+                    if (!mTimeSyncerStarted) {
+                        mTimeSyncer->startClient("172.18.41.216", 8123);
+                        mTimeSyncerStarted = true;
+                    }
+
+                    sp<ABuffer> accessUnit;
+                    CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+                    int64_t timeUs;
+                    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+                    if (mTimeOffsetValid) {
+                        timeUs -= mTimeOffsetUs;
+                        int64_t nowUs = ALooper::GetNowUs();
+                        int64_t delayMs = (nowUs - timeUs) / 1000ll;
+
+                        dumpDelay(delayMs);
+                    }
+#endif
+                    break;
+                }
+
+                case RTPReceiver::kWhatPacketLost:
+                    ALOGV("kWhatPacketLost");
+                    break;
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatSendMore:
+        {
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            CHECK_EQ((status_t)OK,
+                     mSender->queueBuffer(
+                         accessUnit,
+                         33,
+                         kPacketizationMode));
+
+            status_t err = readMore();
+
+            if (err != OK) {
+                (new AMessage(kWhatStop, id()))->post();
+            }
+            break;
+        }
+
+        case kWhatStop:
+        {
+            if (mReceiver != NULL) {
+                looper()->unregisterHandler(mReceiver->id());
+                mReceiver.clear();
+            }
+
+            if (mSender != NULL) {
+                looper()->unregisterHandler(mSender->id());
+                mSender.clear();
+            }
+
+            mSource.clear();
+
+            looper()->stop();
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+            mTimeOffsetValid = true;
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+status_t TestHandler::readMore() {
+    sp<ABuffer> accessUnit = mSource->getNextAccessUnit();
+
+    if (accessUnit == NULL) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t whenUs;
+
+    if (mFirstTimeRealUs < 0ll) {
+        mFirstTimeRealUs = whenUs = nowUs;
+        mFirstTimeMediaUs = timeUs;
+    } else {
+        whenUs = mFirstTimeRealUs + timeUs - mFirstTimeMediaUs;
+    }
+
+    accessUnit->meta()->setInt64("timeUs", whenUs);
+
+    sp<AMessage> msg = new AMessage(kWhatSendMore, id());
+    msg->setBuffer("accessUnit", accessUnit);
+    msg->post(whenUs - nowUs);
+
+    return OK;
+}
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host:port\tconnect to remote host\n"
+            "               -l       \tlisten\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    // srand(time(NULL));
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    bool listen = false;
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    usage(argv[0]);
+                    exit(1);
+                }
+
+                connectToHost.setTo(optarg, colonPos - optarg);
+
+                char *end;
+                connectToPort = strtol(colonPos + 1, &end, 10);
+
+                if (*end != '\0' || end == colonPos + 1
+                        || connectToPort < 1 || connectToPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                listen = true;
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if (!listen && connectToPort < 0) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TestHandler> handler = new TestHandler(netSession);
+    looper->registerHandler(handler);
+
+    if (listen) {
+        handler->listen();
+    }
+
+    if (connectToPort >= 0) {
+        handler->connect(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
+
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
new file mode 100644
index 0000000..cdb2267
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -0,0 +1,653 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DirectRenderer"
+#include <utils/Log.h>
+
+#include "DirectRenderer.h"
+
+#include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
+#include <media/AudioTrack.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+
+namespace android {
+
+/*
+   Drives the decoding process using a MediaCodec instance. Input buffers
+   queued by calls to "queueInputBuffer" are fed to the decoder as soon
+   as the decoder is ready for them, the client is notified about output
+   buffers as the decoder spits them out.
+*/
+struct DirectRenderer::DecoderContext : public AHandler {
+    enum {
+        kWhatOutputBufferReady,
+    };
+    DecoderContext(const sp<AMessage> &notify);
+
+    status_t init(
+            const sp<AMessage> &format,
+            const sp<IGraphicBufferProducer> &surfaceTex);
+
+    void queueInputBuffer(const sp<ABuffer> &accessUnit);
+
+    status_t renderOutputBufferAndRelease(size_t index);
+    status_t releaseOutputBuffer(size_t index);
+
+protected:
+    virtual ~DecoderContext();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatDecoderNotify,
+    };
+
+    sp<AMessage> mNotify;
+    sp<ALooper> mDecoderLooper;
+    sp<MediaCodec> mDecoder;
+    Vector<sp<ABuffer> > mDecoderInputBuffers;
+    Vector<sp<ABuffer> > mDecoderOutputBuffers;
+    List<size_t> mDecoderInputBuffersAvailable;
+    bool mDecoderNotificationPending;
+
+    List<sp<ABuffer> > mAccessUnits;
+
+    void onDecoderNotify();
+    void scheduleDecoderNotification();
+    void queueDecoderInputBuffers();
+
+    void queueOutputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DecoderContext);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/*
+   A "push" audio renderer. The primary function of this renderer is to use
+   an AudioTrack in push mode and making sure not to block the event loop
+   by ensuring that calls to AudioTrack::write never block. This is done by
+   estimating an upper bound of data that can be written to the AudioTrack
+   buffer without delay.
+*/
+struct DirectRenderer::AudioRenderer : public AHandler {
+    AudioRenderer(const sp<DecoderContext> &decoderContext);
+
+    void queueInputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+protected:
+    virtual ~AudioRenderer();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatPushAudio,
+    };
+
+    struct BufferInfo {
+        size_t mIndex;
+        int64_t mTimeUs;
+        sp<ABuffer> mBuffer;
+    };
+
+    sp<DecoderContext> mDecoderContext;
+    sp<AudioTrack> mAudioTrack;
+
+    List<BufferInfo> mInputBuffers;
+    bool mPushPending;
+
+    size_t mNumFramesWritten;
+
+    void schedulePushIfNecessary();
+    void onPushAudio();
+
+    ssize_t writeNonBlocking(const uint8_t *data, size_t size);
+
+    DISALLOW_EVIL_CONSTRUCTORS(AudioRenderer);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DecoderContext::DecoderContext(const sp<AMessage> &notify)
+    : mNotify(notify),
+      mDecoderNotificationPending(false) {
+}
+
+DirectRenderer::DecoderContext::~DecoderContext() {
+    if (mDecoder != NULL) {
+        mDecoder->release();
+        mDecoder.clear();
+
+        mDecoderLooper->stop();
+        mDecoderLooper.clear();
+    }
+}
+
+status_t DirectRenderer::DecoderContext::init(
+        const sp<AMessage> &format,
+        const sp<IGraphicBufferProducer> &surfaceTex) {
+    CHECK(mDecoder == NULL);
+
+    AString mime;
+    CHECK(format->findString("mime", &mime));
+
+    mDecoderLooper = new ALooper;
+    mDecoderLooper->setName("video codec looper");
+
+    mDecoderLooper->start(
+            false /* runOnCallingThread */,
+            false /* canCallJava */,
+            PRIORITY_DEFAULT);
+
+    mDecoder = MediaCodec::CreateByType(
+            mDecoderLooper, mime.c_str(), false /* encoder */);
+
+    CHECK(mDecoder != NULL);
+
+    status_t err = mDecoder->configure(
+            format,
+            surfaceTex == NULL
+                ? NULL : new Surface(surfaceTex),
+            NULL /* crypto */,
+            0 /* flags */);
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->start();
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->getInputBuffers(
+            &mDecoderInputBuffers);
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->getOutputBuffers(
+            &mDecoderOutputBuffers);
+    CHECK_EQ(err, (status_t)OK);
+
+    scheduleDecoderNotification();
+
+    return OK;
+}
+
+void DirectRenderer::DecoderContext::queueInputBuffer(
+        const sp<ABuffer> &accessUnit) {
+    CHECK(mDecoder != NULL);
+
+    mAccessUnits.push_back(accessUnit);
+    queueDecoderInputBuffers();
+}
+
+status_t DirectRenderer::DecoderContext::renderOutputBufferAndRelease(
+        size_t index) {
+    return mDecoder->renderOutputBufferAndRelease(index);
+}
+
+status_t DirectRenderer::DecoderContext::releaseOutputBuffer(size_t index) {
+    return mDecoder->releaseOutputBuffer(index);
+}
+
+void DirectRenderer::DecoderContext::queueDecoderInputBuffers() {
+    if (mDecoder == NULL) {
+        return;
+    }
+
+    bool submittedMore = false;
+
+    while (!mAccessUnits.empty()
+            && !mDecoderInputBuffersAvailable.empty()) {
+        size_t index = *mDecoderInputBuffersAvailable.begin();
+
+        mDecoderInputBuffersAvailable.erase(
+                mDecoderInputBuffersAvailable.begin());
+
+        sp<ABuffer> srcBuffer = *mAccessUnits.begin();
+        mAccessUnits.erase(mAccessUnits.begin());
+
+        const sp<ABuffer> &dstBuffer =
+            mDecoderInputBuffers.itemAt(index);
+
+        memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
+
+        int64_t timeUs;
+        CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+
+        status_t err = mDecoder->queueInputBuffer(
+                index,
+                0 /* offset */,
+                srcBuffer->size(),
+                timeUs,
+                0 /* flags */);
+        CHECK_EQ(err, (status_t)OK);
+
+        submittedMore = true;
+    }
+
+    if (submittedMore) {
+        scheduleDecoderNotification();
+    }
+}
+
+void DirectRenderer::DecoderContext::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::DecoderContext::onDecoderNotify() {
+    mDecoderNotificationPending = false;
+
+    for (;;) {
+        size_t index;
+        status_t err = mDecoder->dequeueInputBuffer(&index);
+
+        if (err == OK) {
+            mDecoderInputBuffersAvailable.push_back(index);
+        } else if (err == -EAGAIN) {
+            break;
+        } else {
+            TRESPASS();
+        }
+    }
+
+    queueDecoderInputBuffers();
+
+    for (;;) {
+        size_t index;
+        size_t offset;
+        size_t size;
+        int64_t timeUs;
+        uint32_t flags;
+        status_t err = mDecoder->dequeueOutputBuffer(
+                &index,
+                &offset,
+                &size,
+                &timeUs,
+                &flags);
+
+        if (err == OK) {
+            queueOutputBuffer(
+                    index, timeUs, mDecoderOutputBuffers.itemAt(index));
+        } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+            err = mDecoder->getOutputBuffers(
+                    &mDecoderOutputBuffers);
+            CHECK_EQ(err, (status_t)OK);
+        } else if (err == INFO_FORMAT_CHANGED) {
+            // We don't care.
+        } else if (err == -EAGAIN) {
+            break;
+        } else {
+            TRESPASS();
+        }
+    }
+
+    scheduleDecoderNotification();
+}
+
+void DirectRenderer::DecoderContext::scheduleDecoderNotification() {
+    if (mDecoderNotificationPending) {
+        return;
+    }
+
+    sp<AMessage> notify =
+        new AMessage(kWhatDecoderNotify, id());
+
+    mDecoder->requestActivityNotification(notify);
+    mDecoderNotificationPending = true;
+}
+
+void DirectRenderer::DecoderContext::queueOutputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatOutputBufferReady);
+    msg->setSize("index", index);
+    msg->setInt64("timeUs", timeUs);
+    msg->setBuffer("buffer", buffer);
+    msg->post();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::AudioRenderer::AudioRenderer(
+        const sp<DecoderContext> &decoderContext)
+    : mDecoderContext(decoderContext),
+      mPushPending(false),
+      mNumFramesWritten(0) {
+    mAudioTrack = new AudioTrack(
+            AUDIO_STREAM_DEFAULT,
+            48000.0f,
+            AUDIO_FORMAT_PCM,
+            AUDIO_CHANNEL_OUT_STEREO,
+            (int)0 /* frameCount */);
+
+    CHECK_EQ((status_t)OK, mAudioTrack->initCheck());
+
+    mAudioTrack->start();
+}
+
+DirectRenderer::AudioRenderer::~AudioRenderer() {
+}
+
+void DirectRenderer::AudioRenderer::queueInputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    BufferInfo info;
+    info.mIndex = index;
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
+
+    mInputBuffers.push_back(info);
+    schedulePushIfNecessary();
+}
+
+// Message dispatch for the audio renderer. Only kWhatPushAudio is
+// expected; any other message is silently ignored.
+void DirectRenderer::AudioRenderer::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatPushAudio:
+        {
+            onPushAudio();
+            break;
+        }
+
+        default:
+            break;
+    }
+}
+
+// Schedules a kWhatPushAudio message if there is queued input and no push
+// is already pending. The delay is chosen so the push fires after roughly
+// half of the sink's currently buffered audio has played out.
+void DirectRenderer::AudioRenderer::schedulePushIfNecessary() {
+    if (mPushPending || mInputBuffers.empty()) {
+        return;
+    }
+
+    mPushPending = true;
+
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioTrack->getPosition(&numFramesPlayed),
+             (status_t)OK);
+
+    // Frames written but not yet consumed by the sink.
+    uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+
+    // This is how long the audio sink will have data to
+    // play back.
+    const float msecsPerFrame = 1000.0f / mAudioTrack->getSampleRate();
+
+    int64_t delayUs =
+        msecsPerFrame * numFramesPendingPlayout * 1000ll;
+
+    // Let's give it more data after about half that time
+    // has elapsed.
+    (new AMessage(kWhatPushAudio, id()))->post(delayUs / 2);
+}
+
+// Drains queued input buffers into the AudioTrack without blocking. A
+// buffer that is only partially written is trimmed in place and retried
+// on the next push; fully written buffers are released back to the
+// decoder. Always reschedules if input remains.
+// NOTE(review): writeNonBlocking() returns a negative status_t if
+// getPosition() fails, which would trip the CHECK_GE below — confirm
+// that aborting is the intended handling of that failure.
+void DirectRenderer::AudioRenderer::onPushAudio() {
+    mPushPending = false;
+
+    while (!mInputBuffers.empty()) {
+        const BufferInfo &info = *mInputBuffers.begin();
+
+        ssize_t n = writeNonBlocking(
+                info.mBuffer->data(), info.mBuffer->size());
+
+        if (n < (ssize_t)info.mBuffer->size()) {
+            CHECK_GE(n, 0);
+
+            // Partial write: keep the unwritten tail for the next push.
+            info.mBuffer->setRange(
+                    info.mBuffer->offset() + n, info.mBuffer->size() - n);
+            break;
+        }
+
+        mDecoderContext->releaseOutputBuffer(info.mIndex);
+
+        mInputBuffers.erase(mInputBuffers.begin());
+    }
+
+    schedulePushIfNecessary();
+}
+
+// Writes at most as many bytes as currently fit in the AudioTrack's
+// buffer (track capacity minus frames written-but-unplayed), so the call
+// never blocks. Returns the number of bytes written, or a negative
+// status_t if querying the playback position fails.
+ssize_t DirectRenderer::AudioRenderer::writeNonBlocking(
+        const uint8_t *data, size_t size) {
+    uint32_t numFramesPlayed;
+    status_t err = mAudioTrack->getPosition(&numFramesPlayed);
+    if (err != OK) {
+        return err;
+    }
+
+    ssize_t numFramesAvailableToWrite =
+        mAudioTrack->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+    size_t numBytesAvailableToWrite =
+        numFramesAvailableToWrite * mAudioTrack->frameSize();
+
+    // Clamp the request to the free space so write() cannot block.
+    if (size > numBytesAvailableToWrite) {
+        size = numBytesAvailableToWrite;
+    }
+
+    CHECK_EQ(mAudioTrack->write(data, size), (ssize_t)size);
+
+    size_t numFramesWritten = size / mAudioTrack->frameSize();
+    mNumFramesWritten += numFramesWritten;
+
+    return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Renders decoded video into |bufferProducer|; decoders and the audio
+// renderer are created lazily when formats arrive (see internalSetFormat).
+DirectRenderer::DirectRenderer(
+        const sp<IGraphicBufferProducer> &bufferProducer)
+    : mSurfaceTex(bufferProducer),
+      mVideoRenderPending(false),
+      mNumFramesLate(0),
+      mNumFrames(0) {
+}
+
+// Nothing to tear down explicitly; members are released by their own
+// destructors.
+DirectRenderer::~DirectRenderer() {
+}
+
+// Message dispatch for the renderer. All supported messages are routed to
+// their dedicated handlers; anything else is a programming error.
+void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify(msg);
+            break;
+        }
+
+        case kWhatRenderVideo:
+        {
+            onRenderVideo();
+            break;
+        }
+
+        case kWhatQueueAccessUnit:
+            onQueueAccessUnit(msg);
+            break;
+
+        case kWhatSetFormat:
+            onSetFormat(msg);
+            break;
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Public entry point: hands the track format to the renderer's own
+// looper thread via a kWhatSetFormat message (see onSetFormat).
+void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+    sp<AMessage> msg = new AMessage(kWhatSetFormat, id());
+    msg->setSize("trackIndex", trackIndex);
+    msg->setMessage("format", format);
+    msg->post();
+}
+
+// Unpacks a kWhatSetFormat message and applies it on the looper thread.
+void DirectRenderer::onSetFormat(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<AMessage> format;
+    CHECK(msg->findMessage("format", &format));
+
+    internalSetFormat(trackIndex, format);
+}
+
+// Creates the decoder for a track the first time its format is seen.
+// Track 0 is video (decodes straight to the output surface); track 1 is
+// audio, for which an AudioRenderer is additionally created. Setting a
+// format twice for the same track is a programming error.
+void DirectRenderer::internalSetFormat(
+        size_t trackIndex, const sp<AMessage> &format) {
+    CHECK_LT(trackIndex, 2u);
+
+    CHECK(mDecoderContext[trackIndex] == NULL);
+
+    sp<AMessage> notify = new AMessage(kWhatDecoderNotify, id());
+    notify->setSize("trackIndex", trackIndex);
+
+    mDecoderContext[trackIndex] = new DecoderContext(notify);
+    looper()->registerHandler(mDecoderContext[trackIndex]);
+
+    CHECK_EQ((status_t)OK,
+             mDecoderContext[trackIndex]->init(
+                 format, trackIndex == 0 ? mSurfaceTex : NULL));
+
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer = new AudioRenderer(mDecoderContext[1]);
+        looper()->registerHandler(mAudioRenderer);
+    }
+}
+
+// Public entry point: hands a coded access unit to the renderer's looper
+// thread via a kWhatQueueAccessUnit message (see onQueueAccessUnit).
+void DirectRenderer::queueAccessUnit(
+        size_t trackIndex, const sp<ABuffer> &accessUnit) {
+    sp<AMessage> msg = new AMessage(kWhatQueueAccessUnit, id());
+    msg->setSize("trackIndex", trackIndex);
+    msg->setBuffer("accessUnit", accessUnit);
+    msg->post();
+}
+
+// Feeds a coded access unit to the track's decoder. The track's format
+// must already have been set (so the decoder exists).
+void DirectRenderer::onQueueAccessUnit(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    sp<ABuffer> accessUnit;
+    CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+    CHECK_LT(trackIndex, 2u);
+    CHECK(mDecoderContext[trackIndex] != NULL);
+
+    mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
+}
+
+// Handles notifications from a DecoderContext. Currently only
+// kWhatOutputBufferReady is produced: a decoded buffer plus the track it
+// belongs to, which is forwarded to queueOutputBuffer().
+void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case DecoderContext::kWhatOutputBufferReady:
+        {
+            size_t index;
+            CHECK(msg->findSize("index", &index));
+
+            int64_t timeUs;
+            CHECK(msg->findInt64("timeUs", &timeUs));
+
+            sp<ABuffer> buffer;
+            CHECK(msg->findBuffer("buffer", &buffer));
+
+            queueOutputBuffer(trackIndex, index, timeUs, buffer);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Routes a decoded output buffer: audio (track 1) goes straight to the
+// AudioRenderer; video is appended to the pending-output queue and a
+// render pass is scheduled for its timestamp.
+void DirectRenderer::queueOutputBuffer(
+        size_t trackIndex,
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer->queueInputBuffer(index, timeUs, buffer);
+        return;
+    }
+
+    OutputInfo info;
+    info.mIndex = index;
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
+    mVideoOutputBuffers.push_back(info);
+
+    scheduleVideoRenderIfNecessary();
+}
+
+// Schedules a kWhatRenderVideo message timed to the presentation
+// timestamp of the earliest queued video buffer. Coalesced so at most
+// one render message is in flight.
+void DirectRenderer::scheduleVideoRenderIfNecessary() {
+    if (mVideoRenderPending || mVideoOutputBuffers.empty()) {
+        return;
+    }
+
+    mVideoRenderPending = true;
+
+    int64_t timeUs = (*mVideoOutputBuffers.begin()).mTimeUs;
+    int64_t nowUs = ALooper::GetNowUs();
+
+    // May be negative if the buffer is already late; post() then fires
+    // immediately.
+    int64_t delayUs = timeUs - nowUs;
+
+    (new AMessage(kWhatRenderVideo, id()))->post(delayUs);
+}
+
+// Renders every queued video buffer whose presentation time has arrived,
+// counting frames that are more than 15 ms late, then reschedules for the
+// next pending buffer (if any).
+void DirectRenderer::onRenderVideo() {
+    mVideoRenderPending = false;
+
+    int64_t nowUs = ALooper::GetNowUs();
+
+    while (!mVideoOutputBuffers.empty()) {
+        const OutputInfo &info = *mVideoOutputBuffers.begin();
+
+        if (info.mTimeUs > nowUs) {
+            // First not-yet-due buffer; the queue is in arrival order.
+            break;
+        }
+
+        if (info.mTimeUs + 15000ll < nowUs) {
+            ++mNumFramesLate;
+        }
+        ++mNumFrames;
+
+        status_t err =
+            mDecoderContext[0]->renderOutputBufferAndRelease(info.mIndex);
+        CHECK_EQ(err, (status_t)OK);
+
+        mVideoOutputBuffers.erase(mVideoOutputBuffers.begin());
+    }
+
+    scheduleVideoRenderIfNecessary();
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
new file mode 100644
index 0000000..07c2170
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DIRECT_RENDERER_H_
+
+#define DIRECT_RENDERER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct IGraphicBufferProducer;
+
+// Renders audio and video data queued by calls to "queueAccessUnit".
+struct DirectRenderer : public AHandler {
+    DirectRenderer(const sp<IGraphicBufferProducer> &bufferProducer);
+
+    // Supplies the decoder format for a track; must precede access units
+    // for that track. Track 0 is video, track 1 is audio.
+    void setFormat(size_t trackIndex, const sp<AMessage> &format);
+
+    // Queues one coded access unit for decode and timed render/playback.
+    void queueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+    virtual ~DirectRenderer();
+
+private:
+    struct DecoderContext;
+    struct AudioRenderer;
+
+    enum {
+        kWhatDecoderNotify,
+        kWhatRenderVideo,
+        kWhatQueueAccessUnit,
+        kWhatSetFormat,
+    };
+
+    // A decoded video buffer awaiting its presentation time.
+    struct OutputInfo {
+        size_t mIndex;      // decoder output-buffer index
+        int64_t mTimeUs;    // presentation timestamp
+        sp<ABuffer> mBuffer;
+    };
+
+    sp<IGraphicBufferProducer> mSurfaceTex;
+
+    // One decoder per track: [0] video, [1] audio.
+    sp<DecoderContext> mDecoderContext[2];
+    List<OutputInfo> mVideoOutputBuffers;
+
+    // True while a kWhatRenderVideo message is in flight.
+    bool mVideoRenderPending;
+
+    sp<AudioRenderer> mAudioRenderer;
+
+    // Stats: frames rendered >15ms late vs. total frames rendered.
+    int32_t mNumFramesLate;
+    int32_t mNumFrames;
+
+    void onDecoderNotify(const sp<AMessage> &msg);
+
+    void queueOutputBuffer(
+            size_t trackIndex,
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+    void scheduleVideoRenderIfNecessary();
+    void onRenderVideo();
+
+    void onSetFormat(const sp<AMessage> &msg);
+    void onQueueAccessUnit(const sp<AMessage> &msg);
+
+    void internalSetFormat(size_t trackIndex, const sp<AMessage> &format);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
+};
+
+}  // namespace android
+
+#endif  // DIRECT_RENDERER_H_
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
new file mode 100644
index 0000000..bc88f1e
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WifiDisplaySink"
+#include <utils/Log.h>
+
+#include "WifiDisplaySink.h"
+
+#include "DirectRenderer.h"
+#include "MediaReceiver.h"
+#include "TimeSyncer.h"
+
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ParsedMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+// static
+const AString WifiDisplaySink::sUserAgent = MakeUserAgent();
+
+// Wi-Fi Display (Miracast) sink: drives the RTSP session with the source
+// and feeds received media to a DirectRenderer drawing into
+// |bufferProducer|. |notify|, if non-NULL, receives lifecycle events
+// (e.g. kWhatDisconnected); otherwise the looper is stopped on errors.
+WifiDisplaySink::WifiDisplaySink(
+        uint32_t flags,
+        const sp<ANetworkSession> &netSession,
+        const sp<IGraphicBufferProducer> &bufferProducer,
+        const sp<AMessage> &notify)
+    : mState(UNDEFINED),
+      mFlags(flags),
+      mNetSession(netSession),
+      mSurfaceTex(bufferProducer),
+      mNotify(notify),
+      mUsingTCPTransport(false),
+      mUsingTCPInterleaving(false),
+      mSessionID(0),
+      mNextCSeq(1),
+      mIDRFrameRequestPending(false),
+      mTimeOffsetUs(0ll),
+      mTimeOffsetValid(false),
+      mSetupDeferred(false),
+      mLatencyCount(0),
+      mLatencySumUs(0ll),
+      mLatencyMaxUs(0ll),
+      mMaxDelayMs(-1ll) {
+    // We support any and all resolutions, but prefer 720p30
+    mSinkSupportedVideoFormats.setNativeResolution(
+            VideoFormats::RESOLUTION_CEA, 5);  // 1280 x 720 p30
+
+    mSinkSupportedVideoFormats.enableAll();
+}
+
+// Nothing to tear down explicitly; members are released by their own
+// destructors.
+WifiDisplaySink::~WifiDisplaySink() {
+}
+
+// Begins connecting to the source's RTSP server at sourceHost:sourcePort
+// (asynchronously, on the sink's looper thread).
+void WifiDisplaySink::start(const char *sourceHost, int32_t sourcePort) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setString("sourceHost", sourceHost);
+    msg->setInt32("sourcePort", sourcePort);
+    msg->post();
+}
+
+// Begins connecting using an rtsp:// setup URI.
+// NOTE(review): the kWhatStart handler in this file CHECKs for
+// "sourceHost"/"sourcePort" and does not read "setupURI", so a message
+// posted by this overload would trip those CHECKs — confirm whether this
+// entry point is still supported.
+void WifiDisplaySink::start(const char *uri) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setString("setupURI", uri);
+    msg->post();
+}
+
+// static
+bool WifiDisplaySink::ParseURL(
+        const char *url, AString *host, int32_t *port, AString *path,
+        AString *user, AString *pass) {
+    host->clear();
+    *port = 0;
+    path->clear();
+    user->clear();
+    pass->clear();
+
+    if (strncasecmp("rtsp://", url, 7)) {
+        return false;
+    }
+
+    const char *slashPos = strchr(&url[7], '/');
+
+    if (slashPos == NULL) {
+        host->setTo(&url[7]);
+        path->setTo("/");
+    } else {
+        host->setTo(&url[7], slashPos - &url[7]);
+        path->setTo(slashPos);
+    }
+
+    ssize_t atPos = host->find("@");
+
+    if (atPos >= 0) {
+        // Split of user:pass@ from hostname.
+
+        AString userPass(*host, 0, atPos);
+        host->erase(0, atPos + 1);
+
+        ssize_t colonPos = userPass.find(":");
+
+        if (colonPos < 0) {
+            *user = userPass;
+        } else {
+            user->setTo(userPass, 0, colonPos);
+            pass->setTo(userPass, colonPos + 1, userPass.size() - colonPos - 1);
+        }
+    }
+
+    const char *colonPos = strchr(host->c_str(), ':');
+
+    if (colonPos != NULL) {
+        char *end;
+        unsigned long x = strtoul(colonPos + 1, &end, 10);
+
+        if (end == colonPos + 1 || *end != '\0' || x >= 65536) {
+            return false;
+        }
+
+        *port = x;
+
+        size_t colonOffset = colonPos - host->c_str();
+        size_t trailing = host->size() - colonOffset;
+        host->erase(colonOffset, trailing);
+    } else {
+        *port = 554;
+    }
+
+    return true;
+}
+
+// Central message dispatch for the sink's looper thread: session startup,
+// RTSP connection events, media-receiver events, time-sync results and
+// periodic latency reporting.
+void WifiDisplaySink::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStart:
+        {
+            // Debugging aid left in place; delays startup by 2 seconds.
+            sleep(2);  // XXX
+
+            int32_t sourcePort;
+            CHECK(msg->findString("sourceHost", &mRTSPHost));
+            CHECK(msg->findInt32("sourcePort", &sourcePort));
+
+            sp<AMessage> notify = new AMessage(kWhatRTSPNotify, id());
+
+            status_t err = mNetSession->createRTSPClient(
+                    mRTSPHost.c_str(), sourcePort, notify, &mSessionID);
+            CHECK_EQ(err, (status_t)OK);
+
+            mState = CONNECTING;
+            break;
+        }
+
+        case kWhatRTSPNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatError:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    AString detail;
+                    CHECK(msg->findString("detail", &detail));
+
+                    ALOGE("An error occurred in session %d (%d, '%s/%s').",
+                          sessionID,
+                          err,
+                          detail.c_str(),
+                          strerror(-err));
+
+                    if (sessionID == mSessionID) {
+                        ALOGI("Lost control connection.");
+
+                        // The control connection is dead now.
+                        mNetSession->destroySession(mSessionID);
+                        mSessionID = 0;
+
+                        // Without a client to notify, just shut down.
+                        if (mNotify == NULL) {
+                            looper()->stop();
+                        } else {
+                            sp<AMessage> notify = mNotify->dup();
+                            notify->setInt32("what", kWhatDisconnected);
+                            notify->post();
+                        }
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatConnected:
+                {
+                    ALOGI("We're now connected.");
+                    mState = CONNECTED;
+
+                    // In special mode, start clock synchronization with
+                    // the source before SETUP (see kWhatTimeSyncerNotify).
+                    if (mFlags & FLAG_SPECIAL_MODE) {
+                        sp<AMessage> notify = new AMessage(
+                                kWhatTimeSyncerNotify, id());
+
+                        mTimeSyncer = new TimeSyncer(mNetSession, notify);
+                        looper()->registerHandler(mTimeSyncer);
+
+                        mTimeSyncer->startClient(mRTSPHost.c_str(), 8123);
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatData:
+                {
+                    onReceiveClientData(msg);
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatStop:
+        {
+            looper()->stop();
+            break;
+        }
+
+        case kWhatMediaReceiverNotify:
+        {
+            onMediaReceiverNotify(msg);
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            if (what == TimeSyncer::kWhatTimeOffset) {
+                CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+                mTimeOffsetValid = true;
+
+                // A SETUP that was waiting on the clock offset can now
+                // proceed.
+                if (mSetupDeferred) {
+                    CHECK_EQ((status_t)OK,
+                             sendSetup(
+                                mSessionID,
+                                "rtsp://x.x.x.x:x/wfd1.0/streamid=0"));
+
+                    mSetupDeferred = false;
+                }
+            }
+            break;
+        }
+
+        case kWhatReportLateness:
+        {
+            // Periodic: report average/max latency to the source, then
+            // reset the running stats and re-arm the timer.
+            if (mLatencyCount > 0) {
+                int64_t avgLatencyUs = mLatencySumUs / mLatencyCount;
+
+                ALOGV("avg. latency = %lld ms (max %lld ms)",
+                      avgLatencyUs / 1000ll,
+                      mLatencyMaxUs / 1000ll);
+
+                sp<AMessage> params = new AMessage;
+                params->setInt64("avgLatencyUs", avgLatencyUs);
+                params->setInt64("maxLatencyUs", mLatencyMaxUs);
+                mMediaReceiver->informSender(0 /* trackIndex */, params);
+            }
+
+            mLatencyCount = 0;
+            mLatencySumUs = 0ll;
+            mLatencyMaxUs = 0ll;
+
+            msg->post(kReportLatenessEveryUs);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Debug helper: logs the delay between an access unit's timestamp and
+// "now" as an ASCII bar scaled to the 0..300 ms range, and tracks the
+// maximum delay seen. (trackIndex is currently unused.)
+void WifiDisplaySink::dumpDelay(size_t trackIndex, int64_t timeUs) {
+    int64_t delayMs = (ALooper::GetNowUs() - timeUs) / 1000ll;
+
+    if (delayMs > mMaxDelayMs) {
+        mMaxDelayMs = delayMs;
+    }
+
+    static const int64_t kMinDelayMs = 0;
+    static const int64_t kMaxDelayMs = 300;
+
+    const char *kPattern = "########################################";
+    size_t kPatternSize = strlen(kPattern);
+
+    // Bar length proportional to delay, clamped to [0, kPatternSize].
+    int n = (kPatternSize * (delayMs - kMinDelayMs))
+                / (kMaxDelayMs - kMinDelayMs);
+
+    if (n < 0) {
+        n = 0;
+    } else if ((size_t)n > kPatternSize) {
+        n = kPatternSize;
+    }
+
+    ALOGI("[%lld]: (%4lld ms / %4lld ms) %s",
+          timeUs / 1000,
+          delayMs,
+          mMaxDelayMs,
+          kPattern + kPatternSize - n);
+}
+
+// Handles events from the MediaReceiver: init/error notifications,
+// incoming access units (which are re-timestamped into local time and
+// handed to the renderer) and packet-loss reports.
+void WifiDisplaySink::onMediaReceiverNotify(const sp<AMessage> &msg) {
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case MediaReceiver::kWhatInitDone:
+        {
+            status_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            ALOGI("MediaReceiver initialization completed w/ err %d", err);
+            break;
+        }
+
+        case MediaReceiver::kWhatError:
+        {
+            status_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            ALOGE("MediaReceiver signaled error %d", err);
+            break;
+        }
+
+        case MediaReceiver::kWhatAccessUnit:
+        {
+            // Renderer is created lazily on the first access unit.
+            if (mRenderer == NULL) {
+                mRenderer = new DirectRenderer(mSurfaceTex);
+                looper()->registerHandler(mRenderer);
+            }
+
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            int64_t timeUs;
+            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+            // Outside special mode there is no time-sync protocol;
+            // estimate the offset from the first access unit instead.
+            if (!mTimeOffsetValid && !(mFlags & FLAG_SPECIAL_MODE)) {
+                mTimeOffsetUs = timeUs - ALooper::GetNowUs();
+                mTimeOffsetValid = true;
+            }
+
+            CHECK(mTimeOffsetValid);
+
+            // We are the timesync _client_,
+            // client time = server time - time offset.
+            timeUs -= mTimeOffsetUs;
+
+            size_t trackIndex;
+            CHECK(msg->findSize("trackIndex", &trackIndex));
+
+            int64_t nowUs = ALooper::GetNowUs();
+            int64_t delayUs = nowUs - timeUs;
+
+            // Accumulate latency stats for kWhatReportLateness.
+            mLatencySumUs += delayUs;
+            if (mLatencyCount == 0 || delayUs > mLatencyMaxUs) {
+                mLatencyMaxUs = delayUs;
+            }
+            ++mLatencyCount;
+
+            // dumpDelay(trackIndex, timeUs);
+
+            timeUs += 220000ll;  // Assume 220 ms of latency
+            accessUnit->meta()->setInt64("timeUs", timeUs);
+
+            // A format attached to the access unit means this is the
+            // first unit of the track.
+            sp<AMessage> format;
+            if (msg->findMessage("format", &format)) {
+                mRenderer->setFormat(trackIndex, format);
+            }
+
+            mRenderer->queueAccessUnit(trackIndex, accessUnit);
+            break;
+        }
+
+        case MediaReceiver::kWhatPacketLost:
+        {
+#if 0
+            if (!mIDRFrameRequestPending) {
+                ALOGI("requesting IDR frame");
+
+                sendIDRFrameRequest(mSessionID);
+            }
+#endif
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+// Records the member function to invoke when the RTSP response matching
+// (sessionID, cseq) arrives (see onReceiveClientData).
+void WifiDisplaySink::registerResponseHandler(
+        int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func) {
+    ResponseID id;
+    id.mSessionID = sessionID;
+    id.mCSeq = cseq;
+    mResponseHandlers.add(id, func);
+}
+
+// Sends the WFD "M2" OPTIONS request advertising wfd1.0 support, and
+// registers onReceiveM2Response for the reply. Consumes one CSeq.
+status_t WifiDisplaySink::sendM2(int32_t sessionID) {
+    AString request = "OPTIONS * RTSP/1.0\r\n";
+    AppendCommonResponse(&request, mNextCSeq);
+
+    request.append(
+            "Require: org.wfa.wfd1.0\r\n"
+            "\r\n");
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID, mNextCSeq, &WifiDisplaySink::onReceiveM2Response);
+
+    ++mNextCSeq;
+
+    return OK;
+}
+
+// Validates the M2 (OPTIONS) response; any status other than 200 OK is
+// treated as unsupported.
+status_t WifiDisplaySink::onReceiveM2Response(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    return OK;
+}
+
+// Handles the SETUP response: extracts the playback session id (and its
+// optional timeout attribute), configures the media transport from the
+// Transport header, then moves to PAUSED and issues PLAY.
+status_t WifiDisplaySink::onReceiveSetupResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    if (!msg->findString("session", &mPlaybackSessionID)) {
+        return ERROR_MALFORMED;
+    }
+
+    // "Session: <id>;timeout=<secs>" — timeout is optional.
+    if (!ParsedMessage::GetInt32Attribute(
+                mPlaybackSessionID.c_str(),
+                "timeout",
+                &mPlaybackSessionTimeoutSecs)) {
+        mPlaybackSessionTimeoutSecs = -1;
+    }
+
+    ssize_t colonPos = mPlaybackSessionID.find(";");
+    if (colonPos >= 0) {
+        // Strip any options from the returned session id.
+        mPlaybackSessionID.erase(
+                colonPos, mPlaybackSessionID.size() - colonPos);
+    }
+
+    status_t err = configureTransport(msg);
+
+    if (err != OK) {
+        return err;
+    }
+
+    mState = PAUSED;
+
+    return sendPlay(
+            sessionID,
+            "rtsp://x.x.x.x:x/wfd1.0/streamid=0");
+}
+
+// Parses the Transport header of the SETUP response (source host and the
+// "server_port=rtp-rtcp" pair) and connects the media receiver's track 0
+// to it. Skipped for plain TCP transport, which needs no UDP connect.
+status_t WifiDisplaySink::configureTransport(const sp<ParsedMessage> &msg) {
+    if (mUsingTCPTransport && !(mFlags & FLAG_SPECIAL_MODE)) {
+        // In "special" mode we still use a UDP RTCP back-channel that
+        // needs connecting.
+        return OK;
+    }
+
+    AString transport;
+    if (!msg->findString("transport", &transport)) {
+        ALOGE("Missing 'transport' field in SETUP response.");
+        return ERROR_MALFORMED;
+    }
+
+    // If the source doesn't name itself, fall back to the RTSP peer.
+    AString sourceHost;
+    if (!ParsedMessage::GetAttribute(
+                transport.c_str(), "source", &sourceHost)) {
+        sourceHost = mRTSPHost;
+    }
+
+    AString serverPortStr;
+    if (!ParsedMessage::GetAttribute(
+                transport.c_str(), "server_port", &serverPortStr)) {
+        ALOGE("Missing 'server_port' in Transport field.");
+        return ERROR_MALFORMED;
+    }
+
+    // Expect "rtp-rtcp" with rtcp = rtp + 1, both valid port numbers.
+    int rtpPort, rtcpPort;
+    if (sscanf(serverPortStr.c_str(), "%d-%d", &rtpPort, &rtcpPort) != 2
+            || rtpPort <= 0 || rtpPort > 65535
+            || rtcpPort <=0 || rtcpPort > 65535
+            || rtcpPort != rtpPort + 1) {
+        ALOGE("Invalid server_port description '%s'.",
+                serverPortStr.c_str());
+
+        return ERROR_MALFORMED;
+    }
+
+    if (rtpPort & 1) {
+        ALOGW("Server picked an odd numbered RTP port.");
+    }
+
+    return mMediaReceiver->connectTrack(
+            0 /* trackIndex */, sourceHost.c_str(), rtpPort, rtcpPort);
+}
+
+// Handles the PLAY response: on 200 OK enters PLAYING state and starts
+// the periodic latency-reporting timer.
+status_t WifiDisplaySink::onReceivePlayResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    mState = PLAYING;
+
+    (new AMessage(kWhatReportLateness, id()))->post(kReportLatenessEveryUs);
+
+    return OK;
+}
+
+// Clears the pending flag so another IDR frame request may be issued.
+// The response's status code is deliberately not inspected.
+status_t WifiDisplaySink::onReceiveIDRFrameRequestResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    CHECK(mIDRFrameRequestPending);
+    mIDRFrameRequestPending = false;
+
+    return OK;
+}
+
+// Handles RTSP traffic from the source. Responses are matched to the
+// handler registered for their (sessionID, CSeq); requests are dispatched
+// to the per-method handlers, with RTSP errors sent for anything
+// malformed, unsupported or disallowed.
+void WifiDisplaySink::onReceiveClientData(const sp<AMessage> &msg) {
+    int32_t sessionID;
+    CHECK(msg->findInt32("sessionID", &sessionID));
+
+    sp<RefBase> obj;
+    CHECK(msg->findObject("data", &obj));
+
+    sp<ParsedMessage> data =
+        static_cast<ParsedMessage *>(obj.get());
+
+    ALOGV("session %d received '%s'",
+          sessionID, data->debugString().c_str());
+
+    AString method;
+    AString uri;
+    data->getRequestField(0, &method);
+
+    int32_t cseq;
+    if (!data->findInt32("cseq", &cseq)) {
+        sendErrorResponse(sessionID, "400 Bad Request", -1 /* cseq */);
+        return;
+    }
+
+    // A first field of "RTSP/..." means status line, i.e. a response.
+    if (method.startsWith("RTSP/")) {
+        // This is a response.
+
+        ResponseID id;
+        id.mSessionID = sessionID;
+        id.mCSeq = cseq;
+
+        ssize_t index = mResponseHandlers.indexOfKey(id);
+
+        if (index < 0) {
+            ALOGW("Received unsolicited server response, cseq %d", cseq);
+            return;
+        }
+
+        // Handlers are one-shot: remove before invoking.
+        HandleRTSPResponseFunc func = mResponseHandlers.valueAt(index);
+        mResponseHandlers.removeItemsAt(index);
+
+        status_t err = (this->*func)(sessionID, data);
+        CHECK_EQ(err, (status_t)OK);
+    } else {
+        AString version;
+        data->getRequestField(2, &version);
+        if (!(version == AString("RTSP/1.0"))) {
+            sendErrorResponse(sessionID, "505 RTSP Version not supported", cseq);
+            return;
+        }
+
+        if (method == "OPTIONS") {
+            onOptionsRequest(sessionID, cseq, data);
+        } else if (method == "GET_PARAMETER") {
+            onGetParameterRequest(sessionID, cseq, data);
+        } else if (method == "SET_PARAMETER") {
+            onSetParameterRequest(sessionID, cseq, data);
+        } else {
+            sendErrorResponse(sessionID, "405 Method Not Allowed", cseq);
+        }
+    }
+}
+
+// Answers the source's M1 OPTIONS request with our supported methods,
+// then immediately issues our own M2 OPTIONS request back to the source.
+void WifiDisplaySink::onOptionsRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("Public: org.wfa.wfd1.0, GET_PARAMETER, SET_PARAMETER\r\n");
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+
+    err = sendM2(sessionID);
+    CHECK_EQ(err, (status_t)OK);
+}
+
+// Answers the source's M3 GET_PARAMETER request with the sink's
+// capabilities: supported video formats, audio codecs and RTP client
+// ports/transport. The transport (UDP vs. TCP, plain vs. interleaved) is
+// selectable at runtime via the "media.wfd-sink.tcp-mode" property, or
+// forced to TCP in special mode. After the initial handshake
+// (state != CONNECTED) an empty body is returned (keep-alive).
+void WifiDisplaySink::onGetParameterRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    AString body;
+
+    if (mState == CONNECTED) {
+        mUsingTCPTransport = false;
+        mUsingTCPInterleaving = false;
+
+        char val[PROPERTY_VALUE_MAX];
+        if (property_get("media.wfd-sink.tcp-mode", val, NULL)) {
+            if (!strcasecmp("true", val) || !strcmp("1", val)) {
+                ALOGI("Using TCP unicast transport.");
+                mUsingTCPTransport = true;
+                mUsingTCPInterleaving = false;
+            } else if (!strcasecmp("interleaved", val)) {
+                ALOGI("Using TCP interleaved transport.");
+                mUsingTCPTransport = true;
+                mUsingTCPInterleaving = true;
+            }
+        } else if (mFlags & FLAG_SPECIAL_MODE) {
+            mUsingTCPTransport = true;
+        }
+
+        body = "wfd_video_formats: ";
+        body.append(mSinkSupportedVideoFormats.getFormatSpec());
+
+        body.append(
+                "\r\nwfd_audio_codecs: AAC 0000000F 00\r\n"
+                "wfd_client_rtp_ports: RTP/AVP/");
+
+        if (mUsingTCPTransport) {
+            body.append("TCP;");
+            if (mUsingTCPInterleaving) {
+                body.append("interleaved");
+            } else {
+                body.append("unicast 19000 0");
+            }
+        } else {
+            body.append("UDP;unicast 19000 0");
+        }
+
+        body.append(" mode=play\r\n");
+    }
+
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("Content-Type: text/parameters\r\n");
+    response.append(StringPrintf("Content-Length: %d\r\n", body.size()));
+    response.append("\r\n");
+    response.append(body);
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
+status_t WifiDisplaySink::sendSetup(int32_t sessionID, const char *uri) {
+    sp<AMessage> notify = new AMessage(kWhatMediaReceiverNotify, id());
+
+    mMediaReceiverLooper = new ALooper;
+    mMediaReceiverLooper->setName("media_receiver");
+
+    mMediaReceiverLooper->start(
+            false /* runOnCallingThread */,
+            false /* canCallJava */,
+            PRIORITY_AUDIO);
+
+    mMediaReceiver = new MediaReceiver(mNetSession, notify);
+    mMediaReceiverLooper->registerHandler(mMediaReceiver);
+
+    RTPReceiver::TransportMode rtpMode = RTPReceiver::TRANSPORT_UDP;
+    if (mUsingTCPTransport) {
+        if (mUsingTCPInterleaving) {
+            rtpMode = RTPReceiver::TRANSPORT_TCP_INTERLEAVED;
+        } else {
+            rtpMode = RTPReceiver::TRANSPORT_TCP;
+        }
+    }
+
+    int32_t localRTPPort;
+    status_t err = mMediaReceiver->addTrack(
+            rtpMode, RTPReceiver::TRANSPORT_UDP /* rtcpMode */, &localRTPPort);
+
+    if (err == OK) {
+        err = mMediaReceiver->initAsync(MediaReceiver::MODE_TRANSPORT_STREAM);
+    }
+
+    if (err != OK) {
+        mMediaReceiverLooper->unregisterHandler(mMediaReceiver->id());
+        mMediaReceiver.clear();
+
+        mMediaReceiverLooper->stop();
+        mMediaReceiverLooper.clear();
+
+        return err;
+    }
+
+    AString request = StringPrintf("SETUP %s RTSP/1.0\r\n", uri);
+
+    AppendCommonResponse(&request, mNextCSeq);
+
+    if (rtpMode == RTPReceiver::TRANSPORT_TCP_INTERLEAVED) {
+        request.append("Transport: RTP/AVP/TCP;interleaved=0-1\r\n");
+    } else if (rtpMode == RTPReceiver::TRANSPORT_TCP) {
+        if (mFlags & FLAG_SPECIAL_MODE) {
+            // This isn't quite true, since the RTP connection is through TCP
+            // and the RTCP connection through UDP...
+            request.append(
+                    StringPrintf(
+                        "Transport: RTP/AVP/TCP;unicast;client_port=%d-%d\r\n",
+                        localRTPPort, localRTPPort + 1));
+        } else {
+            request.append(
+                    StringPrintf(
+                        "Transport: RTP/AVP/TCP;unicast;client_port=%d\r\n",
+                        localRTPPort));
+        }
+    } else {
+        request.append(
+                StringPrintf(
+                    "Transport: RTP/AVP/UDP;unicast;client_port=%d-%d\r\n",
+                    localRTPPort,
+                    localRTPPort + 1));
+    }
+
+    request.append("\r\n");
+
+    ALOGV("request = '%s'", request.c_str());
+
+    err = mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID, mNextCSeq, &WifiDisplaySink::onReceiveSetupResponse);
+
+    ++mNextCSeq;
+
+    return OK;
+}
+
+status_t WifiDisplaySink::sendPlay(int32_t sessionID, const char *uri) {
+    AString request = StringPrintf("PLAY %s RTSP/1.0\r\n", uri);
+
+    AppendCommonResponse(&request, mNextCSeq);
+
+    request.append(StringPrintf("Session: %s\r\n", mPlaybackSessionID.c_str()));
+    request.append("\r\n");
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID, mNextCSeq, &WifiDisplaySink::onReceivePlayResponse);
+
+    ++mNextCSeq;
+
+    return OK;
+}
+
+status_t WifiDisplaySink::sendIDRFrameRequest(int32_t sessionID) {
+    CHECK(!mIDRFrameRequestPending);
+
+    AString request = "SET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
+
+    AppendCommonResponse(&request, mNextCSeq);
+
+    AString content = "wfd_idr_request\r\n";
+
+    request.append(StringPrintf("Session: %s\r\n", mPlaybackSessionID.c_str()));
+    request.append(StringPrintf("Content-Length: %d\r\n", content.size()));
+    request.append("\r\n");
+    request.append(content);
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID,
+            mNextCSeq,
+            &WifiDisplaySink::onReceiveIDRFrameRequestResponse);
+
+    ++mNextCSeq;
+
+    mIDRFrameRequestPending = true;
+
+    return OK;
+}
+
+void WifiDisplaySink::onSetParameterRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    const char *content = data->getContent();
+
+    if (strstr(content, "wfd_trigger_method: SETUP\r\n") != NULL) {
+        if ((mFlags & FLAG_SPECIAL_MODE) && !mTimeOffsetValid) {
+            mSetupDeferred = true;
+        } else {
+            status_t err =
+                sendSetup(
+                        sessionID,
+                        "rtsp://x.x.x.x:x/wfd1.0/streamid=0");
+
+            CHECK_EQ(err, (status_t)OK);
+        }
+    }
+
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
+void WifiDisplaySink::sendErrorResponse(
+        int32_t sessionID,
+        const char *errorDetail,
+        int32_t cseq) {
+    AString response;
+    response.append("RTSP/1.0 ");
+    response.append(errorDetail);
+    response.append("\r\n");
+
+    AppendCommonResponse(&response, cseq);
+
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
+// static
+void WifiDisplaySink::AppendCommonResponse(AString *response, int32_t cseq) {
+    time_t now = time(NULL);
+    struct tm *now2 = gmtime(&now);
+    char buf[128];
+    strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S %z", now2);
+
+    response->append("Date: ");
+    response->append(buf);
+    response->append("\r\n");
+
+    response->append(StringPrintf("User-Agent: %s\r\n", sUserAgent.c_str()));
+
+    if (cseq >= 0) {
+        response->append(StringPrintf("CSeq: %d\r\n", cseq));
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
new file mode 100644
index 0000000..dc1fc32
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WIFI_DISPLAY_SINK_H_
+
+#define WIFI_DISPLAY_SINK_H_
+
+#include "VideoFormats.h"
+
+#include <gui/Surface.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+
+namespace android {
+
+struct AMessage;
+struct DirectRenderer;
+struct MediaReceiver;
+struct ParsedMessage;
+struct TimeSyncer;
+
+// Represents the RTSP client acting as a wifi display sink.
+// Connects to a wifi display source and renders the incoming
+// transport stream using a MediaPlayer instance.
+struct WifiDisplaySink : public AHandler {
+    enum {
+        kWhatDisconnected,
+    };
+
+    enum Flags {
+        FLAG_SPECIAL_MODE = 1,
+    };
+
+    // If no notification message is specified (notify == NULL)
+    // the sink will stop its looper() once the session ends,
+    // otherwise it will post an appropriate notification but leave
+    // the looper() running.
+    WifiDisplaySink(
+            uint32_t flags,
+            const sp<ANetworkSession> &netSession,
+            const sp<IGraphicBufferProducer> &bufferProducer = NULL,
+            const sp<AMessage> &notify = NULL);
+
+    void start(const char *sourceHost, int32_t sourcePort);
+    void start(const char *uri);
+
+protected:
+    virtual ~WifiDisplaySink();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum State {
+        UNDEFINED,
+        CONNECTING,
+        CONNECTED,
+        PAUSED,
+        PLAYING,
+    };
+
+    enum {
+        kWhatStart,
+        kWhatRTSPNotify,
+        kWhatStop,
+        kWhatMediaReceiverNotify,
+        kWhatTimeSyncerNotify,
+        kWhatReportLateness,
+    };
+
+    struct ResponseID {
+        int32_t mSessionID;
+        int32_t mCSeq;
+
+        bool operator<(const ResponseID &other) const {
+            return mSessionID < other.mSessionID
+                || (mSessionID == other.mSessionID
+                        && mCSeq < other.mCSeq);
+        }
+    };
+
+    typedef status_t (WifiDisplaySink::*HandleRTSPResponseFunc)(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    static const int64_t kReportLatenessEveryUs = 1000000ll;
+
+    static const AString sUserAgent;
+
+    State mState;
+    uint32_t mFlags;
+    VideoFormats mSinkSupportedVideoFormats;
+    sp<ANetworkSession> mNetSession;
+    sp<IGraphicBufferProducer> mSurfaceTex;
+    sp<AMessage> mNotify;
+    sp<TimeSyncer> mTimeSyncer;
+    bool mUsingTCPTransport;
+    bool mUsingTCPInterleaving;
+    AString mRTSPHost;
+    int32_t mSessionID;
+
+    int32_t mNextCSeq;
+
+    KeyedVector<ResponseID, HandleRTSPResponseFunc> mResponseHandlers;
+
+    sp<ALooper> mMediaReceiverLooper;
+    sp<MediaReceiver> mMediaReceiver;
+    sp<DirectRenderer> mRenderer;
+
+    AString mPlaybackSessionID;
+    int32_t mPlaybackSessionTimeoutSecs;
+
+    bool mIDRFrameRequestPending;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    bool mSetupDeferred;
+
+    size_t mLatencyCount;
+    int64_t mLatencySumUs;
+    int64_t mLatencyMaxUs;
+
+    int64_t mMaxDelayMs;
+
+    status_t sendM2(int32_t sessionID);
+    status_t sendSetup(int32_t sessionID, const char *uri);
+    status_t sendPlay(int32_t sessionID, const char *uri);
+    status_t sendIDRFrameRequest(int32_t sessionID);
+
+    status_t onReceiveM2Response(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t onReceiveSetupResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t configureTransport(const sp<ParsedMessage> &msg);
+
+    status_t onReceivePlayResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t onReceiveIDRFrameRequestResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    void registerResponseHandler(
+            int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func);
+
+    void onReceiveClientData(const sp<AMessage> &msg);
+
+    void onOptionsRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onGetParameterRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onSetParameterRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onMediaReceiverNotify(const sp<AMessage> &msg);
+
+    void sendErrorResponse(
+            int32_t sessionID,
+            const char *errorDetail,
+            int32_t cseq);
+
+    static void AppendCommonResponse(AString *response, int32_t cseq);
+
+    bool ParseURL(
+            const char *url, AString *host, int32_t *port, AString *path,
+            AString *user, AString *pass);
+
+    void dumpDelay(size_t trackIndex, int64_t timeUs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySink);
+};
+
+}  // namespace android
+
+#endif  // WIFI_DISPLAY_SINK_H_
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 5344623..6f23854 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -21,6 +21,7 @@
 #include "Converter.h"
 
 #include "MediaPuller.h"
+#include "include/avc_utils.h"
 
 #include <cutils/properties.h>
 #include <gui/Surface.h>
@@ -33,6 +34,8 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 
+#include <arpa/inet.h>
+
 #include <OMX_Video.h>
 
 namespace android {
@@ -40,14 +43,15 @@
 Converter::Converter(
         const sp<AMessage> &notify,
         const sp<ALooper> &codecLooper,
-        const sp<AMessage> &format,
-        bool usePCMAudio)
-    : mInitCheck(NO_INIT),
-      mNotify(notify),
+        const sp<AMessage> &outputFormat,
+        uint32_t flags)
+    : mNotify(notify),
       mCodecLooper(codecLooper),
-      mInputFormat(format),
+      mOutputFormat(outputFormat),
+      mFlags(flags),
       mIsVideo(false),
-      mIsPCMAudio(usePCMAudio),
+      mIsH264(false),
+      mIsPCMAudio(false),
       mNeedToManuallyPrependSPSPPS(false),
       mDoMoreWorkPending(false)
 #if ENABLE_SILENCE_DETECTION
@@ -56,20 +60,17 @@
 #endif
       ,mPrevVideoBitrate(-1)
       ,mNumFramesToDrop(0)
+      ,mEncodingSuspended(false)
     {
     AString mime;
-    CHECK(mInputFormat->findString("mime", &mime));
+    CHECK(mOutputFormat->findString("mime", &mime));
 
     if (!strncasecmp("video/", mime.c_str(), 6)) {
         mIsVideo = true;
-    }
 
-    CHECK(!usePCMAudio || !mIsVideo);
-
-    mInitCheck = initEncoder();
-
-    if (mInitCheck != OK) {
-        releaseEncoder();
+        mIsH264 = !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC);
+    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime.c_str())) {
+        mIsPCMAudio = true;
     }
 }
 
@@ -119,8 +120,19 @@
     (new AMessage(kWhatShutdown, id()))->post();
 }
 
-status_t Converter::initCheck() const {
-    return mInitCheck;
+status_t Converter::init() {
+    status_t err = initEncoder();
+
+    if (err != OK) {
+        releaseEncoder();
+    }
+
+    return err;
+}
+
+sp<IGraphicBufferProducer> Converter::getGraphicBufferProducer() {
+    CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+    return mGraphicBufferProducer;
 }
 
 size_t Converter::getInputBufferCount() const {
@@ -152,23 +164,10 @@
 }
 
 status_t Converter::initEncoder() {
-    AString inputMIME;
-    CHECK(mInputFormat->findString("mime", &inputMIME));
-
     AString outputMIME;
-    bool isAudio = false;
-    if (!strcasecmp(inputMIME.c_str(), MEDIA_MIMETYPE_AUDIO_RAW)) {
-        if (mIsPCMAudio) {
-            outputMIME = MEDIA_MIMETYPE_AUDIO_RAW;
-        } else {
-            outputMIME = MEDIA_MIMETYPE_AUDIO_AAC;
-        }
-        isAudio = true;
-    } else if (!strcasecmp(inputMIME.c_str(), MEDIA_MIMETYPE_VIDEO_RAW)) {
-        outputMIME = MEDIA_MIMETYPE_VIDEO_AVC;
-    } else {
-        TRESPASS();
-    }
+    CHECK(mOutputFormat->findString("mime", &outputMIME));
+
+    bool isAudio = !strncasecmp(outputMIME.c_str(), "audio/", 6);
 
     if (!mIsPCMAudio) {
         mEncoder = MediaCodec::CreateByType(
@@ -179,14 +178,10 @@
         }
     }
 
-    mOutputFormat = mInputFormat->dup();
-
     if (mIsPCMAudio) {
         return OK;
     }
 
-    mOutputFormat->setString("mime", outputMIME.c_str());
-
     int32_t audioBitrate = GetInt32Property("media.wfd.audio-bitrate", 128000);
     int32_t videoBitrate = GetInt32Property("media.wfd.video-bitrate", 5000000);
     mPrevVideoBitrate = videoBitrate;
@@ -262,6 +257,16 @@
         return err;
     }
 
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        CHECK(mIsVideo);
+
+        err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
     err = mEncoder->start();
 
     if (err != OK) {
@@ -274,7 +279,17 @@
         return err;
     }
 
-    return mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+    err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (mFlags & FLAG_USE_SURFACE_INPUT) {
+        scheduleDoMoreWork();
+    }
+
+    return OK;
 }
 
 void Converter::notifyError(status_t err) {
@@ -330,9 +345,12 @@
                 sp<ABuffer> accessUnit;
                 CHECK(msg->findBuffer("accessUnit", &accessUnit));
 
-                if (mIsVideo && mNumFramesToDrop) {
-                    --mNumFramesToDrop;
-                    ALOGI("dropping frame.");
+                if (mNumFramesToDrop > 0 || mEncodingSuspended) {
+                    if (mNumFramesToDrop > 0) {
+                        --mNumFramesToDrop;
+                        ALOGI("dropping frame.");
+                    }
+
                     ReleaseMediaBufferReference(accessUnit);
                     break;
                 }
@@ -414,7 +432,7 @@
             }
 
             if (mIsVideo) {
-                ALOGI("requesting IDR frame");
+                ALOGV("requesting IDR frame");
                 mEncoder->requestIDRFrame();
             }
             break;
@@ -427,8 +445,12 @@
             releaseEncoder();
 
             AString mime;
-            CHECK(mInputFormat->findString("mime", &mime));
+            CHECK(mOutputFormat->findString("mime", &mime));
             ALOGI("encoder (%s) shut down.", mime.c_str());
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatShutdownCompleted);
+            notify->post();
             break;
         }
 
@@ -438,6 +460,32 @@
             break;
         }
 
+        case kWhatReleaseOutputBuffer:
+        {
+            if (mEncoder != NULL) {
+                size_t bufferIndex;
+                CHECK(msg->findInt32("bufferIndex", (int32_t*)&bufferIndex));
+                CHECK(bufferIndex < mEncoderOutputBuffers.size());
+                mEncoder->releaseOutputBuffer(bufferIndex);
+            }
+            break;
+        }
+
+        case kWhatSuspendEncoding:
+        {
+            int32_t suspend;
+            CHECK(msg->findInt32("suspend", &suspend));
+
+            mEncodingSuspended = suspend;
+
+            if (mFlags & FLAG_USE_SURFACE_INPUT) {
+                sp<AMessage> params = new AMessage;
+                params->setInt32("drop-input-frames",suspend);
+                mEncoder->setParameters(params);
+            }
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -623,28 +671,46 @@
     return OK;
 }
 
+sp<ABuffer> Converter::prependCSD(const sp<ABuffer> &accessUnit) const {
+    CHECK(mCSD0 != NULL);
+
+    sp<ABuffer> dup = new ABuffer(accessUnit->size() + mCSD0->size());
+    memcpy(dup->data(), mCSD0->data(), mCSD0->size());
+    memcpy(dup->data() + mCSD0->size(), accessUnit->data(), accessUnit->size());
+
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+    dup->meta()->setInt64("timeUs", timeUs);
+
+    return dup;
+}
+
 status_t Converter::doMoreWork() {
     status_t err;
 
-    for (;;) {
-        size_t bufferIndex;
-        err = mEncoder->dequeueInputBuffer(&bufferIndex);
+    if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+        for (;;) {
+            size_t bufferIndex;
+            err = mEncoder->dequeueInputBuffer(&bufferIndex);
 
-        if (err != OK) {
-            break;
+            if (err != OK) {
+                break;
+            }
+
+            mAvailEncoderInputIndices.push_back(bufferIndex);
         }
 
-        mAvailEncoderInputIndices.push_back(bufferIndex);
+        feedEncoderInputBuffers();
     }
 
-    feedEncoderInputBuffers();
-
     for (;;) {
         size_t bufferIndex;
         size_t offset;
         size_t size;
         int64_t timeUs;
         uint32_t flags;
+        native_handle_t* handle = NULL;
         err = mEncoder->dequeueOutputBuffer(
                 &bufferIndex, &offset, &size, &timeUs, &flags);
 
@@ -667,19 +733,63 @@
             notify->setInt32("what", kWhatEOS);
             notify->post();
         } else {
-            sp<ABuffer> buffer = new ABuffer(size);
+#if 0
+            if (mIsVideo) {
+                int32_t videoBitrate = GetInt32Property(
+                        "media.wfd.video-bitrate", 5000000);
+
+                setVideoBitrate(videoBitrate);
+            }
+#endif
+
+            sp<ABuffer> buffer;
+            sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+
+            if (outbuf->meta()->findPointer("handle", (void**)&handle) &&
+                    handle != NULL) {
+                int32_t rangeLength, rangeOffset;
+                CHECK(outbuf->meta()->findInt32("rangeOffset", &rangeOffset));
+                CHECK(outbuf->meta()->findInt32("rangeLength", &rangeLength));
+                outbuf->meta()->setPointer("handle", NULL);
+
+                // MediaSender will post the following message when HDCP
+                // is done, to release the output buffer back to encoder.
+                sp<AMessage> notify(new AMessage(
+                        kWhatReleaseOutputBuffer, id()));
+                notify->setInt32("bufferIndex", bufferIndex);
+
+                buffer = new ABuffer(
+                        rangeLength > (int32_t)size ? rangeLength : size);
+                buffer->meta()->setPointer("handle", handle);
+                buffer->meta()->setInt32("rangeOffset", rangeOffset);
+                buffer->meta()->setInt32("rangeLength", rangeLength);
+                buffer->meta()->setMessage("notify", notify);
+            } else {
+                buffer = new ABuffer(size);
+            }
+
             buffer->meta()->setInt64("timeUs", timeUs);
 
             ALOGV("[%s] time %lld us (%.2f secs)",
                   mIsVideo ? "video" : "audio", timeUs, timeUs / 1E6);
 
-            memcpy(buffer->data(),
-                   mEncoderOutputBuffers.itemAt(bufferIndex)->base() + offset,
-                   size);
+            memcpy(buffer->data(), outbuf->base() + offset, size);
 
             if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
-                mOutputFormat->setBuffer("csd-0", buffer);
+                if (!handle) {
+                    if (mIsH264) {
+                        mCSD0 = buffer;
+                    }
+                    mOutputFormat->setBuffer("csd-0", buffer);
+                }
             } else {
+                if (mNeedToManuallyPrependSPSPPS
+                        && mIsH264
+                        && (mFlags & FLAG_PREPEND_CSD_IF_NECESSARY)
+                        && IsIDR(buffer)) {
+                    buffer = prependCSD(buffer);
+                }
+
                 sp<AMessage> notify = mNotify->dup();
                 notify->setInt32("what", kWhatAccessUnit);
                 notify->setBuffer("accessUnit", buffer);
@@ -687,7 +797,9 @@
             }
         }
 
-        mEncoder->releaseOutputBuffer(bufferIndex);
+        if (!handle) {
+            mEncoder->releaseOutputBuffer(bufferIndex);
+        }
 
         if (flags & MediaCodec::BUFFER_FLAG_EOS) {
             break;
@@ -702,9 +814,18 @@
 }
 
 void Converter::dropAFrame() {
+    // Unsupported in surface input mode.
+    CHECK(!(mFlags & FLAG_USE_SURFACE_INPUT));
+
     (new AMessage(kWhatDropAFrame, id()))->post();
 }
 
+void Converter::suspendEncoding(bool suspend) {
+    sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, id());
+    msg->setInt32("suspend", suspend);
+    msg->post();
+}
+
 int32_t Converter::getVideoBitrate() const {
     return mPrevVideoBitrate;
 }
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index ba297c4..5876e07 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -18,13 +18,12 @@
 
 #define CONVERTER_H_
 
-#include "WifiDisplaySource.h"
-
 #include <media/stagefright/foundation/AHandler.h>
 
 namespace android {
 
 struct ABuffer;
+struct IGraphicBufferProducer;
 struct MediaCodec;
 
 #define ENABLE_SILENCE_DETECTION        0
@@ -33,13 +32,25 @@
 // media access unit of a different format.
 // Right now this'll convert raw video into H.264 and raw audio into AAC.
 struct Converter : public AHandler {
-    Converter(
-            const sp<AMessage> &notify,
-            const sp<ALooper> &codecLooper,
-            const sp<AMessage> &format,
-            bool usePCMAudio);
+    enum {
+        kWhatAccessUnit,
+        kWhatEOS,
+        kWhatError,
+        kWhatShutdownCompleted,
+    };
 
-    status_t initCheck() const;
+    enum FlagBits {
+        FLAG_USE_SURFACE_INPUT          = 1,
+        FLAG_PREPEND_CSD_IF_NECESSARY   = 2,
+    };
+    Converter(const sp<AMessage> &notify,
+              const sp<ALooper> &codecLooper,
+              const sp<AMessage> &outputFormat,
+              uint32_t flags = 0);
+
+    status_t init();
+
+    sp<IGraphicBufferProducer> getGraphicBufferProducer();
 
     size_t getInputBufferCount() const;
 
@@ -52,21 +63,7 @@
     void requestIDRFrame();
 
     void dropAFrame();
-
-    enum {
-        kWhatAccessUnit,
-        kWhatEOS,
-        kWhatError,
-    };
-
-    enum {
-        kWhatDoMoreWork,
-        kWhatRequestIDRFrame,
-        kWhatShutdown,
-        kWhatMediaPullerNotify,
-        kWhatEncoderActivity,
-        kWhatDropAFrame,
-    };
+    void suspendEncoding(bool suspend);
 
     void shutdownAsync();
 
@@ -75,23 +72,40 @@
 
     static int32_t GetInt32Property(const char *propName, int32_t defaultValue);
 
+    enum {
+        // MUST not conflict with private enums below.
+        kWhatMediaPullerNotify = 'pulN',
+    };
+
 protected:
     virtual ~Converter();
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
-    status_t mInitCheck;
+    enum {
+        kWhatDoMoreWork,
+        kWhatRequestIDRFrame,
+        kWhatSuspendEncoding,
+        kWhatShutdown,
+        kWhatEncoderActivity,
+        kWhatDropAFrame,
+        kWhatReleaseOutputBuffer,
+    };
+
     sp<AMessage> mNotify;
     sp<ALooper> mCodecLooper;
-    sp<AMessage> mInputFormat;
-    bool mIsVideo;
-    bool mIsPCMAudio;
     sp<AMessage> mOutputFormat;
+    uint32_t mFlags;
+    bool mIsVideo;
+    bool mIsH264;
+    bool mIsPCMAudio;
     bool mNeedToManuallyPrependSPSPPS;
 
     sp<MediaCodec> mEncoder;
     sp<AMessage> mEncoderActivityNotify;
 
+    sp<IGraphicBufferProducer> mGraphicBufferProducer;
+
     Vector<sp<ABuffer> > mEncoderInputBuffers;
     Vector<sp<ABuffer> > mEncoderOutputBuffers;
 
@@ -99,6 +113,8 @@
 
     List<sp<ABuffer> > mInputBufferQueue;
 
+    sp<ABuffer> mCSD0;
+
     bool mDoMoreWorkPending;
 
 #if ENABLE_SILENCE_DETECTION
@@ -111,6 +127,7 @@
     int32_t mPrevVideoBitrate;
 
     int32_t mNumFramesToDrop;
+    bool mEncodingSuspended;
 
     status_t initEncoder();
     void releaseEncoder();
@@ -129,6 +146,8 @@
 
     static bool IsSilence(const sp<ABuffer> &accessUnit);
 
+    sp<ABuffer> prependCSD(const sp<ABuffer> &accessUnit) const;
+
     DISALLOW_EVIL_CONSTRUCTORS(Converter);
 };
 
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 189bea3..7e8891d 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -93,6 +93,9 @@
                 err = mSource->start(params.get());
             } else {
                 err = mSource->start();
+                if (err != OK) {
+                    ALOGE("source failed to start w/ err %d", err);
+                }
             }
 
             if (err == OK) {
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3d7b865..0aa4ee5 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -378,7 +378,9 @@
         bool usePCMAudio,
         bool enableVideo,
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, id());
     mMediaSender = new MediaSender(mNetSession, notify);
     looper()->registerHandler(mMediaSender);
@@ -390,7 +392,9 @@
             usePCMAudio,
             enableVideo,
             videoResolutionType,
-            videoResolutionIndex);
+            videoResolutionIndex,
+            videoProfileType,
+            videoLevelType);
 
     if (err == OK) {
         err = mMediaSender->initAsync(
@@ -517,7 +521,7 @@
                 if (mTracks.isEmpty()) {
                     ALOGI("Reached EOS");
                 }
-            } else {
+            } else if (what != Converter::kWhatShutdownCompleted) {
                 CHECK_EQ(what, Converter::kWhatError);
 
                 status_t err;
@@ -559,6 +563,8 @@
                         converter->dropAFrame();
                     }
                 }
+            } else if (what == MediaSender::kWhatInformSender) {
+                onSinkFeedback(msg);
             } else {
                 TRESPASS();
             }
@@ -654,6 +660,89 @@
     }
 }
 
+void WifiDisplaySource::PlaybackSession::onSinkFeedback(const sp<AMessage> &msg) {
+    int64_t avgLatencyUs;
+    CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
+
+    int64_t maxLatencyUs;
+    CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
+
+    ALOGI("sink reports avg. latency of %lld ms (max %lld ms)",
+          avgLatencyUs / 1000ll,
+          maxLatencyUs / 1000ll);
+
+    if (mVideoTrackIndex >= 0) {
+        const sp<Track> &videoTrack = mTracks.valueFor(mVideoTrackIndex);
+        sp<Converter> converter = videoTrack->converter();
+
+        if (converter != NULL) {
+            int32_t videoBitrate =
+                Converter::GetInt32Property("media.wfd.video-bitrate", -1);
+
+            char val[PROPERTY_VALUE_MAX];
+            if (videoBitrate < 0
+                    && property_get("media.wfd.video-bitrate", val, NULL)
+                    && !strcasecmp("adaptive", val)) {
+                videoBitrate = converter->getVideoBitrate();
+
+                if (avgLatencyUs > 300000ll) {
+                    videoBitrate *= 0.6;
+                } else if (avgLatencyUs < 100000ll) {
+                    videoBitrate *= 1.1;
+                }
+            }
+
+            if (videoBitrate > 0) {
+                if (videoBitrate < 500000) {
+                    videoBitrate = 500000;
+                } else if (videoBitrate > 10000000) {
+                    videoBitrate = 10000000;
+                }
+
+                if (videoBitrate != converter->getVideoBitrate()) {
+                    ALOGI("setting video bitrate to %d bps", videoBitrate);
+
+                    converter->setVideoBitrate(videoBitrate);
+                }
+            }
+        }
+
+        sp<RepeaterSource> repeaterSource = videoTrack->repeaterSource();
+        if (repeaterSource != NULL) {
+            double rateHz =
+                Converter::GetInt32Property(
+                        "media.wfd.video-framerate", -1);
+
+            char val[PROPERTY_VALUE_MAX];
+            if (rateHz < 0.0
+                    && property_get("media.wfd.video-framerate", val, NULL)
+                    && !strcasecmp("adaptive", val)) {
+                 rateHz = repeaterSource->getFrameRate();
+
+                if (avgLatencyUs > 300000ll) {
+                    rateHz *= 0.9;
+                } else if (avgLatencyUs < 200000ll) {
+                    rateHz *= 1.1;
+                }
+            }
+
+            if (rateHz > 0) {
+                if (rateHz < 5.0) {
+                    rateHz = 5.0;
+                } else if (rateHz > 30.0) {
+                    rateHz = 30.0;
+                }
+
+                if (rateHz != repeaterSource->getFrameRate()) {
+                    ALOGI("setting frame rate to %.2f Hz", rateHz);
+
+                    repeaterSource->setFrameRate(rateHz);
+                }
+            }
+        }
+    }
+}
+
 status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
         bool enableAudio, bool enableVideo) {
     DataSource::RegisterDefaultSniffers();
@@ -785,7 +874,9 @@
         bool usePCMAudio,
         bool enableVideo,
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     CHECK(enableAudio || enableVideo);
 
     if (!mMediaPath.empty()) {
@@ -794,7 +885,8 @@
 
     if (enableVideo) {
         status_t err = addVideoSource(
-                videoResolutionType, videoResolutionIndex);
+                videoResolutionType, videoResolutionIndex, videoProfileType,
+                videoLevelType);
 
         if (err != OK) {
             return err;
@@ -810,9 +902,13 @@
 
 status_t WifiDisplaySource::PlaybackSession::addSource(
         bool isVideo, const sp<MediaSource> &source, bool isRepeaterSource,
-        bool usePCMAudio, size_t *numInputBuffers) {
+        bool usePCMAudio, unsigned profileIdc, unsigned levelIdc,
+        unsigned constraintSet, size_t *numInputBuffers) {
     CHECK(!usePCMAudio || !isVideo);
     CHECK(!isRepeaterSource || isVideo);
+    CHECK(!profileIdc || isVideo);
+    CHECK(!levelIdc || isVideo);
+    CHECK(!constraintSet || isVideo);
 
     sp<ALooper> pullLooper = new ALooper;
     pullLooper->setName("pull_looper");
@@ -841,26 +937,36 @@
     CHECK_EQ(err, (status_t)OK);
 
     if (isVideo) {
+        format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
         format->setInt32("store-metadata-in-buffers", true);
-
+        format->setInt32("store-metadata-in-buffers-output", (mHDCP != NULL));
         format->setInt32(
                 "color-format", OMX_COLOR_FormatAndroidOpaque);
+        format->setInt32("profile-idc", profileIdc);
+        format->setInt32("level-idc", levelIdc);
+        format->setInt32("constraint-set", constraintSet);
+    } else {
+        format->setString(
+                "mime",
+                usePCMAudio
+                    ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
     }
 
     notify = new AMessage(kWhatConverterNotify, id());
     notify->setSize("trackIndex", trackIndex);
 
-    sp<Converter> converter =
-        new Converter(notify, codecLooper, format, usePCMAudio);
-
-    err = converter->initCheck();
-    if (err != OK) {
-        ALOGE("%s converter returned err %d", isVideo ? "video" : "audio", err);
-        return err;
-    }
+    sp<Converter> converter = new Converter(notify, codecLooper, format);
 
     looper()->registerHandler(converter);
 
+    err = converter->init();
+    if (err != OK) {
+        ALOGE("%s converter returned err %d", isVideo ? "video" : "audio", err);
+
+        looper()->unregisterHandler(converter->id());
+        return err;
+    }
+
     notify = new AMessage(Converter::kWhatMediaPullerNotify, converter->id());
     notify->setSize("trackIndex", trackIndex);
 
@@ -905,7 +1011,9 @@
 
 status_t WifiDisplaySource::PlaybackSession::addVideoSource(
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     size_t width, height, framesPerSecond;
     bool interlaced;
     CHECK(VideoFormats::GetConfiguration(
@@ -916,6 +1024,14 @@
                 &framesPerSecond,
                 &interlaced));
 
+    unsigned profileIdc, levelIdc, constraintSet;
+    CHECK(VideoFormats::GetProfileLevel(
+                videoProfileType,
+                videoLevelType,
+                &profileIdc,
+                &levelIdc,
+                &constraintSet));
+
     sp<SurfaceMediaSource> source = new SurfaceMediaSource(width, height);
 
     source->setUseAbsoluteTimestamps();
@@ -926,7 +1042,8 @@
     size_t numInputBuffers;
     status_t err = addSource(
             true /* isVideo */, videoSource, true /* isRepeaterSource */,
-            false /* usePCMAudio */, &numInputBuffers);
+            false /* usePCMAudio */, profileIdc, levelIdc, constraintSet,
+            &numInputBuffers);
 
     if (err != OK) {
         return err;
@@ -949,7 +1066,8 @@
     if (audioSource->initCheck() == OK) {
         return addSource(
                 false /* isVideo */, audioSource, false /* isRepeaterSource */,
-                usePCMAudio, NULL /* numInputBuffers */);
+                usePCMAudio, 0 /* profileIdc */, 0 /* levelIdc */,
+                0 /* constraintSet */, NULL /* numInputBuffers */);
     }
 
     ALOGW("Unable to instantiate audio source");
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 39086a1..5c8ee94 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -53,7 +53,9 @@
             bool usePCMAudio,
             bool enableVideo,
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     void destroyAsync();
 
@@ -130,18 +132,25 @@
             bool usePCMAudio,
             bool enableVideo,
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     status_t addSource(
             bool isVideo,
             const sp<MediaSource> &source,
             bool isRepeaterSource,
             bool usePCMAudio,
+            unsigned profileIdc,
+            unsigned levelIdc,
+            unsigned constraintSet,
             size_t *numInputBuffers);
 
     status_t addVideoSource(
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     status_t addAudioSource(bool usePCMAudio);
 
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
index 2c4a373..c674700 100644
--- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp
+++ b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
@@ -261,12 +261,24 @@
             data[0] = 40;  // descriptor_tag
             data[1] = 4;  // descriptor_length
 
-            CHECK_GE(mCSD.size(), 1u);
-            const sp<ABuffer> &sps = mCSD.itemAt(0);
-            CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
-            CHECK_GE(sps->size(), 7u);
-            // profile_idc, constraint_set*, level_idc
-            memcpy(&data[2], sps->data() + 4, 3);
+            if (mCSD.size() > 0) {
+                CHECK_GE(mCSD.size(), 1u);
+                const sp<ABuffer> &sps = mCSD.itemAt(0);
+                CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
+                CHECK_GE(sps->size(), 7u);
+                // profile_idc, constraint_set*, level_idc
+                memcpy(&data[2], sps->data() + 4, 3);
+            } else {
+                int32_t profileIdc, levelIdc, constraintSet;
+                CHECK(mFormat->findInt32("profile-idc", &profileIdc));
+                CHECK(mFormat->findInt32("level-idc", &levelIdc));
+                CHECK(mFormat->findInt32("constraint-set", &constraintSet));
+                CHECK_GE(profileIdc, 0u);
+                CHECK_GE(levelIdc, 0u);
+                data[2] = profileIdc;    // profile_idc
+                data[3] = constraintSet; // constraint_set*
+                data[4] = levelIdc;      // level_idc
+            }
 
             // AVC_still_present=0, AVC_24_hour_picture_flag=0, reserved
             data[5] = 0x3f;
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 22dd0b1..4b59e62 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -21,8 +21,8 @@
 #include "WifiDisplaySource.h"
 #include "PlaybackSession.h"
 #include "Parameters.h"
-#include "ParsedMessage.h"
 #include "rtp/RTPSender.h"
+#include "TimeSyncer.h"
 
 #include <binder/IServiceManager.h>
 #include <gui/IGraphicBufferProducer.h>
@@ -32,6 +32,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ParsedMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
 
@@ -73,6 +74,12 @@
 
     mSupportedSourceVideoFormats.setNativeResolution(
             VideoFormats::RESOLUTION_CEA, 5);  // 1280x720 p30
+
+    // Enable all resolutions up to 1280x720p30
+    mSupportedSourceVideoFormats.enableResolutionUpto(
+            VideoFormats::RESOLUTION_CEA, 5,
+            VideoFormats::PROFILE_CHP,  // Constrained High Profile
+            VideoFormats::LEVEL_32);    // Level 3.2
 }
 
 WifiDisplaySource::~WifiDisplaySource() {
@@ -164,6 +171,14 @@
                 } else {
                     err = -EINVAL;
                 }
+            }
+
+            if (err == OK) {
+                sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+                mTimeSyncer = new TimeSyncer(mNetSession, notify);
+                looper()->registerHandler(mTimeSyncer);
+
+                mTimeSyncer->startServer(8123);
 
                 mState = AWAITING_CLIENT_CONNECTION;
             }
@@ -539,6 +554,11 @@
             break;
         }
 
+        case kWhatTimeSyncerNotify:
+        {
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -617,6 +637,9 @@
         chosenVideoFormat.disableAll();
         chosenVideoFormat.setNativeResolution(
                 mChosenVideoResolutionType, mChosenVideoResolutionIndex);
+        chosenVideoFormat.setProfileLevel(
+                mChosenVideoResolutionType, mChosenVideoResolutionIndex,
+                mChosenVideoProfile, mChosenVideoLevel);
 
         body.append(chosenVideoFormat.getFormatSpec(true /* forM4Message */));
         body.append("\r\n");
@@ -729,6 +752,8 @@
 
     ++mNextCSeq;
 
+    scheduleKeepAlive(sessionID);
+
     return OK;
 }
 
@@ -845,7 +870,9 @@
                     mSupportedSinkVideoFormats,
                     mSupportedSourceVideoFormats,
                     &mChosenVideoResolutionType,
-                    &mChosenVideoResolutionIndex)) {
+                    &mChosenVideoResolutionIndex,
+                    &mChosenVideoProfile,
+                    &mChosenVideoLevel)) {
             ALOGE("Sink and source share no commonly supported video "
                   "formats.");
 
@@ -864,6 +891,9 @@
 
         ALOGI("Picked video resolution %u x %u %c%u",
               width, height, interlaced ? 'i' : 'p', framesPerSecond);
+
+        ALOGI("Picked AVC profile %d, level %d",
+              mChosenVideoProfile, mChosenVideoLevel);
     } else {
         ALOGI("Sink doesn't support video at all.");
     }
@@ -994,8 +1024,6 @@
 
     if (mClientInfo.mPlaybackSession != NULL) {
         mClientInfo.mPlaybackSession->updateLiveness();
-
-        scheduleKeepAlive(sessionID);
     }
 
     return OK;
@@ -1257,7 +1285,9 @@
             mUsingPCMAudio,
             mSinkSupportsVideo,
             mChosenVideoResolutionType,
-            mChosenVideoResolutionIndex);
+            mChosenVideoResolutionIndex,
+            mChosenVideoProfile,
+            mChosenVideoLevel);
 
     if (err != OK) {
         looper()->unregisterHandler(playbackSession->id());
@@ -1340,7 +1370,9 @@
         return ERROR_MALFORMED;
     }
 
-    if (mState != AWAITING_CLIENT_PLAY) {
+    if (mState != AWAITING_CLIENT_PLAY
+     && mState != PAUSED_TO_PLAYING
+     && mState != PAUSED) {
         ALOGW("Received PLAY request but we're in state %d", mState);
 
         sendErrorResponse(
@@ -1367,7 +1399,7 @@
         return err;
     }
 
-    if (mState == PAUSED_TO_PLAYING) {
+    if (mState == PAUSED_TO_PLAYING || mPlaybackSessionEstablished) {
         mState = PLAYING;
         return OK;
     }
@@ -1401,7 +1433,7 @@
 
     ALOGI("Received PAUSE request.");
 
-    if (mState != PLAYING_TO_PAUSED) {
+    if (mState != PLAYING_TO_PAUSED && mState != PLAYING) {
         return INVALID_OPERATION;
     }
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 44d3e4d..4f11712 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -18,10 +18,10 @@
 
 #define WIFI_DISPLAY_SOURCE_H_
 
-#include "ANetworkSession.h"
 #include "VideoFormats.h"
 
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
 
 #include <netinet/in.h>
 
@@ -30,6 +30,7 @@
 struct IHDCP;
 struct IRemoteDisplayClient;
 struct ParsedMessage;
+struct TimeSyncer;
 
 // Represents the RTSP server acting as a wifi display source.
 // Manages incoming connections, sets up Playback sessions as necessary.
@@ -82,6 +83,7 @@
         kWhatHDCPNotify,
         kWhatFinishStop2,
         kWhatTeardownTriggerTimedOut,
+        kWhatTimeSyncerNotify,
     };
 
     struct ResponseID {
@@ -118,6 +120,7 @@
     sp<ANetworkSession> mNetSession;
     sp<IRemoteDisplayClient> mClient;
     AString mMediaPath;
+    sp<TimeSyncer> mTimeSyncer;
     struct in_addr mInterfaceAddr;
     int32_t mSessionID;
 
@@ -131,6 +134,8 @@
 
     VideoFormats::ResolutionType mChosenVideoResolutionType;
     size_t mChosenVideoResolutionIndex;
+    VideoFormats::ProfileType mChosenVideoProfile;
+    VideoFormats::LevelType mChosenVideoLevel;
 
     bool mSinkSupportsAudio;
 
diff --git a/media/libstagefright/wifi-display/udptest.cpp b/media/libstagefright/wifi-display/udptest.cpp
new file mode 100644
index 0000000..61eb9f9
--- /dev/null
+++ b/media/libstagefright/wifi-display/udptest.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "udptest"
+#include <utils/Log.h>
+
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ANetworkSession.h>
+
+namespace android {
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host[:port]\tconnect to test server\n"
+            "           -l            \tcreate a test server\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    ProcessState::self()->startThreadPool();
+
+    int32_t localPort = -1;
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l:")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    connectToHost = optarg;
+                    connectToPort = 49152;
+                } else {
+                    connectToHost.setTo(optarg, colonPos - optarg);
+
+                    char *end;
+                    connectToPort = strtol(colonPos + 1, &end, 10);
+
+                    if (*end != '\0' || end == colonPos + 1
+                            || connectToPort < 1 || connectToPort > 65535) {
+                        fprintf(stderr, "Illegal port specified.\n");
+                        exit(1);
+                    }
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                char *end;
+                localPort = strtol(optarg, &end, 10);
+
+                if (*end != '\0' || end == optarg
+                        || localPort < 1 || localPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if (localPort < 0 && connectToPort < 0) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TimeSyncer> handler = new TimeSyncer(netSession, NULL /* notify */);
+    looper->registerHandler(handler);
+
+    if (localPort >= 0) {
+        handler->startServer(localPort);
+    } else {
+        handler->startClient(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
+
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index c947765..04cb319 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "wfd"
 #include <utils/Log.h>
 
+#include "sink/WifiDisplaySink.h"
 #include "source/WifiDisplaySource.h"
 
 #include <binder/ProcessState.h>
@@ -38,8 +39,12 @@
 static void usage(const char *me) {
     fprintf(stderr,
             "usage:\n"
-            "           %s -l iface[:port]\tcreate a wifi display source\n"
-            "               -f(ilename)  \tstream media\n",
+            "           %s -c host[:port]\tconnect to wifi source\n"
+            "               -u uri        \tconnect to an rtsp uri\n"
+            "               -l ip[:port] \tlisten on the specified port "
+            "(create a sink)\n"
+            "               -f(ilename)  \tstream media\n"
+            "               -s(pecial)   \trun in 'special' mode\n",
             me);
 }
 
@@ -133,28 +138,6 @@
     }
 }
 
-static status_t enableAudioSubmix(bool enable) {
-    status_t err = AudioSystem::setDeviceConnectionState(
-            AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-            enable
-                ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE
-                : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-            NULL /* device_address */);
-
-    if (err != OK) {
-        return err;
-    }
-
-    err = AudioSystem::setDeviceConnectionState(
-            AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-            enable
-                ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE
-                : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-            NULL /* device_address */);
-
-    return err;
-}
-
 static void createSource(const AString &addr, int32_t port) {
     sp<IServiceManager> sm = defaultServiceManager();
     sp<IBinder> binder = sm->getService(String16("media.player"));
@@ -163,21 +146,18 @@
 
     CHECK(service.get() != NULL);
 
-    enableAudioSubmix(true /* enable */);
-
     String8 iface;
     iface.append(addr.c_str());
     iface.append(StringPrintf(":%d", port).c_str());
 
     sp<RemoteDisplayClient> client = new RemoteDisplayClient;
-    sp<IRemoteDisplay> display = service->listenForRemoteDisplay(client, iface);
+    sp<IRemoteDisplay> display =
+        service->listenForRemoteDisplay(client, iface);
 
     client->waitUntilDone();
 
     display->dispose();
     display.clear();
-
-    enableAudioSubmix(false /* enable */);
 }
 
 static void createFileSource(
@@ -209,14 +189,48 @@
 
     DataSource::RegisterDefaultSniffers();
 
+    AString connectToHost;
+    int32_t connectToPort = -1;
+    AString uri;
+
     AString listenOnAddr;
     int32_t listenOnPort = -1;
 
     AString path;
 
+    bool specialMode = false;
+
     int res;
-    while ((res = getopt(argc, argv, "hl:f:")) >= 0) {
+    while ((res = getopt(argc, argv, "hc:l:u:f:s")) >= 0) {
         switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    connectToHost = optarg;
+                    connectToPort = WifiDisplaySource::kWifiDisplayDefaultPort;
+                } else {
+                    connectToHost.setTo(optarg, colonPos - optarg);
+
+                    char *end;
+                    connectToPort = strtol(colonPos + 1, &end, 10);
+
+                    if (*end != '\0' || end == colonPos + 1
+                            || connectToPort < 1 || connectToPort > 65535) {
+                        fprintf(stderr, "Illegal port specified.\n");
+                        exit(1);
+                    }
+                }
+                break;
+            }
+
+            case 'u':
+            {
+                uri = optarg;
+                break;
+            }
+
             case 'f':
             {
                 path = optarg;
@@ -245,6 +259,12 @@
                 break;
             }
 
+            case 's':
+            {
+                specialMode = true;
+                break;
+            }
+
             case '?':
             case 'h':
             default:
@@ -253,6 +273,13 @@
         }
     }
 
+    if (connectToPort >= 0 && listenOnPort >= 0) {
+        fprintf(stderr,
+                "You can connect to a source or create one, "
+                "but not both at the same time.\n");
+        exit(1);
+    }
+
     if (listenOnPort >= 0) {
         if (path.empty()) {
             createSource(listenOnAddr, listenOnPort);
@@ -263,7 +290,72 @@
         exit(0);
     }
 
-    usage(argv[0]);
+    if (connectToPort < 0 && uri.empty()) {
+        fprintf(stderr,
+                "You need to select either source host or uri.\n");
+
+        exit(1);
+    }
+
+    if (connectToPort >= 0 && !uri.empty()) {
+        fprintf(stderr,
+                "You need to either connect to a wfd host or an rtsp url, "
+                "not both.\n");
+        exit(1);
+    }
+
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    CHECK_EQ(composerClient->initCheck(), (status_t)OK);
+
+    sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
+            ISurfaceComposer::eDisplayIdMain));
+    DisplayInfo info;
+    SurfaceComposerClient::getDisplayInfo(display, &info);
+    ssize_t displayWidth = info.w;
+    ssize_t displayHeight = info.h;
+
+    ALOGV("display is %d x %d\n", displayWidth, displayHeight);
+
+    sp<SurfaceControl> control =
+        composerClient->createSurface(
+                String8("A Surface"),
+                displayWidth,
+                displayHeight,
+                PIXEL_FORMAT_RGB_565,
+                0);
+
+    CHECK(control != NULL);
+    CHECK(control->isValid());
+
+    SurfaceComposerClient::openGlobalTransaction();
+    CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
+    CHECK_EQ(control->show(), (status_t)OK);
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    sp<Surface> surface = control->getSurface();
+    CHECK(surface != NULL);
+
+    sp<ANetworkSession> session = new ANetworkSession;
+    session->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<WifiDisplaySink> sink = new WifiDisplaySink(
+            specialMode ? WifiDisplaySink::FLAG_SPECIAL_MODE : 0 /* flags */,
+            session,
+            surface->getIGraphicBufferProducer());
+
+    looper->registerHandler(sink);
+
+    if (connectToPort >= 0) {
+        sink->start(connectToHost.c_str(), connectToPort);
+    } else {
+        sink->start(uri.c_str());
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    composerClient->dispose();
 
     return 0;
 }
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 061a079..54377f1 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -27,9 +27,6 @@
 
 LOCAL_SRC_FILES += StateQueue.cpp
 
-# uncomment for debugging timing problems related to StateQueue::push()
-LOCAL_CFLAGS += -DSTATE_QUEUE_DUMP
-
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
@@ -56,24 +53,10 @@
 
 LOCAL_MODULE:= libaudioflinger
 
-LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp
-
-LOCAL_CFLAGS += -DFAST_MIXER_STATISTICS
-
-# uncomment to display CPU load adjusted for CPU frequency
-# LOCAL_CFLAGS += -DCPU_FREQUENCY_STATISTICS
+LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp
 
 LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
 
-LOCAL_CFLAGS += -UFAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
-
-# uncomment to allow tee sink debugging to be enabled by property
-# LOCAL_CFLAGS += -DTEE_SINK
-
-# uncomment to enable the audio watchdog
-# LOCAL_SRC_FILES += AudioWatchdog.cpp
-# LOCAL_CFLAGS += -DAUDIO_WATCHDOG
-
 # Define ANDROID_SMP appropriately. Used to get inline tracing fast-path.
 ifeq ($(TARGET_CPU_SMP),true)
     LOCAL_CFLAGS += -DANDROID_SMP=1
@@ -81,6 +64,8 @@
     LOCAL_CFLAGS += -DANDROID_SMP=0
 endif
 
+LOCAL_CFLAGS += -fvisibility=hidden
+
 include $(BUILD_SHARED_LIBRARY)
 
 #
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 87eb6aa..3d65c44 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <dirent.h>
 #include <math.h>
 #include <signal.h>
@@ -36,10 +37,6 @@
 
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
-#include <cutils/compiler.h>
-
-//#include <private/media/AudioTrackShared.h>
-//#include <private/media/AudioEffectShared.h>
 
 #include <system/audio.h>
 #include <hardware/audio.h>
@@ -58,12 +55,13 @@
 #include <powermanager/PowerManager.h>
 
 #include <common_time/cc_helper.h>
-//#include <common_time/local_clock.h>
 
 #include <media/IMediaLogService.h>
 
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
+#include <media/AudioParameter.h>
+#include <private/android_filesystem_config.h>
 
 // ----------------------------------------------------------------------------
 
@@ -141,7 +139,9 @@
       mMasterMute(false),
       mNextUniqueId(1),
       mMode(AUDIO_MODE_INVALID),
-      mBtNrecIsOff(false)
+      mBtNrecIsOff(false),
+      mIsLowRamDevice(true),
+      mIsDeviceTypeKnown(false)
 {
     getpid_cached = getpid();
     char value[PROPERTY_VALUE_MAX];
@@ -436,6 +436,7 @@
         audio_io_handle_t output,
         pid_t tid,
         int *sessionId,
+        String8& name,
         status_t *status)
 {
     sp<PlaybackThread::Track> track;
@@ -524,6 +525,9 @@
         }
     }
     if (lStatus == NO_ERROR) {
+        // s for server's pid, n for normal mixer name, f for fast index
+        name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0,
+                track->fastIndex());
         trackHandle = new TrackHandle(track);
     } else {
         // remove local strong reference to Client before deleting the Track so that the Client
@@ -981,11 +985,12 @@
 
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
-    struct audio_config config = {
-        sample_rate: sampleRate,
-        channel_mask: channelMask,
-        format: format,
-    };
+    struct audio_config config;
+    memset(&config, 0, sizeof(config));
+    config.sample_rate = sampleRate;
+    config.channel_mask = channelMask;
+    config.format = format;
+
     audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
     size_t size = dev->get_input_buffer_size(dev, &config);
     mHardwareStatus = AUDIO_HW_IDLE;
@@ -1207,7 +1212,7 @@
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t frameCount,
-        IAudioFlinger::track_flags_t flags,
+        IAudioFlinger::track_flags_t *flags,
         pid_t tid,
         int *sessionId,
         status_t *status)
@@ -1226,6 +1231,12 @@
         goto Exit;
     }
 
+    if (format != AUDIO_FORMAT_PCM_16_BIT) {
+        ALOGE("openRecord() invalid format %d", format);
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
     // add client to list
     { // scope for mLock
         Mutex::Autolock _l(mLock);
@@ -1382,31 +1393,53 @@
 
 // ----------------------------------------------------------------------------
 
+status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice)
+{
+    uid_t uid = IPCThreadState::self()->getCallingUid();
+    if (uid != AID_SYSTEM) {
+        return PERMISSION_DENIED;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mIsDeviceTypeKnown) {
+        return INVALID_OPERATION;
+    }
+    mIsLowRamDevice = isLowRamDevice;
+    mIsDeviceTypeKnown = true;
+    return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+
 audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
                                            audio_devices_t *pDevices,
                                            uint32_t *pSamplingRate,
                                            audio_format_t *pFormat,
                                            audio_channel_mask_t *pChannelMask,
                                            uint32_t *pLatencyMs,
-                                           audio_output_flags_t flags)
+                                           audio_output_flags_t flags,
+                                           const audio_offload_info_t *offloadInfo)
 {
-    status_t status;
     PlaybackThread *thread = NULL;
-    struct audio_config config = {
-        sample_rate: pSamplingRate ? *pSamplingRate : 0,
-        channel_mask: pChannelMask ? *pChannelMask : 0,
-        format: pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT,
-    };
+    struct audio_config config;
+    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
+    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
+    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
+    if (offloadInfo) {
+        config.offload_info = *offloadInfo;
+    }
+
     audio_stream_out_t *outStream = NULL;
     AudioHwDevice *outHwDev;
 
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
+    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
               config.sample_rate,
               config.format,
               config.channel_mask,
               flags);
+    ALOGV("openOutput(), offloadInfo %p version 0x%04x",
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version );
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1423,7 +1456,7 @@
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
-    status = hwDevHal->open_output_stream(hwDevHal,
+    status_t status = hwDevHal->open_output_stream(hwDevHal,
                                           id,
                                           *pDevices,
                                           (audio_output_flags_t)flags,
@@ -1431,7 +1464,7 @@
                                           &outStream);
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, "
+    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, "
             "Channels %x, status %d",
             outStream,
             config.sample_rate,
@@ -1440,9 +1473,12 @@
             status);
 
     if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
+        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
-        if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
+        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+            thread = new OffloadThread(this, output, id, *pDevices);
+            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
+        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
             (config.format != AUDIO_FORMAT_PCM_16_BIT) ||
             (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
             thread = new DirectOutputThread(this, output, id, *pDevices);
@@ -1453,10 +1489,18 @@
         }
         mPlaybackThreads.add(id, thread);
 
-        if (pSamplingRate != NULL) *pSamplingRate = config.sample_rate;
-        if (pFormat != NULL) *pFormat = config.format;
-        if (pChannelMask != NULL) *pChannelMask = config.channel_mask;
-        if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = config.sample_rate;
+        }
+        if (pFormat != NULL) {
+            *pFormat = config.format;
+        }
+        if (pChannelMask != NULL) {
+            *pChannelMask = config.channel_mask;
+        }
+        if (pLatencyMs != NULL) {
+            *pLatencyMs = thread->latency();
+        }
 
         // notify client processes of the new output creation
         thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
@@ -1524,11 +1568,28 @@
                     DuplicatingThread *dupThread =
                             (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
                     dupThread->removeOutputTrack((MixerThread *)thread.get());
+
+                }
+            }
+        }
+
+
+        mPlaybackThreads.removeItem(output);
+        // save all effects to the default thread
+        if (mPlaybackThreads.size()) {
+            PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
+            if (dstThread != NULL) {
+                // audioflinger lock is held here so the acquisition order of thread locks does not
+                // matter
+                Mutex::Autolock _dl(dstThread->mLock);
+                Mutex::Autolock _sl(thread->mLock);
+                Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    moveEffectChain_l(effectChains[i]->sessionId(), thread.get(), dstThread, true);
                 }
             }
         }
         audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
-        mPlaybackThreads.removeItem(output);
     }
     thread->exit();
     // The thread entity (active unit of execution) is no longer running here,
@@ -1583,11 +1644,11 @@
 {
     status_t status;
     RecordThread *thread = NULL;
-    struct audio_config config = {
-        sample_rate: pSamplingRate ? *pSamplingRate : 0,
-        channel_mask: pChannelMask ? *pChannelMask : 0,
-        format: pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT,
-    };
+    struct audio_config config;
+    config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0;
+    config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0;
+    config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT;
+
     uint32_t reqSamplingRate = config.sample_rate;
     audio_format_t reqFormat = config.format;
     audio_channel_mask_t reqChannels = config.channel_mask;
@@ -1683,7 +1744,7 @@
         AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
 
         // Start record thread
-        // RecorThread require both input and output device indication to forward to audio
+        // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
         thread = new RecordThread(this,
                                   input,
@@ -1698,9 +1759,15 @@
                                   );
         mRecordThreads.add(id, thread);
         ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
-        if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
-        if (pFormat != NULL) *pFormat = config.format;
-        if (pChannelMask != NULL) *pChannelMask = reqChannels;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = reqSamplingRate;
+        }
+        if (pFormat != NULL) {
+            *pFormat = config.format;
+        }
+        if (pChannelMask != NULL) {
+            *pChannelMask = reqChannels;
+        }
 
         // notify client processes of the new input creation
         thread->audioConfigChanged_l(AudioSystem::INPUT_OPENED);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b0efef6..e5e4113 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -24,6 +24,8 @@
 
 #include <common_time/cc_helper.h>
 
+#include <cutils/compiler.h>
+
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioTrack.h>
@@ -54,6 +56,7 @@
 #include <powermanager/IPowerManager.h>
 
 #include <media/nbaio/NBLog.h>
+#include <private/media/AudioTrackShared.h>
 
 namespace android {
 
@@ -89,7 +92,7 @@
 {
     friend class BinderService<AudioFlinger>;   // for AudioFlinger()
 public:
-    static const char* getServiceName() { return "media.audio_flinger"; }
+    static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
 
@@ -105,6 +108,7 @@
                                 audio_io_handle_t output,
                                 pid_t tid,
                                 int *sessionId,
+                                String8& name,
                                 status_t *status);
 
     virtual sp<IAudioRecord> openRecord(
@@ -113,7 +117,7 @@
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
-                                IAudioFlinger::track_flags_t flags,
+                                IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int *sessionId,
                                 status_t *status);
@@ -157,7 +161,8 @@
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask,
                                          uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags);
+                                         audio_output_flags_t flags,
+                                         const audio_offload_info_t *offloadInfo);
 
     virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                                   audio_io_handle_t output2);
@@ -216,6 +221,8 @@
     virtual uint32_t getPrimaryOutputSamplingRate();
     virtual size_t getPrimaryOutputFrameCount();
 
+    virtual status_t setLowRamDevice(bool isLowRamDevice);
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -278,7 +285,7 @@
 
                 bool        btNrecIsOff() const { return mBtNrecIsOff; }
 
-                            AudioFlinger();
+                            AudioFlinger() ANDROID_API;
     virtual                 ~AudioFlinger();
 
     // call in any IAudioFlinger method that accesses mPrimaryHardwareDev
@@ -359,7 +366,9 @@
     class PlaybackThread;
     class MixerThread;
     class DirectOutputThread;
+    class OffloadThread;
     class DuplicatingThread;
+    class AsyncCallbackThread;
     class Track;
     class RecordTrack;
     class EffectModule;
@@ -401,8 +410,11 @@
                                              int64_t pts);
         virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
                                                   int target);
+        virtual status_t    setParameters(const String8& keyValuePairs);
+
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+
     private:
         const sp<PlaybackThread::Track> mTrack;
     };
@@ -424,6 +436,7 @@
         void                stop_nonvirtual();
     };
 
+
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
@@ -490,11 +503,12 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
+        audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
-        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out) :
-            audioHwDev(dev), stream(out) {}
+        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out, audio_output_flags_t flags) :
+            audioHwDev(dev), stream(out), flags(flags) {}
     };
 
     struct AudioStreamIn {
@@ -588,12 +602,11 @@
     status_t    closeOutput_nonvirtual(audio_io_handle_t output);
     status_t    closeInput_nonvirtual(audio_io_handle_t input);
 
-// do not use #ifdef here, since AudioFlinger.h is included by more than one module
-//#ifdef TEE_SINK
+#ifdef TEE_SINK
     // all record threads serially share a common tee sink, which is re-created on format change
     sp<NBAIO_Sink>   mRecordTeeSink;
     sp<NBAIO_Source> mRecordTeeSource;
-//#endif
+#endif
 
 public:
 
@@ -618,6 +631,15 @@
     static const size_t kTeeSinkTrackFramesDefault = 0x1000;
 #endif
 
+    // This method reads from a variable without mLock, but the variable is updated under mLock.  So
+    // we might read a stale value, or a value that's inconsistent with respect to other variables.
+    // In this case, it's safe because the return value isn't used for making an important decision.
+    // The reason we don't want to take mLock is because it could block the caller for a long time.
+    bool    isLowRamDevice() const { return mIsLowRamDevice; }
+
+private:
+    bool    mIsLowRamDevice;
+    bool    mIsDeviceTypeKnown;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7d38f80..df4e029 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 2706880..900b411 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "AudioPolicyService"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #undef __STRICT_ANSI__
 #define __STDINT_LIMITS
 #define __STDC_LIMIT_MACROS
@@ -40,6 +41,7 @@
 #include <system/audio_policy.h>
 #include <hardware/audio_policy.h>
 #include <audio_effects/audio_effects_conf.h>
+#include <media/AudioParameter.h>
 
 namespace android {
 
@@ -49,7 +51,7 @@
 static const int kDumpLockRetries = 50;
 static const int kDumpLockSleepUs = 20000;
 
-static const nsecs_t kAudioCommandTimeout = 3000000000; // 3 seconds
+static const nsecs_t kAudioCommandTimeout = 3000000000LL; // 3 seconds
 
 namespace {
     extern struct audio_policy_service_ops aps_ops;
@@ -68,10 +70,11 @@
     Mutex::Autolock _l(mLock);
 
     // start tone playback thread
-    mTonePlaybackThread = new AudioCommandThread(String8(""));
+    mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
     // start audio commands thread
-    mAudioCommandThread = new AudioCommandThread(String8("ApmCommand"));
-
+    mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
+    // start output activity command thread
+    mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
     if (rc)
@@ -222,15 +225,16 @@
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags)
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
 {
     if (mpAudioPolicy == NULL) {
         return 0;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, format, channelMask,
-                                        flags);
+    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
 }
 
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -253,6 +257,15 @@
         return NO_INIT;
     }
     ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
 }
@@ -263,6 +276,12 @@
         return;
     }
     ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     mpAudioPolicy->release_output(mpAudioPolicy, output);
 }
@@ -638,8 +657,9 @@
 
 // -----------  AudioPolicyService::AudioCommandThread implementation ----------
 
-AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name)
-    : Thread(false), mName(name)
+AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name,
+                                                           const wp<AudioPolicyService>& service)
+    : Thread(false), mName(name), mService(service)
 {
     mpToneGenerator = NULL;
 }
@@ -647,7 +667,7 @@
 
 AudioPolicyService::AudioCommandThread::~AudioCommandThread()
 {
-    if (mName != "" && !mAudioCommands.isEmpty()) {
+    if (!mAudioCommands.isEmpty()) {
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
@@ -656,11 +676,7 @@
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
 {
-    if (mName != "") {
-        run(mName.string(), ANDROID_PRIORITY_AUDIO);
-    } else {
-        run("AudioCommand", ANDROID_PRIORITY_AUDIO);
-    }
+    run(mName.string(), ANDROID_PRIORITY_AUDIO);
 }
 
 bool AudioPolicyService::AudioCommandThread::threadLoop()
@@ -735,6 +751,32 @@
                     }
                     delete data;
                     }break;
+                case STOP_OUTPUT: {
+                    StopOutputData *data = (StopOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing stop output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+                    mLock.lock();
+                    delete data;
+                    }break;
+                case RELEASE_OUTPUT: {
+                    ReleaseOutputData *data = (ReleaseOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing release output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doReleaseOutput(data->mIO);
+                    mLock.lock();
+                    delete data;
+                    }break;
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -746,7 +788,7 @@
             }
         }
         // release delayed commands wake lock
-        if (mName != "" && mAudioCommands.isEmpty()) {
+        if (mAudioCommands.isEmpty()) {
             release_wake_lock(mName.string());
         }
         ALOGV("AudioCommandThread() going to sleep");
@@ -890,17 +932,46 @@
     return status;
 }
 
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
+                                                               audio_stream_type_t stream,
+                                                               int session)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = STOP_OUTPUT;
+    StopOutputData *data = new StopOutputData();
+    data->mIO = output;
+    data->mStream = stream;
+    data->mSession = session;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding stop output %d", output);
+    mWaitWorkCV.signal();
+}
+
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = RELEASE_OUTPUT;
+    ReleaseOutputData *data = new ReleaseOutputData();
+    data->mIO = output;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding release output %d", output);
+    mWaitWorkCV.signal();
+}
+
 // insertCommand_l() must be called with mLock held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
 {
     ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
-
     nsecs_t time = 0;
     command->mTime = systemTime() + milliseconds(delayMs);
 
     // acquire wake lock to make sure delayed commands are processed
-    if (mName != "" && mAudioCommands.isEmpty()) {
+    if (mAudioCommands.isEmpty()) {
         acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
     }
 
@@ -1055,6 +1126,21 @@
     return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs);
 }
 
+bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+{
+    if (mpAudioPolicy == NULL) {
+        ALOGV("mpAudioPolicy == NULL");
+        return false;
+    }
+
+    if (mpAudioPolicy->is_offload_supported == NULL) {
+        ALOGV("HAL does not implement is_offload_supported");
+        return false;
+    }
+
+    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
+}
+
 // ----------------------------------------------------------------------------
 // Audio pre-processing configuration
 // ----------------------------------------------------------------------------
@@ -1387,7 +1473,8 @@
                                                    audio_format_t *pFormat,
                                                    audio_channel_mask_t *pChannelMask,
                                                    uint32_t *pLatencyMs,
-                                                   audio_output_flags_t flags)
+                                                   audio_output_flags_t flags,
+                                                   const audio_offload_info_t *offloadInfo)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -1395,7 +1482,7 @@
         return 0;
     }
     return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
+                          pLatencyMs, flags, offloadInfo);
 }
 
 static audio_io_handle_t aps_open_dup_output(void *service,
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 35cf368..ae053a9 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -19,6 +19,7 @@
 
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
+#include <cutils/compiler.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
@@ -44,7 +45,7 @@
 
 public:
     // for BinderService
-    static const char *getServiceName() { return "media.audio_policy"; }
+    static const char *getServiceName() ANDROID_API { return "media.audio_policy"; }
 
     virtual status_t    dump(int fd, const Vector<String16>& args);
 
@@ -66,7 +67,8 @@
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
                                         audio_output_flags_t flags =
-                                                AUDIO_OUTPUT_FLAG_NONE);
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0);
@@ -135,9 +137,15 @@
     virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
     virtual status_t stopTone();
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+    virtual bool isOffloadSupported(const audio_offload_info_t &config);
+
+            status_t doStopOutput(audio_io_handle_t output,
+                                  audio_stream_type_t stream,
+                                  int session = 0);
+            void doReleaseOutput(audio_io_handle_t output);
 
 private:
-                        AudioPolicyService();
+                        AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
 
             status_t dumpInternals(int fd);
@@ -158,10 +166,12 @@
             STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
-            SET_VOICE_VOLUME
+            SET_VOICE_VOLUME,
+            STOP_OUTPUT,
+            RELEASE_OUTPUT
         };
 
-        AudioCommandThread (String8 name);
+        AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
         virtual             ~AudioCommandThread();
 
                     status_t    dump(int fd);
@@ -179,6 +189,11 @@
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
                                             const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
+                    void        stopOutputCommand(audio_io_handle_t output,
+                                                  audio_stream_type_t stream,
+                                                  int session);
+                    void        releaseOutputCommand(audio_io_handle_t output);
+
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
     private:
@@ -223,12 +238,25 @@
             float mVolume;
         };
 
+        class StopOutputData {
+        public:
+            audio_io_handle_t mIO;
+            audio_stream_type_t mStream;
+            int mSession;
+        };
+
+        class ReleaseOutputData {
+        public:
+            audio_io_handle_t mIO;
+        };
+
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector <AudioCommand *> mAudioCommands; // list of pending commands
         ToneGenerator *mpToneGenerator;     // the tone generator
         AudioCommand mLastCommand;          // last processed command (used by dump)
         String8 mName;                      // string used by wake lock fo delayed commands
+        wp<AudioPolicyService> mService;
     };
 
     class EffectDesc {
@@ -313,6 +341,7 @@
                             // device connection state  or routing
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
+    sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 2b8694f..33e64ce 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -19,13 +19,14 @@
 
 #include <stdint.h>
 #include <sys/types.h>
+#include <cutils/compiler.h>
 
 #include <media/AudioBufferProvider.h>
 
 namespace android {
 // ----------------------------------------------------------------------------
 
-class AudioResampler {
+class ANDROID_API AudioResampler {
 public:
     // Determines quality of SRC.
     //  LOW_QUALITY: linear interpolator (1st order)
@@ -55,6 +56,14 @@
     // set the PTS of the next buffer output by the resampler
     virtual void setPTS(int64_t pts);
 
+    // Resample int16_t samples from provider and accumulate into 'out'.
+    // A mono provider delivers a sequence of samples.
+    // A stereo provider delivers a sequence of interleaved pairs of samples.
+    // Multi-channel providers are not supported.
+    // In either case, 'out' holds interleaved pairs of fixed-point signed Q19.12.
+    // That is, for a mono provider, there is an implicit up-channeling.
+    // Since this method accumulates, the caller is responsible for clearing 'out' initially.
+    // FIXME assumes provider is always successful; it should return the actual frame count.
     virtual void resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider) = 0;
 
diff --git a/services/audioflinger/AudioWatchdog.cpp b/services/audioflinger/AudioWatchdog.cpp
index 8f328ee..93d185e 100644
--- a/services/audioflinger/AudioWatchdog.cpp
+++ b/services/audioflinger/AudioWatchdog.cpp
@@ -17,9 +17,12 @@
 #define LOG_TAG "AudioWatchdog"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <utils/Log.h>
 #include "AudioWatchdog.h"
 
+#ifdef AUDIO_WATCHDOG
+
 namespace android {
 
 void AudioWatchdogDump::dump(int fd)
@@ -132,3 +135,5 @@
 }
 
 }   // namespace android
+
+#endif // AUDIO_WATCHDOG
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
new file mode 100644
index 0000000..bc2038a
--- /dev/null
+++ b/services/audioflinger/Configuration.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Put build-time configuration options here rather than Android.mk,
+// so that the instantiate for AudioFlinger service will pick up the same options.
+
+#ifndef ANDROID_AUDIOFLINGER_CONFIGURATION_H
+#define ANDROID_AUDIOFLINGER_CONFIGURATION_H
+
+// uncomment to enable detailed battery usage reporting (not debugged)
+//#define ADD_BATTERY_DATA
+
+// uncomment to enable the audio watchdog
+//#define AUDIO_WATCHDOG
+
+// uncomment to display CPU load adjusted for CPU frequency
+//#define CPU_FREQUENCY_STATISTICS
+
+// uncomment to enable fast mixer to take performance samples for later statistical analysis
+#define FAST_MIXER_STATISTICS
+
+// uncomment to allow fast tracks at non-native sample rate
+//#define FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
+
+// uncomment for debugging timing problems related to StateQueue::push()
+//#define STATE_QUEUE_DUMP
+
+// uncomment to allow tee sink debugging to be enabled by property
+//#define TEE_SINK
+
+// uncomment to log CPU statistics every n wall clock seconds
+//#define DEBUG_CPU_USAGE 10
+
+#endif // ANDROID_AUDIOFLINGER_CONFIGURATION_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 942ea35..d5a21a7 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <utils/Log.h>
 #include <audio_effects/effect_visualizer.h>
 #include <audio_utils/primitives.h>
@@ -94,16 +95,7 @@
 {
     ALOGV("Destructor %p", this);
     if (mEffectInterface != NULL) {
-        if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-                (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
-            sp<ThreadBase> thread = mThread.promote();
-            if (thread != 0) {
-                audio_stream_t *stream = thread->stream();
-                if (stream != NULL) {
-                    stream->remove_audio_effect(stream, mEffectInterface);
-                }
-            }
-        }
+        remove_effect_from_hal_l();
         // release effect engine
         EffectRelease(mEffectInterface);
     }
@@ -487,7 +479,7 @@
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
-    status_t cmdStatus;
+    status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
     status_t status = (*mEffectInterface)->command(mEffectInterface,
                                                    EFFECT_CMD_DISABLE,
@@ -495,12 +487,19 @@
                                                    NULL,
                                                    &size,
                                                    &cmdStatus);
-    if (status == 0) {
+    if (status == NO_ERROR) {
         status = cmdStatus;
     }
-    if (status == 0 &&
-            ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+    if (status == NO_ERROR) {
+        status = remove_effect_from_hal_l();
+    }
+    return status;
+}
+
+status_t AudioFlinger::EffectModule::remove_effect_from_hal_l()
+{
+    if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
             audio_stream_t *stream = thread->stream();
@@ -509,7 +508,7 @@
             }
         }
     }
-    return status;
+    return NO_ERROR;
 }
 
 status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
@@ -594,6 +593,17 @@
                 h->setEnabled(enabled);
             }
         }
+//EL_FIXME not sure why this is needed?
+//        sp<ThreadBase> thread = mThread.promote();
+//        if (thread == 0) {
+//            return NO_ERROR;
+//        }
+//
+//        if ((thread->type() == ThreadBase::OFFLOAD) && (enabled)) {
+//            PlaybackThread *p = (PlaybackThread *)thread.get();
+//            ALOGV("setEnabled: Offload, invalidate tracks");
+//            p->invalidateTracks(AUDIO_STREAM_MUSIC);
+//        }
     }
     return NO_ERROR;
 }
@@ -1217,9 +1227,7 @@
 // Must be called with EffectChain::mLock locked
 void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
 {
-    size_t numSamples = thread->frameCount() * thread->channelCount();
-    memset(mInBuffer, 0, numSamples * sizeof(int16_t));
-
+    memset(mInBuffer, 0, thread->frameCount() * thread->frameSize());
 }
 
 // Must be called with EffectChain::mLock locked
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 91303ee..0b7fb83 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -126,6 +126,7 @@
 
     status_t start_l();
     status_t stop_l();
+    status_t remove_effect_from_hal_l();
 
 mutable Mutex               mLock;      // mutex for process, commands and handles list protection
     wp<ThreadBase>      mThread;    // parent thread
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 21df1d7..ad9f4f2 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -25,6 +25,7 @@
 
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
+#include "Configuration.h"
 #include <sys/atomics.h>
 #include <time.h>
 #include <utils/Log.h>
@@ -44,6 +45,8 @@
 #define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
 
+#define FCC_2                       2   // fixed channel count assumption
+
 namespace android {
 
 // Fast mixer thread
@@ -82,7 +85,7 @@
     struct timespec oldLoad = {0, 0};    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
     bool oldLoadValid = false;  // whether oldLoad is valid
     uint32_t bounds = 0;
-    bool full = false;      // whether we have collected at least kSamplingN samples
+    bool full = false;      // whether we have collected at least mSamplingN samples
 #ifdef CPU_FREQUENCY_STATISTICS
     ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
 #endif
@@ -142,7 +145,9 @@
                     preIdle = *current;
                     current = &preIdle;
                     oldTsValid = false;
+#ifdef FAST_MIXER_STATISTICS
                     oldLoadValid = false;
+#endif
                     ignoreNextOverrun = true;
                 }
                 previous = current;
@@ -182,8 +187,10 @@
                 warmupCycles = 0;
                 sleepNs = -1;
                 coldGen = current->mColdGen;
+#ifdef FAST_MIXER_STATISTICS
                 bounds = 0;
                 full = false;
+#endif
                 oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
             } else {
                 sleepNs = FAST_HOT_IDLE_NS;
@@ -220,7 +227,7 @@
                 } else {
                     format = outputSink->format();
                     sampleRate = Format_sampleRate(format);
-                    ALOG_ASSERT(Format_channelCount(format) == 2);
+                    ALOG_ASSERT(Format_channelCount(format) == FCC_2);
                 }
                 dumpState->mSampleRate = sampleRate;
             }
@@ -236,7 +243,7 @@
                     //       implementation; it would be better to have normal mixer allocate for us
                     //       to avoid blocking here and to prevent possible priority inversion
                     mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-                    mixBuffer = new short[frameCount * 2];
+                    mixBuffer = new short[frameCount * FCC_2];
                     periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
                     underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
                     overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
@@ -433,7 +440,7 @@
         //bool didFullWrite = false;    // dumpsys could display a count of partial writes
         if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
             if (mixBufferState == UNDEFINED) {
-                memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
+                memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short));
                 mixBufferState = ZEROED;
             }
             if (teeSink != NULL) {
@@ -498,91 +505,91 @@
                     }
                 }
                 sleepNs = -1;
-              if (isWarm) {
-                if (sec > 0 || nsec > underrunNs) {
-                    ATRACE_NAME("underrun");
-                    // FIXME only log occasionally
-                    ALOGV("underrun: time since last cycle %d.%03ld sec",
-                            (int) sec, nsec / 1000000L);
-                    dumpState->mUnderruns++;
-                    ignoreNextOverrun = true;
-                } else if (nsec < overrunNs) {
-                    if (ignoreNextOverrun) {
-                        ignoreNextOverrun = false;
-                    } else {
+                if (isWarm) {
+                    if (sec > 0 || nsec > underrunNs) {
+                        ATRACE_NAME("underrun");
                         // FIXME only log occasionally
-                        ALOGV("overrun: time since last cycle %d.%03ld sec",
+                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                 (int) sec, nsec / 1000000L);
-                        dumpState->mOverruns++;
-                    }
-                    // This forces a minimum cycle time. It:
-                    //   - compensates for an audio HAL with jitter due to sample rate conversion
-                    //   - works with a variable buffer depth audio HAL that never pulls at a rate
-                    //     < than overrunNs per buffer.
-                    //   - recovers from overrun immediately after underrun
-                    // It doesn't work with a non-blocking audio HAL.
-                    sleepNs = forceNs - nsec;
-                } else {
-                    ignoreNextOverrun = false;
-                }
-              }
-#ifdef FAST_MIXER_STATISTICS
-              if (isWarm) {
-                // advance the FIFO queue bounds
-                size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
-                bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
-                if (full) {
-                    bounds += 0x10000;
-                } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
-                    full = true;
-                }
-                // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
-                uint32_t monotonicNs = nsec;
-                if (sec > 0 && sec < 4) {
-                    monotonicNs += sec * 1000000000;
-                }
-                // compute the raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
-                uint32_t loadNs = 0;
-                struct timespec newLoad;
-                rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
-                if (rc == 0) {
-                    if (oldLoadValid) {
-                        sec = newLoad.tv_sec - oldLoad.tv_sec;
-                        nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
-                        if (nsec < 0) {
-                            --sec;
-                            nsec += 1000000000;
+                        dumpState->mUnderruns++;
+                        ignoreNextOverrun = true;
+                    } else if (nsec < overrunNs) {
+                        if (ignoreNextOverrun) {
+                            ignoreNextOverrun = false;
+                        } else {
+                            // FIXME only log occasionally
+                            ALOGV("overrun: time since last cycle %d.%03ld sec",
+                                    (int) sec, nsec / 1000000L);
+                            dumpState->mOverruns++;
                         }
-                        loadNs = nsec;
-                        if (sec > 0 && sec < 4) {
-                            loadNs += sec * 1000000000;
-                        }
+                        // This forces a minimum cycle time. It:
+                        //  - compensates for an audio HAL with jitter due to sample rate conversion
+                        //  - works with a variable buffer depth audio HAL that never pulls at a
+                        //    rate < than overrunNs per buffer.
+                        //  - recovers from overrun immediately after underrun
+                        // It doesn't work with a non-blocking audio HAL.
+                        sleepNs = forceNs - nsec;
                     } else {
-                        // first time through the loop
-                        oldLoadValid = true;
+                        ignoreNextOverrun = false;
                     }
-                    oldLoad = newLoad;
                 }
+#ifdef FAST_MIXER_STATISTICS
+                if (isWarm) {
+                    // advance the FIFO queue bounds
+                    size_t i = bounds & (dumpState->mSamplingN - 1);
+                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
+                    if (full) {
+                        bounds += 0x10000;
+                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
+                        full = true;
+                    }
+                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
+                    uint32_t monotonicNs = nsec;
+                    if (sec > 0 && sec < 4) {
+                        monotonicNs += sec * 1000000000;
+                    }
+                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+                    uint32_t loadNs = 0;
+                    struct timespec newLoad;
+                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
+                    if (rc == 0) {
+                        if (oldLoadValid) {
+                            sec = newLoad.tv_sec - oldLoad.tv_sec;
+                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+                            if (nsec < 0) {
+                                --sec;
+                                nsec += 1000000000;
+                            }
+                            loadNs = nsec;
+                            if (sec > 0 && sec < 4) {
+                                loadNs += sec * 1000000000;
+                            }
+                        } else {
+                            // first time through the loop
+                            oldLoadValid = true;
+                        }
+                        oldLoad = newLoad;
+                    }
 #ifdef CPU_FREQUENCY_STATISTICS
-                // get the absolute value of CPU clock frequency in kHz
-                int cpuNum = sched_getcpu();
-                uint32_t kHz = tcu.getCpukHz(cpuNum);
-                kHz = (kHz << 4) | (cpuNum & 0xF);
+                    // get the absolute value of CPU clock frequency in kHz
+                    int cpuNum = sched_getcpu();
+                    uint32_t kHz = tcu.getCpukHz(cpuNum);
+                    kHz = (kHz << 4) | (cpuNum & 0xF);
 #endif
-                // save values in FIFO queues for dumpsys
-                // these stores #1, #2, #3 are not atomic with respect to each other,
-                // or with respect to store #4 below
-                dumpState->mMonotonicNs[i] = monotonicNs;
-                dumpState->mLoadNs[i] = loadNs;
+                    // save values in FIFO queues for dumpsys
+                    // these stores #1, #2, #3 are not atomic with respect to each other,
+                    // or with respect to store #4 below
+                    dumpState->mMonotonicNs[i] = monotonicNs;
+                    dumpState->mLoadNs[i] = loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
-                dumpState->mCpukHz[i] = kHz;
+                    dumpState->mCpukHz[i] = kHz;
 #endif
-                // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
-                // the newest open and oldest closed halves are atomic with respect to each other
-                dumpState->mBounds = bounds;
-                ATRACE_INT("cycle_ms", monotonicNs / 1000000);
-                ATRACE_INT("load_us", loadNs / 1000);
-              }
+                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
+                    // the newest open & oldest closed halves are atomic with respect to each other
+                    dumpState->mBounds = bounds;
+                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
+                    ATRACE_INT("load_us", loadNs / 1000);
+                }
 #endif
             } else {
                 // first time through the loop
@@ -603,26 +610,44 @@
     // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
 }
 
-FastMixerDumpState::FastMixerDumpState() :
+FastMixerDumpState::FastMixerDumpState(
+#ifdef FAST_MIXER_STATISTICS
+        uint32_t samplingN
+#endif
+        ) :
     mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
     mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
     mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0),
     mTrackMask(0)
 #ifdef FAST_MIXER_STATISTICS
-    , mBounds(0)
+    , mSamplingN(0), mBounds(0)
 #endif
 {
     mMeasuredWarmupTs.tv_sec = 0;
     mMeasuredWarmupTs.tv_nsec = 0;
-    // sample arrays aren't accessed atomically with respect to the bounds,
-    // so clearing reduces chance for dumpsys to read random uninitialized samples
-    memset(&mMonotonicNs, 0, sizeof(mMonotonicNs));
-    memset(&mLoadNs, 0, sizeof(mLoadNs));
-#ifdef CPU_FREQUENCY_STATISTICS
-    memset(&mCpukHz, 0, sizeof(mCpukHz));
+#ifdef FAST_MIXER_STATISTICS
+    increaseSamplingN(samplingN);
 #endif
 }
 
+#ifdef FAST_MIXER_STATISTICS
+void FastMixerDumpState::increaseSamplingN(uint32_t samplingN)
+{
+    if (samplingN <= mSamplingN || samplingN > kSamplingN || roundup(samplingN) != samplingN) {
+        return;
+    }
+    uint32_t additional = samplingN - mSamplingN;
+    // sample arrays aren't accessed atomically with respect to the bounds,
+    // so clearing reduces chance for dumpsys to read random uninitialized samples
+    memset(&mMonotonicNs[mSamplingN], 0, sizeof(mMonotonicNs[0]) * additional);
+    memset(&mLoadNs[mSamplingN], 0, sizeof(mLoadNs[0]) * additional);
+#ifdef CPU_FREQUENCY_STATISTICS
+    memset(&mCpukHz[mSamplingN], 0, sizeof(mCpukHz[0]) * additional);
+#endif
+    mSamplingN = samplingN;
+}
+#endif
+
 FastMixerDumpState::~FastMixerDumpState()
 {
 }
@@ -641,7 +666,7 @@
     }
 }
 
-void FastMixerDumpState::dump(int fd)
+void FastMixerDumpState::dump(int fd) const
 {
     if (mCommand == FastMixerState::INITIAL) {
         fdprintf(fd, "FastMixer not initialized\n");
@@ -692,9 +717,9 @@
     uint32_t newestOpen = bounds & 0xFFFF;
     uint32_t oldestClosed = bounds >> 16;
     uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
-    if (n > kSamplingN) {
+    if (n > mSamplingN) {
         ALOGE("too many samples %u", n);
-        n = kSamplingN;
+        n = mSamplingN;
     }
     // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
     // and adjusted CPU load in MHz normalized for CPU clock frequency
@@ -710,7 +735,7 @@
     uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
     // loop over all the samples
     for (uint32_t j = 0; j < n; ++j) {
-        size_t i = oldestClosed++ & (kSamplingN - 1);
+        size_t i = oldestClosed++ & (mSamplingN - 1);
         uint32_t wallNs = mMonotonicNs[i];
         if (tail != NULL) {
             tail[j] = wallNs;
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 2ab1d04..6158925 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -85,10 +85,14 @@
 // Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
 // It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
 struct FastMixerDumpState {
-    FastMixerDumpState();
+    FastMixerDumpState(
+#ifdef FAST_MIXER_STATISTICS
+            uint32_t samplingN = kSamplingNforLowRamDevice
+#endif
+            );
     /*virtual*/ ~FastMixerDumpState();
 
-    void dump(int fd);          // should only be called on a stable copy, not the original
+    void dump(int fd) const;    // should only be called on a stable copy, not the original
 
     FastMixerState::Command mCommand;   // current command
     uint32_t mWriteSequence;    // incremented before and after each write()
@@ -106,8 +110,15 @@
 
 #ifdef FAST_MIXER_STATISTICS
     // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
-    // kSamplingN is the size of the sampling frame, and must be a power of 2 <= 0x8000.
+    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
+    // The sample arrays are virtually allocated based on this compile-time constant,
+    // but are only initialized and used based on the runtime parameter mSamplingN.
     static const uint32_t kSamplingN = 0x8000;
+    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
+    // This value was chosen such that each array uses 1 small page (4 Kbytes).
+    static const uint32_t kSamplingNforLowRamDevice = 0x400;
+    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
+    uint32_t mSamplingN;
     // The bounds define the interval of valid samples, and are represented as follows:
     //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
     //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
@@ -119,6 +130,8 @@
 #ifdef CPU_FREQUENCY_STATISTICS
     uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
 #endif
+    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
+    void    increaseSamplingN(uint32_t samplingN);
 #endif
 };
 
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index c45c81b..737de97 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "Configuration.h"
 #include "FastMixerState.h"
 
 namespace android {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a749d7a..5600411c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -46,9 +46,13 @@
             void        destroy();
             int         name() const { return mName; }
 
+    virtual uint32_t    sampleRate() const;
+
             audio_stream_type_t streamType() const {
                 return mStreamType;
             }
+            bool        isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; }
+            status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
             int32_t     *auxBuffer() const { return mAuxBuffer; }
@@ -66,6 +70,7 @@
     friend class PlaybackThread;
     friend class MixerThread;
     friend class DirectOutputThread;
+    friend class OffloadThread;
 
                         Track(const Track&);
                         Track& operator = (const Track&);
@@ -101,6 +106,7 @@
     bool isInvalid() const { return mIsInvalid; }
     virtual bool isTimedTrack() const { return false; }
     bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+    int fastIndex() const { return mFastIndex; }
 
 protected:
 
@@ -134,11 +140,12 @@
                                     // but the slot is only used if track is active
     FastTrackUnderruns  mObservedUnderruns; // Most recently observed value of
                                     // mFastMixerDumpState.mTracks[mFastIndex].mUnderruns
-    uint32_t            mUnderrunCount; // Counter of total number of underruns, never reset
     volatile float      mCachedVolume;  // combined master volume and stream type volume;
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
+    AudioTrackServerProxy*  mAudioTrackServerProxy;
+    bool                mResumeToStopping; // track was paused in stopping state.
 };  // end of Track
 
 class TimedTrack : public Track {
@@ -255,10 +262,6 @@
 
 private:
 
-    enum {
-        NO_MORE_BUFFERS = 0x80000001,   // same in AudioTrack.h, ok to be different value
-    };
-
     status_t            obtainBuffer(AudioBufferProvider::Buffer* buffer,
                                      uint32_t waitTimeMs);
     void                clearBufferQueue();
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 6c0d1d3..ffe3e9f 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -57,4 +57,5 @@
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
+    AudioRecordServerProxy* mAudioRecordServerProxy;
 };
diff --git a/services/audioflinger/StateQueue.cpp b/services/audioflinger/StateQueue.cpp
index 3e891a5..c2d3bbd 100644
--- a/services/audioflinger/StateQueue.cpp
+++ b/services/audioflinger/StateQueue.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "StateQueue"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <time.h>
 #include <cutils/atomic.h>
 #include <utils/Log.h>
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index e33b3c6..9cde642 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -31,8 +31,14 @@
 //        and this may result in an audible artifact
 //      needs read-only access to a recent stable state,
 //        but not necessarily the most current one
+//      only allocate and free memory when configuration changes
+//      avoid conventional logging, as this is a form of I/O and could block
+//      defer computation to other threads when feasible; for example
+//        cycle times are collected by fast mixer thread but the floating-point
+//        statistical calculations on these cycle times are computed by normal mixer
+//      these requirements also apply to callouts such as AudioBufferProvider and VolumeProvider
 //  Normal mixer thread:
-//      periodic with typical period ~40 ms
+//      periodic with typical period ~20 ms
 //      SCHED_OTHER scheduling policy and nice priority == urgent audio
 //      ok to block, but prefer to avoid as much as possible
 //      needs read/write access to state
diff --git a/services/audioflinger/StateQueueInstantiations.cpp b/services/audioflinger/StateQueueInstantiations.cpp
index 077582f..0d5cd0c 100644
--- a/services/audioflinger/StateQueueInstantiations.cpp
+++ b/services/audioflinger/StateQueueInstantiations.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "Configuration.h"
 #include "FastMixerState.h"
 #include "StateQueue.h"
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 0b88c0e..2c2931f 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -20,11 +20,12 @@
 //#define LOG_NDEBUG 0
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 
+#include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <cutils/properties.h>
-#include <cutils/compiler.h>
+#include <media/AudioParameter.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -53,14 +54,11 @@
 #include "ServiceUtilities.h"
 #include "SchedulingPolicyService.h"
 
-#undef ADD_BATTERY_DATA
-
 #ifdef ADD_BATTERY_DATA
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaDeathNotifier.h>
 #endif
 
-// #define DEBUG_CPU_USAGE 10  // log statistics every n wall clock seconds
 #ifdef DEBUG_CPU_USAGE
 #include <cpustats/CentralTendencyStatistics.h>
 #include <cpustats/ThreadCpuUsage.h>
@@ -139,7 +137,7 @@
 // FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
 // N-buffering, so AudioFlinger could allocate the right amount of memory.
 // See the client's minBufCount and mNotificationFramesAct calculations for details.
-static const int kFastTrackMultiplier = 2;
+static const int kFastTrackMultiplier = 1;
 
 // ----------------------------------------------------------------------------
 
@@ -267,10 +265,9 @@
         audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
     :   Thread(false /*canCallJava*/),
         mType(type),
-        mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mNormalFrameCount(0),
-        // mChannelMask
-        mChannelCount(0),
-        mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+        mAudioFlinger(audioFlinger),
+        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
+        // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
         mParamStatus(NO_ERROR),
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
@@ -281,6 +278,12 @@
 
 AudioFlinger::ThreadBase::~ThreadBase()
 {
+    // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
+    for (size_t i = 0; i < mConfigEvents.size(); i++) {
+        delete mConfigEvents[i];
+    }
+    mConfigEvents.clear();
+
     mParamCond.broadcast();
     // do not lock the mutex in destructor
     releaseWakeLock_l();
@@ -420,9 +423,7 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
     result.append(buffer);
-    snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "Channel Count: %d\n", mChannelCount);
+    snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
     result.append(buffer);
     snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
     result.append(buffer);
@@ -495,7 +496,8 @@
         sp<IBinder> binder = new BBinder();
         status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                                                          binder,
-                                                         String16(mName));
+                                                         String16(mName),
+                                                         String16("media"));
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
         }
@@ -926,13 +928,19 @@
                                              audio_devices_t device,
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
-        mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mNormalFrameCount(0), mMixBuffer(NULL),
+        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
         mMixerStatusIgnoringFastTracks(MIXER_IDLE),
         standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+        mBytesRemaining(0),
+        mCurrentWriteLength(0),
+        mUseAsyncWrite(false),
+        mWriteBlocked(false),
+        mDraining(false),
         mScreenState(AudioFlinger::mScreenState),
         // index 0 is reserved for normal mixer's submix
         mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
@@ -975,7 +983,7 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mMixBuffer;
+    delete [] mAllocMixBuffer;
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1043,6 +1051,8 @@
 
     snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
     result.append(buffer);
+    snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
+    result.append(buffer);
     snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
             ns2ms(systemTime() - mLastWriteTime));
     result.append(buffer);
@@ -1181,7 +1191,22 @@
                 goto Exit;
             }
         }
+    } else if (mType == OFFLOAD) {
+        if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
+            ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
+                    "for output %p with format %d",
+                    sampleRate, format, channelMask, mOutput, mFormat);
+            lStatus = BAD_VALUE;
+            goto Exit;
+        }
     } else {
+        if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) {
+                ALOGE("createTrack_l() Bad parameter: format %d \""
+                        "for output %p with format %d",
+                        format, mOutput, mFormat);
+                lStatus = BAD_VALUE;
+                goto Exit;
+        }
         // Resampler implementation limits input sampling rate to 2 x output sampling rate.
         if (sampleRate > mSampleRate*2) {
             ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
@@ -1227,6 +1252,7 @@
             lStatus = NO_MEMORY;
             goto Exit;
         }
+
         mTracks.add(track);
 
         sp<EffectChain> chain = getEffectChain_l(sessionId);
@@ -1301,12 +1327,14 @@
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].volume = value;
+    signal_l();
 }
 
 void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].mute = muted;
+    signal_l();
 }
 
 float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
@@ -1326,7 +1354,31 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
-        track->mFillingUpStatus = Track::FS_FILLING;
+        if (!track->isOutputTrack()) {
+            TrackBase::track_state state = track->mState;
+            mLock.unlock();
+            status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
+            mLock.lock();
+            // abort track was stopped/paused while we released the lock
+            if (state != track->mState) {
+                if (status == NO_ERROR) {
+                    mLock.unlock();
+                    AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+                    mLock.lock();
+                }
+                return INVALID_OPERATION;
+            }
+            // abort if start is rejected by audio policy manager
+            if (status != NO_ERROR) {
+                return PERMISSION_DENIED;
+            }
+#ifdef ADD_BATTERY_DATA
+            // to track the speaker usage
+            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
+#endif
+        }
+
+        track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
@@ -1346,14 +1398,19 @@
     return status;
 }
 
-// destroyTrack_l() must be called with ThreadBase::mLock held
-void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
+bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
     // active tracks are removed by threadLoop()
-    if (mActiveTracks.indexOf(track) < 0) {
+    bool trackActive = (mActiveTracks.indexOf(track) >= 0);
+    track->mState = TrackBase::STOPPED;
+    if (!trackActive) {
         removeTrack_l(track);
+    } else if (track->isFastTrack() || track->isOffloaded()) {
+        track->mState = TrackBase::STOPPING_1;
     }
+
+    return trackActive;
 }
 
 void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
@@ -1377,18 +1434,25 @@
     }
 }
 
+void AudioFlinger::PlaybackThread::signal_l()
+{
+    // Thread could be blocked waiting for async
+    // so signal it to handle state changes immediately
+    // If threadLoop is currently unlocked a signal of mWaitWorkCV will
+    // be lost so we also flag to prevent it blocking on mWaitWorkCV
+    mSignalPending = true;
+    mWaitWorkCV.signal();
+}
+
 String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
 {
-    String8 out_s8 = String8("");
-    char *s;
-
     Mutex::Autolock _l(mLock);
     if (initCheck() != NO_ERROR) {
-        return out_s8;
+        return String8();
     }
 
-    s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
-    out_s8 = String8(s);
+    char *s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
+    const String8 out_s8(s);
     free(s);
     return out_s8;
 }
@@ -1404,7 +1468,7 @@
     switch (event) {
     case AudioSystem::OUTPUT_OPENED:
     case AudioSystem::OUTPUT_CONFIG_CHANGED:
-        desc.channels = mChannelMask;
+        desc.channelMask = mChannelMask;
         desc.samplingRate = mSampleRate;
         desc.format = mFormat;
         desc.frameCount = mNormalFrameCount; // FIXME see
@@ -1422,12 +1486,78 @@
     mAudioFlinger->audioConfigChanged_l(event, mId, param2);
 }
 
+void AudioFlinger::PlaybackThread::writeCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setWriteBlocked(false);
+}
+
+void AudioFlinger::PlaybackThread::drainCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setDraining(false);
+}
+
+void AudioFlinger::PlaybackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::PlaybackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+// static
+int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
+                                                void *param,
+                                                void *cookie)
+{
+    AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
+    ALOGV("asyncCallback() event %d", event);
+    switch (event) {
+    case STREAM_CBK_EVENT_WRITE_READY:
+        me->writeCallback();
+        break;
+    case STREAM_CBK_EVENT_DRAIN_READY:
+        me->drainCallback();
+        break;
+    default:
+        ALOGW("asyncCallback() unknown event %d", event);
+        break;
+    }
+    return 0;
+}
+
 void AudioFlinger::PlaybackThread::readOutputParameters()
 {
+    // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
     mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
-    mChannelCount = (uint16_t)popcount(mChannelMask);
+    if (!audio_is_output_channel(mChannelMask)) {
+        LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
+        LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
+                "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
+    }
+    mChannelCount = popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+    if (!audio_is_valid_format(mFormat)) {
+        LOG_FATAL("HAL format %d not valid for output", mFormat);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+        LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
+                mFormat);
+    }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
     mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
     if (mFrameCount & 15) {
@@ -1435,6 +1565,14 @@
                 mFrameCount);
     }
 
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
+            (mOutput->stream->set_callback != NULL)) {
+        if (mOutput->stream->set_callback(mOutput->stream,
+                                      AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
+            mUseAsyncWrite = true;
+        }
+    }
+
     // Calculate size of normal mix buffer relative to the HAL output buffer size
     double multiplier = 1.0;
     if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -1477,9 +1615,11 @@
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mMixBuffer;
-    mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
-    memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+    delete[] mAllocMixBuffer;
+    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
+    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
+    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
+    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
@@ -1613,16 +1753,21 @@
         const Vector< sp<Track> >& tracksToRemove)
 {
     size_t count = tracksToRemove.size();
-    if (CC_UNLIKELY(count)) {
+    if (count) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
-            if ((track->sharedBuffer() != 0) &&
-                    (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) {
+            if (!track->isOutputTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+#ifdef ADD_BATTERY_DATA
+                // to track the speaker usage
+                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+                if (track->isTerminated()) {
+                    AudioSystem::releaseOutput(mId);
+                }
             }
         }
     }
-
 }
 
 void AudioFlinger::PlaybackThread::checkSilentMode_l()
@@ -1643,17 +1788,18 @@
 }
 
 // shared by MIXER and DIRECT, overridden by DUPLICATING
-void AudioFlinger::PlaybackThread::threadLoop_write()
+ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
 {
     // FIXME rewrite to reduce number of system calls
     mLastWriteTime = systemTime();
     mInWrite = true;
-    int bytesWritten;
+    ssize_t bytesWritten;
 
     // If an NBAIO sink is present, use it to write the normal mixer's submix
     if (mNormalSink != 0) {
 #define mBitShift 2 // FIXME
-        size_t count = mixBufferSize >> mBitShift;
+        size_t count = mBytesRemaining >> mBitShift;
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1;
         ATRACE_BEGIN("write");
         // update the setpoint when AudioFlinger::mScreenState changes
         uint32_t screenState = AudioFlinger::mScreenState;
@@ -1665,7 +1811,7 @@
                         (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
             }
         }
-        ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
+        ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count);
         ATRACE_END();
         if (framesWritten > 0) {
             bytesWritten = framesWritten << mBitShift;
@@ -1674,15 +1820,48 @@
         }
     // otherwise use the HAL / AudioStreamOut directly
     } else {
-        // Direct output thread.
-        bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+        // Direct output and offload threads
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) / sizeof(int16_t);
+        if (mUseAsyncWrite) {
+            mWriteBlocked = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(true);
+        }
+        bytesWritten = mOutput->stream->write(mOutput->stream,
+                                                   mMixBuffer + offset, mBytesRemaining);
+        if (mUseAsyncWrite &&
+                ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
+            // do not wait for async callback in case of error of full write
+            mWriteBlocked = false;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(false);
+        }
     }
 
-    if (bytesWritten > 0) {
-        mBytesWritten += mixBufferSize;
-    }
     mNumWrites++;
     mInWrite = false;
+
+    return bytesWritten;
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_drain()
+{
+    if (mOutput->stream->drain) {
+        ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
+        if (mUseAsyncWrite) {
+            mDraining = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setDraining(true);
+        }
+        mOutput->stream->drain(mOutput->stream,
+            (mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
+                                                : AUDIO_DRAIN_ALL);
+    }
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_exit()
+{
+    // Default implementation has nothing to do
 }
 
 /*
@@ -1713,7 +1892,7 @@
 
 void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
 {
-    ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+    ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
             this,  streamType, mTracks.size());
     Mutex::Autolock _l(mLock);
 
@@ -1923,10 +2102,29 @@
 
             saveOutputTracks();
 
-            // put audio hardware into standby after short delay
-            if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
-                        isSuspended())) {
-                if (!mStandby) {
+            if (mSignalPending) {
+                // A signal was raised while we were unlocked
+                mSignalPending = false;
+            } else if (waitingAsyncCallback_l()) {
+                if (exitPending()) {
+                    break;
+                }
+                releaseWakeLock_l();
+                ALOGV("wait async completion");
+                mWaitWorkCV.wait(mLock);
+                ALOGV("async completion/wake");
+                acquireWakeLock_l();
+                if (exitPending()) {
+                    break;
+                }
+                if (!mActiveTracks.size() && (systemTime() > standbyTime)) {
+                    continue;
+                }
+                sleepTime = 0;
+            } else if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
+                                   isSuspended()) {
+                // put audio hardware into standby after short delay
+                if (shouldStandby_l()) {
 
                     threadLoop_standby();
 
@@ -1953,7 +2151,7 @@
                     mMixerStatus = MIXER_IDLE;
                     mMixerStatusIgnoringFastTracks = MIXER_IDLE;
                     mBytesWritten = 0;
-
+                    mBytesRemaining = 0;
                     checkSilentMode_l();
 
                     standbyTime = systemTime() + standbyDelay;
@@ -1975,50 +2173,73 @@
             lockEffectChains_l(effectChains);
         }
 
-        if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
-            threadLoop_mix();
-        } else {
-            threadLoop_sleepTime();
-        }
+        if (mBytesRemaining == 0) {
+            mCurrentWriteLength = 0;
+            if (mMixerStatus == MIXER_TRACKS_READY) {
+                // threadLoop_mix() sets mCurrentWriteLength
+                threadLoop_mix();
+            } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
+                        && (mMixerStatus != MIXER_DRAIN_ALL)) {
+                // threadLoop_sleepTime sets sleepTime to 0 if data
+                // must be written to HAL
+                threadLoop_sleepTime();
+                if (sleepTime == 0) {
+                    mCurrentWriteLength = mixBufferSize;
+                }
+            }
+            mBytesRemaining = mCurrentWriteLength;
+            if (isSuspended()) {
+                sleepTime = suspendSleepTimeUs();
+                // simulate write to HAL when suspended
+                mBytesWritten += mixBufferSize;
+                mBytesRemaining = 0;
+            }
 
-        if (isSuspended()) {
-            sleepTime = suspendSleepTimeUs();
-            mBytesWritten += mixBufferSize;
-        }
-
-        // only process effects if we're going to write
-        if (sleepTime == 0) {
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
+            // only process effects if we're going to write
+            if (sleepTime == 0) {
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    effectChains[i]->process_l();
+                }
             }
         }
 
         // enable changes in effect chain
         unlockEffectChains(effectChains);
 
-        // sleepTime == 0 means we must write to audio hardware
-        if (sleepTime == 0) {
-
-            threadLoop_write();
-
-if (mType == MIXER) {
-            // write blocked detection
-            nsecs_t now = systemTime();
-            nsecs_t delta = now - mLastWriteTime;
-            if (!mStandby && delta > maxPeriod) {
-                mNumDelayedWrites++;
-                if ((now - lastWarning) > kWarningThrottleNs) {
-                    ATRACE_NAME("underrun");
-                    ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
-                            ns2ms(delta), mNumDelayedWrites, this);
-                    lastWarning = now;
+        if (!waitingAsyncCallback()) {
+            // sleepTime == 0 means we must write to audio hardware
+            if (sleepTime == 0) {
+                if (mBytesRemaining) {
+                    ssize_t ret = threadLoop_write();
+                    if (ret < 0) {
+                        mBytesRemaining = 0;
+                    } else {
+                        mBytesWritten += ret;
+                        mBytesRemaining -= ret;
+                    }
+                } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
+                        (mMixerStatus == MIXER_DRAIN_ALL)) {
+                    threadLoop_drain();
                 }
-            }
+if (mType == MIXER) {
+                // write blocked detection
+                nsecs_t now = systemTime();
+                nsecs_t delta = now - mLastWriteTime;
+                if (!mStandby && delta > maxPeriod) {
+                    mNumDelayedWrites++;
+                    if ((now - lastWarning) > kWarningThrottleNs) {
+                        ATRACE_NAME("underrun");
+                        ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
+                                ns2ms(delta), mNumDelayedWrites, this);
+                        lastWarning = now;
+                    }
+                }
 }
 
-            mStandby = false;
-        } else {
-            usleep(sleepTime);
+                mStandby = false;
+            } else {
+                usleep(sleepTime);
+            }
         }
 
         // Finally let go of removed track(s), without the lock held
@@ -2040,8 +2261,10 @@
         // is now local to this block, but will keep it for now (at least until merge done).
     }
 
+    threadLoop_exit();
+
     // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
-    if (mType == MIXER || mType == DIRECT) {
+    if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
         // put output stream into standby mode
         if (!mStandby) {
             mOutput->stream->common.standby(&mOutput->stream->common);
@@ -2054,6 +2277,28 @@
     return false;
 }
 
+// removeTracks_l() must be called with ThreadBase::mLock held
+void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
+{
+    size_t count = tracksToRemove.size();
+    if (count) {
+        for (size_t i=0 ; i<count ; i++) {
+            const sp<Track>& track = tracksToRemove.itemAt(i);
+            mActiveTracks.remove(track);
+            ALOGV("removeTracks_l removing track on session %d", track->sessionId());
+            sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+            if (chain != 0) {
+                ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
+                        track->sessionId());
+                chain->decActiveTrackCnt();
+            }
+            if (track->isTerminated()) {
+                removeTrack_l(track);
+            }
+        }
+    }
+
+}
 
 // ----------------------------------------------------------------------------
 
@@ -2068,7 +2313,7 @@
         // mNormalSink below
 {
     ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
-    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%d, mFormat=%d, mFrameSize=%u, "
+    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
             "mFrameCount=%d, mNormalFrameCount=%d",
             mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
             mNormalFrameCount);
@@ -2258,7 +2503,7 @@
     PlaybackThread::threadLoop_removeTracks(tracksToRemove);
 }
 
-void AudioFlinger::MixerThread::threadLoop_write()
+ssize_t AudioFlinger::MixerThread::threadLoop_write()
 {
     // FIXME we should only do one push per cycle; confirm this is true
     // Start the fast mixer if it's not already running
@@ -2279,6 +2524,8 @@
 #endif
             }
             state->mCommand = FastMixerState::MIX_WRITE;
+            mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
+                    FastMixerDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
             sq->end();
             sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
             if (kUseFastMixer == FastMixer_Dynamic) {
@@ -2288,7 +2535,7 @@
             sq->end(false /*didModify*/);
         }
     }
-    PlaybackThread::threadLoop_write();
+    return PlaybackThread::threadLoop_write();
 }
 
 void AudioFlinger::MixerThread::threadLoop_standby()
@@ -2320,11 +2567,40 @@
     PlaybackThread::threadLoop_standby();
 }
 
+// Empty implementation for standard mixer
+// Overridden for offloaded playback
+void AudioFlinger::PlaybackThread::flushOutput_l()
+{
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
+{
+    return false;
+}
+
+bool AudioFlinger::PlaybackThread::shouldStandby_l()
+{
+    return !mStandby;
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
 // shared by MIXER and DIRECT, overridden by DUPLICATING
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
     mOutput->stream->common.standby(&mOutput->stream->common);
+    if (mUseAsyncWrite != 0) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
 }
 
 void AudioFlinger::MixerThread::threadLoop_mix()
@@ -2345,6 +2621,7 @@
 
     // mix buffers...
     mAudioMixer->process(pts);
+    mCurrentWriteLength = mixBufferSize;
     // increase sleep time progressively when application underrun condition clears.
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
     // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -2426,7 +2703,7 @@
     }
 
     for (size_t i=0 ; i<count ; i++) {
-        sp<Track> t = mActiveTracks[i].promote();
+        const sp<Track> t = mActiveTracks[i].promote();
         if (t == 0) {
             continue;
         }
@@ -2462,8 +2739,10 @@
             track->mObservedUnderruns = underruns;
             // don't count underruns that occur while stopping or pausing
             // or stopped which can occur when flush() is called while active
-            if (!(track->isStopping() || track->isPausing() || track->isStopped())) {
-                track->mUnderrunCount += recentUnderruns;
+            if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
+                    recentUnderruns > 0) {
+                // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
+                track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
             }
 
             // This is similar to the state machine for normal tracks,
@@ -2472,7 +2751,7 @@
             switch (track->mState) {
             case TrackBase::STOPPING_1:
                 // track stays active in STOPPING_1 state until first underrun
-                if (recentUnderruns > 0) {
+                if (recentUnderruns > 0 || track->isTerminated()) {
                     track->mState = TrackBase::STOPPING_2;
                 }
                 break;
@@ -2506,7 +2785,7 @@
                     }
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &track->mCblk->flags);
+                    android_atomic_or(CBLK_DISABLED, &track->mCblk->mFlags);
                     // remove from active list, but state remains ACTIVE [confusing but true]
                     isActive = false;
                     break;
@@ -2514,7 +2793,6 @@
                 // fall through
             case TrackBase::STOPPING_2:
             case TrackBase::PAUSED:
-            case TrackBase::TERMINATED:
             case TrackBase::STOPPED:
             case TrackBase::FLUSHED:   // flush() while active
                 // Check for presentation complete if track is inactive
@@ -2595,28 +2873,39 @@
         // app does not call stop() and relies on underrun to stop:
         // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
         // during last round
+        size_t desiredFrames;
+        uint32_t sr = track->sampleRate();
+        if (sr == mSampleRate) {
+            desiredFrames = mNormalFrameCount;
+        } else {
+            // +1 for rounding and +1 for additional sample needed for interpolation
+            desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
+            // add frames already consumed but not yet released by the resampler
+            // because cblk->framesReady() will include these frames
+            desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+            // the minimum track buffer size is normally twice the number of frames necessary
+            // to fill one buffer and the resampler should not leave more than one buffer worth
+            // of unreleased frames after each pass, but just in case...
+            ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
+        }
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
                 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
-            if (t->sampleRate() == mSampleRate) {
-                minFrames = mNormalFrameCount;
-            } else {
-                // +1 for rounding and +1 for additional sample needed for interpolation
-                minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
-                // add frames already consumed but not yet released by the resampler
-                // because cblk->framesReady() will include these frames
-                minFrames += mAudioMixer->getUnreleasedFrames(track->name());
-                // the minimum track buffer size is normally twice the number of frames necessary
-                // to fill one buffer and the resampler should not leave more than one buffer worth
-                // of unreleased frames after each pass, but just in case...
-                ALOG_ASSERT(minFrames <= cblk->frameCount_);
-            }
+            minFrames = desiredFrames;
         }
-        if ((track->framesReady() >= minFrames) && track->isReady() &&
+        // It's not safe to call framesReady() for a static buffer track, so assume it's ready
+        size_t framesReady;
+        if (track->sharedBuffer() == 0) {
+            framesReady = track->framesReady();
+        } else if (track->isStopped()) {
+            framesReady = 0;
+        } else {
+            framesReady = 1;
+        }
+        if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
-            ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
-                    this);
+            ALOGVV("track %d s=%08x [OK] on thread %p", name, cblk->mServer, this);
 
             mixedTracks++;
 
@@ -2645,7 +2934,8 @@
                     param = AudioMixer::RAMP_VOLUME;
                 }
                 mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
-            } else if (cblk->server != 0) {
+            // FIXME should not make a decision based on mServer
+            } else if (cblk->mServer != 0) {
                 // If the track is stopped before the first frame was mixed,
                 // do not apply ramp
                 param = AudioMixer::RAMP_VOLUME;
@@ -2663,7 +2953,7 @@
                 // read original volumes with volume control
                 float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = masterVolume * typeVolume;
-                ServerProxy *proxy = track->mServerProxy;
+                AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
                 uint32_t vlr = proxy->getVolumeLR();
                 vl = vlr & 0xFFFF;
                 vr = vlr >> 16;
@@ -2690,6 +2980,7 @@
                 }
                 va = (uint32_t)(v * sendLevel);
             }
+
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
@@ -2736,7 +3027,7 @@
                 AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * 2;
-            uint32_t reqSampleRate = track->mServerProxy->getSampleRate();
+            uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
             if (reqSampleRate == 0) {
                 reqSampleRate = mSampleRate;
             } else if (reqSampleRate > maxSampleRate) {
@@ -2767,6 +3058,9 @@
                 mixerStatus = MIXER_TRACKS_READY;
             }
         } else {
+            if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
+                track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
+            }
             // clear effect chain input buffer if an active track underruns to avoid sending
             // previous audio buffer again to effects
             chain = getEffectChain_l(track->sessionId());
@@ -2774,8 +3068,7 @@
                 chain->clearInputBuffer();
             }
 
-            ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user,
-                    cblk->server, this);
+            ALOGVV("track %d s=%08x [NOT READY] on thread %p", name, cblk->mServer, this);
             if ((track->sharedBuffer() != 0) || track->isTerminated() ||
                     track->isStopped() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
@@ -2791,7 +3084,6 @@
                     tracksToRemove->add(track);
                 }
             } else {
-                track->mUnderrunCount++;
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
@@ -2799,7 +3091,7 @@
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &cblk->flags);
+                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
                 // If one track is not ready, mark the mixer also not ready if:
                 //  - the mixer was ready during previous round OR
                 //  - no other track is ready
@@ -2861,30 +3153,13 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i=0 ; i<count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (track->mainBuffer() != mMixBuffer) {
-                chain = getEffectChain_l(track->sessionId());
-                if (chain != 0) {
-                    ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
-                            track->sessionId());
-                    chain->decActiveTrackCnt();
-                }
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     // mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to
     // mix buffer and track effects will accumulate into it
-    if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
-            (mixedTracks == 0 && fastTracks > 0)) {
+    if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            (mixedTracks == 0 && fastTracks > 0))) {
         // FIXME as a performance optimization, should remember previous zero status
         memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
     }
@@ -2948,7 +3223,7 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
-            if (value != AUDIO_CHANNEL_OUT_STEREO) {
+            if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) {
                 status = BAD_VALUE;
             } else {
                 reconfig = true;
@@ -3009,10 +3284,8 @@
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
-                delete mAudioMixer;
-                // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
-                mAudioMixer = NULL;
                 readOutputParameters();
+                delete mAudioMixer;
                 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
                 for (size_t i = 0; i < mTracks.size() ; i++) {
                     int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId);
@@ -3061,7 +3334,7 @@
     write(fd, result.string(), result.size());
 
     // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
-    FastMixerDumpState copy = mFastMixerDumpState;
+    const FastMixerDumpState copy(mFastMixerDumpState);
     copy.dump(fd);
 
 #ifdef STATE_QUEUE_DUMP
@@ -3116,10 +3389,63 @@
 {
 }
 
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
+        ThreadBase::type_t type)
+    :   PlaybackThread(audioFlinger, output, id, device, type)
+        // mLeftVolFloat, mRightVolFloat
+{
+}
+
 AudioFlinger::DirectOutputThread::~DirectOutputThread()
 {
 }
 
+void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
+{
+    audio_track_cblk_t* cblk = track->cblk();
+    float left, right;
+
+    if (mMasterMute || mStreamTypes[track->streamType()].mute) {
+        left = right = 0;
+    } else {
+        float typeVolume = mStreamTypes[track->streamType()].volume;
+        float v = mMasterVolume * typeVolume;
+        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+        uint32_t vlr = proxy->getVolumeLR();
+        float v_clamped = v * (vlr & 0xFFFF);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        left = v_clamped/MAX_GAIN;
+        v_clamped = v * (vlr >> 16);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        right = v_clamped/MAX_GAIN;
+    }
+
+    if (lastTrack) {
+        if (left != mLeftVolFloat || right != mRightVolFloat) {
+            mLeftVolFloat = left;
+            mRightVolFloat = right;
+
+            // Convert volumes from float to 8.24
+            uint32_t vl = (uint32_t)(left * (1 << 24));
+            uint32_t vr = (uint32_t)(right * (1 << 24));
+
+            // Delegate volume control to effect in track effect chain if needed
+            // only one effect chain can be present on DirectOutputThread, so if
+            // there is one, the track is connected to it
+            if (!mEffectChains.isEmpty()) {
+                mEffectChains[0]->setVolume_l(&vl, &vr);
+                left = (float)vl / (1 << 24);
+                right = (float)vr / (1 << 24);
+            }
+            if (mOutput->stream->set_volume) {
+                mOutput->stream->set_volume(mOutput->stream, left, right);
+            }
+        }
+    }
+}
+
+
 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
     Vector< sp<Track> > *tracksToRemove
 )
@@ -3146,10 +3472,16 @@
         } else {
             minFrames = 1;
         }
+        // Only consider last track started for volume and mixer state control.
+        // This is the last entry in mActiveTracks unless a track underruns.
+        // As we only care about the transition phase between two tracks on a
+        // direct output, it is not a problem to ignore the underrun case.
+        bool last = (i == (count - 1));
+
         if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
-            ALOGVV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
+            ALOGVV("track %d s=%08x [OK]", track->name(), cblk->mServer);
 
             if (track->mFillingUpStatus == Track::FS_FILLED) {
                 track->mFillingUpStatus = Track::FS_ACTIVE;
@@ -3160,52 +3492,8 @@
             }
 
             // compute volume for this track
-            float left, right;
-            if (mMasterMute || track->isPausing() || mStreamTypes[track->streamType()].mute) {
-                left = right = 0;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
-            } else {
-                float typeVolume = mStreamTypes[track->streamType()].volume;
-                float v = mMasterVolume * typeVolume;
-                uint32_t vlr = track->mServerProxy->getVolumeLR();
-                float v_clamped = v * (vlr & 0xFFFF);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                left = v_clamped/MAX_GAIN;
-                v_clamped = v * (vlr >> 16);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                right = v_clamped/MAX_GAIN;
-            }
-            // Only consider last track started for volume and mixer state control.
-            // This is the last entry in mActiveTracks unless a track underruns.
-            // As we only care about the transition phase between two tracks on a
-            // direct output, it is not a problem to ignore the underrun case.
-            if (i == (count - 1)) {
-                if (left != mLeftVolFloat || right != mRightVolFloat) {
-                    mLeftVolFloat = left;
-                    mRightVolFloat = right;
-
-                    // Convert volumes from float to 8.24
-                    uint32_t vl = (uint32_t)(left * (1 << 24));
-                    uint32_t vr = (uint32_t)(right * (1 << 24));
-
-                    // Delegate volume control to effect in track effect chain if needed
-                    // only one effect chain can be present on DirectOutputThread, so if
-                    // there is one, the track is connected to it
-                    if (!mEffectChains.isEmpty()) {
-                        // Do not ramp volume if volume is controlled by effect
-                        mEffectChains[0]->setVolume_l(&vl, &vr);
-                        left = (float)vl / (1 << 24);
-                        right = (float)vr / (1 << 24);
-                    }
-                    mOutput->stream->set_volume(mOutput->stream, left, right);
-                }
-
+            processVolume_l(track, last);
+            if (last) {
                 // reset retry count
                 track->mRetryCount = kMaxTrackRetriesDirect;
                 mActiveTrack = t;
@@ -3218,7 +3506,7 @@
                 mEffectChains[0]->clearInputBuffer();
             }
 
-            ALOGVV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
+            ALOGVV("track %d s=%08x [NOT READY]", track->name(), cblk->mServer);
             if ((track->sharedBuffer() != 0) || track->isTerminated() ||
                     track->isStopped() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
@@ -3239,7 +3527,7 @@
                 if (--(track->mRetryCount) <= 0) {
                     ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
                     tracksToRemove->add(track);
-                } else if (i == (count -1)){
+                } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
             }
@@ -3247,35 +3535,21 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i = 0 ; i < count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (!mEffectChains.isEmpty()) {
-                ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
-                      track->sessionId());
-                mEffectChains[0]->decActiveTrackCnt();
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     return mixerStatus;
 }
 
 void AudioFlinger::DirectOutputThread::threadLoop_mix()
 {
-    AudioBufferProvider::Buffer buffer;
     size_t frameCount = mFrameCount;
     int8_t *curBuf = (int8_t *)mMixBuffer;
     // output audio to hardware
     while (frameCount) {
+        AudioBufferProvider::Buffer buffer;
         buffer.frameCount = frameCount;
         mActiveTrack->getNextBuffer(&buffer);
-        if (CC_UNLIKELY(buffer.raw == NULL)) {
+        if (buffer.raw == NULL) {
             memset(curBuf, 0, frameCount * mFrameSize);
             break;
         }
@@ -3284,10 +3558,10 @@
         curBuf += buffer.frameCount * mFrameSize;
         mActiveTrack->releaseBuffer(&buffer);
     }
+    mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer;
     sleepTime = 0;
     standbyTime = systemTime() + standbyDelay;
     mActiveTrack.clear();
-
 }
 
 void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
@@ -3408,6 +3682,307 @@
 
 // ----------------------------------------------------------------------------
 
+AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
+        const sp<AudioFlinger::OffloadThread>& offloadThread)
+    :   Thread(false /*canCallJava*/),
+        mOffloadThread(offloadThread),
+        mWriteBlocked(false),
+        mDraining(false)
+{
+}
+
+AudioFlinger::AsyncCallbackThread::~AsyncCallbackThread()
+{
+}
+
+void AudioFlinger::AsyncCallbackThread::onFirstRef()
+{
+    run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+bool AudioFlinger::AsyncCallbackThread::threadLoop()
+{
+    while (!exitPending()) {
+        bool writeBlocked;
+        bool draining;
+
+        {
+            Mutex::Autolock _l(mLock);
+            mWaitWorkCV.wait(mLock);
+            if (exitPending()) {
+                break;
+            }
+            writeBlocked = mWriteBlocked;
+            draining = mDraining;
+            ALOGV("AsyncCallbackThread mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+        }
+        {
+            sp<AudioFlinger::OffloadThread> offloadThread = mOffloadThread.promote();
+            if (offloadThread != 0) {
+                if (writeBlocked == false) {
+                    offloadThread->setWriteBlocked(false);
+                }
+                if (draining == false) {
+                    offloadThread->setDraining(false);
+                }
+            }
+        }
+    }
+    return false;
+}
+
+void AudioFlinger::AsyncCallbackThread::exit()
+{
+    ALOGV("AsyncCallbackThread::exit");
+    Mutex::Autolock _l(mLock);
+    requestExit();
+    mWaitWorkCV.broadcast();
+}
+
+void AudioFlinger::AsyncCallbackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::AsyncCallbackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+
+// ----------------------------------------------------------------------------
+AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
+    :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
+        mHwPaused(false),
+        mPausedBytesRemaining(0)
+{
+    mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
+}
+
+AudioFlinger::OffloadThread::~OffloadThread()
+{
+    mPreviousTrack.clear();
+}
+
+void AudioFlinger::OffloadThread::threadLoop_exit()
+{
+    if (mFlushPending || mHwPaused) {
+        // If a flush is pending or track was paused, just discard buffered data
+        flushHw_l();
+    } else {
+        mMixerStatus = MIXER_DRAIN_ALL;
+        threadLoop_drain();
+    }
+    mCallbackThread->exit();
+    PlaybackThread::threadLoop_exit();
+}
+
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTracks_l(
+    Vector< sp<Track> > *tracksToRemove
+)
+{
+    ALOGV("OffloadThread::prepareTracks_l");
+    size_t count = mActiveTracks.size();
+
+    mixer_state mixerStatus = MIXER_IDLE;
+    if (mFlushPending) {
+        flushHw_l();
+        mFlushPending = false;
+    }
+    // find out which tracks need to be processed
+    for (size_t i = 0; i < count; i++) {
+        sp<Track> t = mActiveTracks[i].promote();
+        // The track died recently
+        if (t == 0) {
+            continue;
+        }
+        Track* const track = t.get();
+        audio_track_cblk_t* cblk = track->cblk();
+        if (mPreviousTrack != NULL) {
+            if (t != mPreviousTrack) {
+                // Flush any data still being written from last track
+                mBytesRemaining = 0;
+                if (mPausedBytesRemaining) {
+                    // Last track was paused so we also need to flush saved
+                    // mixbuffer state and invalidate track so that it will
+                    // re-submit that unwritten data when it is next resumed
+                    mPausedBytesRemaining = 0;
+                    // Invalidate is a bit drastic - would be more efficient
+                    // to have a flag to tell client that some of the
+                    // previously written data was lost
+                    mPreviousTrack->invalidate();
+                }
+            }
+        }
+        mPreviousTrack = t;
+        bool last = (i == (count - 1));
+        if (track->isPausing()) {
+            track->setPaused();
+            if (last) {
+                if (!mHwPaused) {
+                    mOutput->stream->pause(mOutput->stream);
+                    mHwPaused = true;
+                }
+                // If we were part way through writing the mixbuffer to
+                // the HAL we must save this until we resume
+                // BUG - this will be wrong if a different track is made active,
+                // in that case we want to discard the pending data in the
+                // mixbuffer and tell the client to present it again when the
+                // track is resumed
+                mPausedWriteLength = mCurrentWriteLength;
+                mPausedBytesRemaining = mBytesRemaining;
+                mBytesRemaining = 0;    // stop writing
+            }
+            tracksToRemove->add(track);
+        } else if (track->framesReady() && track->isReady() &&
+                !track->isPaused() && !track->isTerminated()) {
+            ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->mServer);
+            if (track->mFillingUpStatus == Track::FS_FILLED) {
+                track->mFillingUpStatus = Track::FS_ACTIVE;
+                mLeftVolFloat = mRightVolFloat = 0;
+                if (track->mState == TrackBase::RESUMING) {
+                    if (mPausedBytesRemaining) {
+                        // Need to continue write that was interrupted
+                        mCurrentWriteLength = mPausedWriteLength;
+                        mBytesRemaining = mPausedBytesRemaining;
+                        mPausedBytesRemaining = 0;
+                    }
+                    track->mState = TrackBase::ACTIVE;
+                }
+            }
+
+            if (last) {
+                if (mHwPaused) {
+                    mOutput->stream->resume(mOutput->stream);
+                    mHwPaused = false;
+                    // threadLoop_mix() will handle the case that we need to
+                    // resume an interrupted write
+                }
+                // reset retry count
+                track->mRetryCount = kMaxTrackRetriesOffload;
+                mActiveTrack = t;
+                mixerStatus = MIXER_TRACKS_READY;
+            }
+        } else {
+            ALOGVV("OffloadThread: track %d s=%08x [NOT READY]", track->name(), cblk->mServer);
+            if (track->isStopping_1()) {
+                // Hardware buffer can hold a large amount of audio so we must
+                // wait for all of the current track's data to drain before we say
+                // that the track is stopped.
+                if (mBytesRemaining == 0) {
+                    // Only start draining when all data in mixbuffer
+                    // has been written
+                    ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
+                    track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
+                    sleepTime = 0;
+                    standbyTime = systemTime() + standbyDelay;
+                    if (last) {
+                        mixerStatus = MIXER_DRAIN_TRACK;
+                        if (mHwPaused) {
+                            // It is possible to move from PAUSED to STOPPING_1 without
+                            // a resume so we must ensure hardware is running
+                            mOutput->stream->resume(mOutput->stream);
+                            mHwPaused = false;
+                        }
+                    }
+                }
+            } else if (track->isStopping_2()) {
+                // Drain has completed, signal presentation complete
+                if (!mDraining || !last) {
+                    track->mState = TrackBase::STOPPED;
+                    size_t audioHALFrames =
+                            (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                    size_t framesWritten =
+                            mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+                    track->presentationComplete(framesWritten, audioHALFrames);
+                    track->reset();
+                    tracksToRemove->add(track);
+                }
+            } else {
+                // No buffers for this track. Give it a few chances to
+                // fill a buffer, then remove it from active list.
+                if (--(track->mRetryCount) <= 0) {
+                    ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
+                          track->name());
+                    tracksToRemove->add(track);
+                } else if (last){
+                    mixerStatus = MIXER_TRACKS_ENABLED;
+                }
+            }
+        }
+        // compute volume for this track
+        processVolume_l(track, last);
+    }
+    // remove all the tracks that need to be...
+    removeTracks_l(*tracksToRemove);
+
+    return mixerStatus;
+}
+
+void AudioFlinger::OffloadThread::flushOutput_l()
+{
+    mFlushPending = true;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::waitingAsyncCallback_l()
+{
+    ALOGV("waitingAsyncCallback_l mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+    if (mUseAsyncWrite && (mWriteBlocked || mDraining)) {
+        return true;
+    }
+    return false;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::shouldStandby_l()
+{
+    bool TrackPaused = false;
+
+    // do not put the HAL in standby when paused. AwesomePlayer clears the offloaded AudioTrack
+    // after a timeout and we will enter standby then.
+    if (mTracks.size() > 0) {
+        TrackPaused = mTracks[mTracks.size() - 1]->isPaused();
+    }
+
+    return !mStandby && !TrackPaused;
+}
+
+
+bool AudioFlinger::OffloadThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
+void AudioFlinger::OffloadThread::flushHw_l()
+{
+    mOutput->stream->flush(mOutput->stream);
+    // Flush anything still waiting in the mixbuffer
+    mCurrentWriteLength = 0;
+    mBytesRemaining = 0;
+    mPausedWriteLength = 0;
+    mPausedBytesRemaining = 0;
+    if (mUseAsyncWrite) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
         AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
     :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
@@ -3434,6 +4009,7 @@
     }
     sleepTime = 0;
     writeFrames = mNormalFrameCount;
+    mCurrentWriteLength = mixBufferSize;
     standbyTime = systemTime() + standbyDelay;
 }
 
@@ -3457,12 +4033,12 @@
     }
 }
 
-void AudioFlinger::DuplicatingThread::threadLoop_write()
+ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
         outputTracks[i]->write(mMixBuffer, writeFrames);
     }
-    mBytesWritten += mixBufferSize;
+    return (ssize_t)mixBufferSize;
 }
 
 void AudioFlinger::DuplicatingThread::threadLoop_standby()
@@ -3583,7 +4159,7 @@
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
     mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
-    // mRsmpInIndex and mInputBytes set by readInputParameters()
+    // mRsmpInIndex and mBufferSize set by readInputParameters()
     mReqChannelCount(popcount(channelMask)),
     mReqSampleRate(sampleRate)
     // mBytesRead is only meaningful while active, and so is cleared in start()
@@ -3656,7 +4232,10 @@
                 continue;
             }
             if (mActiveTrack != 0) {
-                if (mActiveTrack->mState == TrackBase::PAUSING) {
+                if (mActiveTrack->isTerminated()) {
+                    removeTrack_l(mActiveTrack);
+                    mActiveTrack.clear();
+                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
                     standby();
                     mActiveTrack.clear();
                     mStartStopCond.broadcast();
@@ -3675,9 +4254,6 @@
                         mStartStopCond.broadcast();
                     }
                     mStandby = false;
-                } else if (mActiveTrack->mState == TrackBase::TERMINATED) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
                 }
             }
             lockEffectChains_l(effectChains);
@@ -3695,7 +4271,8 @@
             }
 
             buffer.frameCount = mFrameCount;
-            if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+            status_t status = mActiveTrack->getNextBuffer(&buffer);
+            if (status == NO_ERROR) {
                 readOnce = true;
                 size_t framesOut = buffer.frameCount;
                 if (mResampler == NULL) {
@@ -3710,8 +4287,7 @@
                                 framesIn = framesOut;
                             mRsmpInIndex += framesIn;
                             framesOut -= framesIn;
-                            if (mChannelCount == mReqChannelCount ||
-                                mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+                            if (mChannelCount == mReqChannelCount) {
                                 memcpy(dst, src, framesIn * mFrameSize);
                             } else {
                                 if (mChannelCount == 1) {
@@ -3725,9 +4301,7 @@
                         }
                         if (framesOut && mFrameCount == mRsmpInIndex) {
                             void *readInto;
-                            if (framesOut == mFrameCount &&
-                                (mChannelCount == mReqChannelCount ||
-                                        mFormat != AUDIO_FORMAT_PCM_16_BIT)) {
+                            if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) {
                                 readInto = buffer.raw;
                                 framesOut = 0;
                             } else {
@@ -3735,7 +4309,7 @@
                                 mRsmpInIndex = 0;
                             }
                             mBytesRead = mInput->stream->read(mInput->stream, readInto,
-                                    mInputBytes);
+                                    mBufferSize);
                             if (mBytesRead <= 0) {
                                 if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE))
                                 {
@@ -3760,7 +4334,8 @@
                 } else {
                     // resampling
 
-                    memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
+                    // resampler accumulates, but we only have one source track
+                    memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
                     // alter output frame count as if we were expecting stereo samples
                     if (mChannelCount == 1 && mReqChannelCount == 1) {
                         framesOut >>= 1;
@@ -3770,6 +4345,7 @@
                     // ditherAndClamp() works as long as all buffers returned by
                     // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true.
                     if (mChannelCount == 2 && mReqChannelCount == 1) {
+                        // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t
                         ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
                         // the resampler always outputs stereo samples:
                         // do post stereo to mono conversion
@@ -3778,6 +4354,7 @@
                     } else {
                         ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
                     }
+                    // now done with mRsmpOutBuffer
 
                 }
                 if (mFramestoDrop == 0) {
@@ -3856,7 +4433,7 @@
         audio_channel_mask_t channelMask,
         size_t frameCount,
         int sessionId,
-        IAudioFlinger::track_flags_t flags,
+        IAudioFlinger::track_flags_t *flags,
         pid_t tid,
         status_t *status)
 {
@@ -3869,6 +4446,57 @@
         goto Exit;
     }
 
+    // client expresses a preference for FAST, but we get the final say
+    if (*flags & IAudioFlinger::TRACK_FAST) {
+      if (
+            // use case: callback handler and frame count is default or at least as large as HAL
+            (
+                (tid != -1) &&
+                ((frameCount == 0) ||
+                (frameCount >= (mFrameCount * kFastTrackMultiplier)))
+            ) &&
+            // FIXME when record supports non-PCM data, also check for audio_is_linear_pcm(format)
+            // mono or stereo
+            ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
+              (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+            // hardware sample rate
+            (sampleRate == mSampleRate) &&
+            // record thread has an associated fast recorder
+            hasFastRecorder()
+            // FIXME test that RecordThread for this fast track has a capable output HAL
+            // FIXME add a permission test also?
+        ) {
+        // if frameCount not specified, then it defaults to fast recorder (HAL) frame count
+        if (frameCount == 0) {
+            frameCount = mFrameCount * kFastTrackMultiplier;
+        }
+        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+                frameCount, mFrameCount);
+      } else {
+        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d "
+                "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
+                "hasFastRecorder=%d tid=%d",
+                frameCount, mFrameCount, format,
+                audio_is_linear_pcm(format),
+                channelMask, sampleRate, mSampleRate, hasFastRecorder(), tid);
+        *flags &= ~IAudioFlinger::TRACK_FAST;
+        // For compatibility with AudioRecord calculation, buffer depth is forced
+        // to be at least 2 x the record thread frame count and cover audio hardware latency.
+        // This is probably too conservative, but legacy application code may depend on it.
+        // If you change this calculation, also review the start threshold which is related.
+        uint32_t latencyMs = 50; // FIXME mInput->stream->get_latency(mInput->stream);
+        size_t mNormalFrameCount = 2048; // FIXME
+        uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
+        if (minBufCount < 2) {
+            minBufCount = 2;
+        }
+        size_t minFrameCount = mNormalFrameCount * minBufCount;
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
+        }
+      }
+    }
+
     // FIXME use flags and tid similar to createTrack_l()
 
     { // scope for mLock
@@ -3888,6 +4516,13 @@
                         mAudioFlinger->btNrecIsOff();
         setEffectSuspended_l(FX_IID_AEC, suspend, sessionId);
         setEffectSuspended_l(FX_IID_NS, suspend, sessionId);
+
+        if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
+            pid_t callingPid = IPCThreadState::self()->getCallingPid();
+            // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
+            // so ask activity manager to do this on our behalf
+            sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+        }
     }
     lStatus = NO_ERROR;
 
@@ -3969,6 +4604,7 @@
         ALOGV("Record started OK");
         return status;
     }
+
 startError:
     AudioSystem::stopInput(mId);
     clearSyncStartEvent();
@@ -4003,8 +4639,9 @@
     }
 }
 
-bool AudioFlinger::RecordThread::stop_l(RecordThread::RecordTrack* recordTrack) {
+bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
+    AutoMutex _l(mLock);
     if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) {
         return false;
     }
@@ -4055,7 +4692,8 @@
 // destroyTrack_l() must be called with ThreadBase::mLock held
 void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
+    track->mState = TrackBase::STOPPED;
     // active tracks are removed by threadLoop()
     if (mActiveTrack != track) {
         removeTrack_l(track);
@@ -4087,7 +4725,7 @@
     if (mActiveTrack != 0) {
         snprintf(buffer, SIZE, "In index: %d\n", mRsmpInIndex);
         result.append(buffer);
-        snprintf(buffer, SIZE, "In size: %d\n", mInputBytes);
+        snprintf(buffer, SIZE, "Buffer size: %u bytes\n", mBufferSize);
         result.append(buffer);
         snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
         result.append(buffer);
@@ -4140,7 +4778,7 @@
     int channelCount;
 
     if (framesReady == 0) {
-        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mInputBytes);
+        mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize);
         if (mBytesRead <= 0) {
             if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) {
                 ALOGE("RecordThread::getNextBuffer() Error reading audio input");
@@ -4196,8 +4834,12 @@
             reconfig = true;
         }
         if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-            reqFormat = (audio_format_t) value;
-            reconfig = true;
+            if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
+                status = BAD_VALUE;
+            } else {
+                reqFormat = (audio_format_t) value;
+                reconfig = true;
+            }
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
             reqChannelCount = popcount(value);
@@ -4289,16 +4931,13 @@
 
 String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
 {
-    char *s;
-    String8 out_s8 = String8();
-
     Mutex::Autolock _l(mLock);
     if (initCheck() != NO_ERROR) {
-        return out_s8;
+        return String8();
     }
 
-    s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
-    out_s8 = String8(s);
+    char *s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
+    const String8 out_s8(s);
     free(s);
     return out_s8;
 }
@@ -4310,7 +4949,7 @@
     switch (event) {
     case AudioSystem::INPUT_OPENED:
     case AudioSystem::INPUT_CONFIG_CHANGED:
-        desc.channels = mChannelMask;
+        desc.channelMask = mChannelMask;
         desc.samplingRate = mSampleRate;
         desc.format = mFormat;
         desc.frameCount = mFrameCount;
@@ -4336,12 +4975,14 @@
 
     mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
     mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
-    mChannelCount = (uint16_t)popcount(mChannelMask);
+    mChannelCount = popcount(mChannelMask);
     mFormat = mInput->stream->common.get_format(&mInput->stream->common);
+    if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+        ALOGE("HAL format %d not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
+    }
     mFrameSize = audio_stream_frame_size(&mInput->stream->common);
-    mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
-    mFrameCount = mInputBytes / mFrameSize;
-    mNormalFrameCount = mFrameCount; // not used by record, but used by input effects
+    mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
+    mFrameCount = mBufferSize / mFrameSize;
     mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
 
     if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
@@ -4357,7 +4998,7 @@
         mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
         mResampler->setSampleRate(mSampleRate);
         mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN);
-        mRsmpOutBuffer = new int32_t[mFrameCount * 2];
+        mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2];
 
         // optmization: if mono to mono, alter input frame count as if we were inputing
         // stereo samples
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7de6872..31d5323 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -28,7 +28,8 @@
         MIXER,              // Thread class is MixerThread
         DIRECT,             // Thread class is DirectOutputThread
         DUPLICATING,        // Thread class is DuplicatingThread
-        RECORD              // Thread class is RecordThread
+        RECORD,             // Thread class is RecordThread
+        OFFLOAD             // Thread class is OffloadThread
     };
 
     ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
@@ -125,10 +126,9 @@
                 audio_channel_mask_t channelMask() const { return mChannelMask; }
                 audio_format_t format() const { return mFormat; }
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
-                // and returns the normal mix buffer's frame count.
-                size_t      frameCount() const { return mNormalFrameCount; }
-                // Return's the HAL's frame count i.e. fast mixer buffer size.
-                size_t      frameCountHAL() const { return mFrameCount; }
+                // and returns the [normal mix] buffer's frame count.
+    virtual     size_t      frameCount() const = 0;
+                size_t      frameSize() const { return mFrameSize; }
 
     // Should be "virtual status_t requestExitAndWait()" and override same
     // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
@@ -184,6 +184,8 @@
                 void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
                 // unlock effect chains after process
                 void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
+                // get a copy of mEffectChains vector
+                Vector< sp<EffectChain> > getEffectChains_l() const { return mEffectChains; }
                 // set audio mode to all effect chains
                 void setMode(audio_mode_t mode);
                 // get effect module with corresponding ID on specified audio session
@@ -259,11 +261,13 @@
                 Condition               mWaitWorkCV;
 
                 const sp<AudioFlinger>  mAudioFlinger;
+
+                // updated by PlaybackThread::readOutputParameters() or
+                // RecordThread::readInputParameters()
                 uint32_t                mSampleRate;
                 size_t                  mFrameCount;       // output HAL, direct output, record
-                size_t                  mNormalFrameCount; // normal mixer and effects
                 audio_channel_mask_t    mChannelMask;
-                uint16_t                mChannelCount;
+                uint32_t                mChannelCount;
                 size_t                  mFrameSize;
                 audio_format_t          mFormat;
 
@@ -290,6 +294,7 @@
                 Vector<String8>         mNewParameters;
                 status_t                mParamStatus;
 
+                // vector owns each ConfigEvent *, so must delete after removing
                 Vector<ConfigEvent *>     mConfigEvents;
 
                 // These fields are written and read by thread itself without lock or barrier,
@@ -328,11 +333,19 @@
     enum mixer_state {
         MIXER_IDLE,             // no active tracks
         MIXER_TRACKS_ENABLED,   // at least one active track, but no track has any data ready
-        MIXER_TRACKS_READY      // at least one active track, and at least one track has data
+        MIXER_TRACKS_READY,     // at least one active track, and at least one track has data
+        MIXER_DRAIN_TRACK,      // drain currently playing track
+        MIXER_DRAIN_ALL,        // fully drain the hardware
         // standby mode does not have an enum value
         // suspend by audio policy manager is orthogonal to mixer state
     };
 
+    // retry count before removing active track in case of underrun on offloaded thread:
+    // we need to make sure that AudioTrack client has enough time to send large buffers
+    // FIXME may be more appropriate if expressed in time units. Need to revise how underrun
+    // is handled for offloaded tracks
+    static const int8_t kMaxTrackRetriesOffload = 20;
+
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                    audio_io_handle_t id, audio_devices_t device, type_t type);
     virtual             ~PlaybackThread();
@@ -350,8 +363,10 @@
     // Code snippets that were lifted up out of threadLoop()
     virtual     void        threadLoop_mix() = 0;
     virtual     void        threadLoop_sleepTime() = 0;
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
+    virtual     void        threadLoop_drain();
     virtual     void        threadLoop_standby();
+    virtual     void        threadLoop_exit();
     virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
 
                 // prepareTracks_l reads and writes mActiveTracks, and returns
@@ -359,6 +374,19 @@
                 // is responsible for clearing or destroying this Vector later on, when it
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
+                void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+
+                void        writeCallback();
+                void        setWriteBlocked(bool value);
+                void        drainCallback();
+                void        setDraining(bool value);
+
+    static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -429,11 +457,21 @@
 
                 virtual status_t setSyncEvent(const sp<SyncEvent>& event);
                 virtual bool     isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+                // called with AudioFlinger lock held
                         void     invalidateTracks(audio_stream_type_t streamType);
 
+    virtual     size_t      frameCount() const { return mNormalFrameCount; }
+
+                // Return's the HAL's frame count i.e. fast mixer buffer size.
+                size_t      frameCountHAL() const { return mFrameCount; }
 
 protected:
-    int16_t*                        mMixBuffer;
+    // updated by readOutputParameters()
+    size_t                          mNormalFrameCount;  // normal mixer and effects
+
+    int16_t*                        mMixBuffer;         // frame size aligned mix buffer
+    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -486,8 +524,9 @@
     PlaybackThread& operator = (const PlaybackThread&);
 
     status_t    addTrack_l(const sp<Track>& track);
-    void        destroyTrack_l(const sp<Track>& track);
+    bool        destroyTrack_l(const sp<Track>& track);
     void        removeTrack_l(const sp<Track>& track);
+    void        signal_l();
 
     void        readOutputParameters();
 
@@ -535,6 +574,14 @@
     // DUPLICATING only
     uint32_t                        writeFrames;
 
+    size_t                          mBytesRemaining;
+    size_t                          mCurrentWriteLength;
+    bool                            mUseAsyncWrite;
+    bool                            mWriteBlocked;
+    bool                            mDraining;
+    bool                            mSignalPending;
+    sp<AsyncCallbackThread>         mCallbackThread;
+
 private:
     // The HAL output sink is treated as non-blocking, but current implementation is blocking
     sp<NBAIO_Sink>          mOutputSink;
@@ -558,7 +605,7 @@
 protected:
                 // accessed by both binder threads and within threadLoop(), lock on mutex needed
                 unsigned    mFastTrackAvailMask;    // bit i set if fast track [i] is available
-
+    virtual     void        flushOutput_l();
 };
 
 class MixerThread : public PlaybackThread {
@@ -584,7 +631,7 @@
     virtual     void        cacheParameters_l();
 
     // threadLoop snippets
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
@@ -641,17 +688,73 @@
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
 
-private:
     // volumes last sent to audio HAL with stream->set_volume()
     float mLeftVolFloat;
     float mRightVolFloat;
 
+    DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device, ThreadBase::type_t type);
+    void processVolume_l(Track *track, bool lastTrack);
+
     // prepareTracks_l() tells threadLoop_mix() the name of the single active track
     sp<Track>               mActiveTrack;
 public:
     virtual     bool        hasFastMixer() const { return false; }
 };
 
+class OffloadThread : public DirectOutputThread {
+public:
+
+    OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device);
+    virtual                 ~OffloadThread();
+
+protected:
+    // threadLoop snippets
+    virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+    virtual     void        threadLoop_exit();
+    virtual     void        flushOutput_l();
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
+private:
+                void        flushHw_l();
+
+private:
+    bool        mHwPaused;
+    bool        mFlushPending;
+    size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
+    size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
+    sp<Track>   mPreviousTrack;         // used to detect track switch
+};
+
+class AsyncCallbackThread : public Thread {
+public:
+
+    AsyncCallbackThread(const sp<OffloadThread>& offloadThread);
+
+    virtual             ~AsyncCallbackThread();
+
+    // Thread virtuals
+    virtual bool        threadLoop();
+
+    // RefBase
+    virtual void        onFirstRef();
+
+            void        exit();
+            void        setWriteBlocked(bool value);
+            void        setDraining(bool value);
+
+private:
+    wp<OffloadThread>   mOffloadThread;
+    bool                mWriteBlocked;
+    bool                mDraining;
+    Condition           mWaitWorkCV;
+    Mutex               mLock;
+};
+
 class DuplicatingThread : public MixerThread {
 public:
     DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
@@ -671,7 +774,7 @@
     // threadLoop snippets
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        cacheParameters_l();
 
@@ -734,7 +837,7 @@
                     audio_channel_mask_t channelMask,
                     size_t frameCount,
                     int sessionId,
-                    IAudioFlinger::track_flags_t flags,
+                    IAudioFlinger::track_flags_t *flags,
                     pid_t tid,
                     status_t *status);
 
@@ -744,7 +847,7 @@
 
             // ask the thread to stop the specified track, and
             // return true if the caller should then do it's part of the stopping process
-            bool        stop_l(RecordTrack* recordTrack);
+            bool        stop(RecordTrack* recordTrack);
 
             void        dump(int fd, const Vector<String16>& args);
             AudioStreamIn* clearInput();
@@ -775,6 +878,9 @@
     static void syncStartEventCallback(const wp<SyncEvent>& event);
            void handleSyncStartEvent(const sp<SyncEvent>& event);
 
+    virtual size_t      frameCount() const { return mFrameCount; }
+            bool        hasFastRecorder() const { return false; }
+
 private:
             void clearSyncStartEvent();
 
@@ -790,11 +896,14 @@
             // is used together with mStartStopCond to indicate start()/stop() progress
             sp<RecordTrack>                     mActiveTrack;
             Condition                           mStartStopCond;
+
+            // updated by RecordThread::readInputParameters()
             AudioResampler                      *mResampler;
+            // interleaved stereo pairs of fixed-point signed Q19.12
             int32_t                             *mRsmpOutBuffer;
-            int16_t                             *mRsmpInBuffer;
+            int16_t                             *mRsmpInBuffer; // [mFrameCount * mChannelCount]
             size_t                              mRsmpInIndex;
-            size_t                              mInputBytes;
+            size_t                              mBufferSize;    // stream buffer size for read()
             const uint32_t                      mReqChannelCount;
             const uint32_t                      mReqSampleRate;
             ssize_t                             mBytesRead;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index fac7071..523e4b2 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -25,10 +25,10 @@
 public:
     enum track_state {
         IDLE,
-        TERMINATED,
         FLUSHED,
         STOPPED,
-        // next 2 states are currently used for fast tracks only
+        // next 2 states are currently used for fast tracks
+        // and offloaded tracks only
         STOPPING_1,     // waiting for first underrun
         STOPPING_2,     // waiting for presentation complete
         RESUMING,
@@ -74,7 +74,7 @@
 
     audio_channel_mask_t channelMask() const { return mChannelMask; }
 
-    uint32_t sampleRate() const; // FIXME inline after cblk sr moved
+    virtual uint32_t sampleRate() const { return mSampleRate; }
 
     // Return a pointer to the start of a contiguous slice of the track buffer.
     // Parameter 'offset' is the requested start position, expressed in
@@ -89,7 +89,7 @@
         return (mState == STOPPED || mState == FLUSHED);
     }
 
-    // for fast tracks only
+    // for fast tracks and offloaded tracks only
     bool isStopping() const {
         return mState == STOPPING_1 || mState == STOPPING_2;
     }
@@ -101,11 +101,12 @@
     }
 
     bool isTerminated() const {
-        return mState == TERMINATED;
+        return mTerminated;
     }
 
-    bool step();    // mStepCount is an implicit input
-    void reset();
+    void terminate() {
+        mTerminated = true;
+    }
 
     bool isOut() const { return mIsOut; }
                                     // true for Track and TimedTrack, false for RecordTrack,
@@ -117,24 +118,19 @@
     audio_track_cblk_t* mCblk;
     void*               mBuffer;    // start of track buffer, typically in shared memory
                                     // except for OutputTrack when it is in local memory
-    void*               mBufferEnd; // &mBuffer[mFrameCount * frameSize], where frameSize
-                                    //   is based on mChannelCount and 16-bit samples
-    uint32_t            mStepCount; // saves AudioBufferProvider::Buffer::frameCount as of
-                                    // time of releaseBuffer() for later use by step()
     // we don't really need a lock for these
     track_state         mState;
     const uint32_t      mSampleRate;    // initial sample rate only; for tracks which
                         // support dynamic rates, the current value is in control block
     const audio_format_t mFormat;
     const audio_channel_mask_t mChannelMask;
-    const uint8_t       mChannelCount;
+    const uint32_t      mChannelCount;
     const size_t        mFrameSize; // AudioFlinger's view of frame size in shared memory,
                                     // where for AudioTrack (but not AudioRecord),
                                     // 8-bit PCM samples are stored as 16-bit
     const size_t        mFrameCount;// size of track buffer given at createTrack() or
                                     // openRecord(), and then adjusted as needed
 
-    bool                mStepServerFailed;
     const int           mSessionId;
     Vector < sp<SyncEvent> >mSyncEvents;
     const bool          mIsOut;
@@ -142,4 +138,5 @@
     const int           mId;
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
+    bool                mTerminated;
 };
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5ac3129..e676365 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -19,8 +19,8 @@
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
 
+#include "Configuration.h"
 #include <math.h>
-#include <cutils/compiler.h>
 #include <utils/Log.h>
 
 #include <private/media/AudioTrackShared.h>
@@ -74,8 +74,6 @@
         mClient(client),
         mCblk(NULL),
         // mBuffer
-        // mBufferEnd
-        mStepCount(0),
         mState(IDLE),
         mSampleRate(sampleRate),
         mFormat(format),
@@ -84,11 +82,11 @@
         mFrameSize(audio_is_linear_pcm(format) ?
                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
         mFrameCount(frameCount),
-        mStepServerFailed(false),
         mSessionId(sessionId),
         mIsOut(isOut),
         mServerProxy(NULL),
-        mId(android_atomic_inc(&nextTrackId))
+        mId(android_atomic_inc(&nextTrackId)),
+        mTerminated(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -98,7 +96,7 @@
 
     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
     size_t size = sizeof(audio_track_cblk_t);
-    size_t bufferSize = frameCount * mFrameSize;
+    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
     if (sharedBuffer == 0) {
         size += bufferSize;
     }
@@ -124,22 +122,15 @@
         new(mCblk) audio_track_cblk_t();
         // clear all buffers
         mCblk->frameCount_ = frameCount;
-// uncomment the following lines to quickly test 32-bit wraparound
-//      mCblk->user = 0xffff0000;
-//      mCblk->server = 0xffff0000;
-//      mCblk->userBase = 0xffff0000;
-//      mCblk->serverBase = 0xffff0000;
         if (sharedBuffer == 0) {
             mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
             memset(mBuffer, 0, bufferSize);
-            // Force underrun condition to avoid false underrun callback until first data is
-            // written to buffer (other flags are cleared)
-            mCblk->flags = CBLK_UNDERRUN;
         } else {
             mBuffer = sharedBuffer->pointer();
+#if 0
+            mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
+#endif
         }
-        mBufferEnd = (uint8_t *)mBuffer + bufferSize;
-        mServerProxy = new ServerProxy(mCblk, mBuffer, frameCount, mFrameSize, isOut);
 
 #ifdef TEE_SINK
         if (mTeeSinkTrackEnabled) {
@@ -199,51 +190,12 @@
     }
 #endif
 
-    buffer->raw = NULL;
-    mStepCount = buffer->frameCount;
-    // FIXME See note at getNextBuffer()
-    (void) step();      // ignore return value of step()
+    ServerProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
     buffer->frameCount = 0;
-}
-
-bool AudioFlinger::ThreadBase::TrackBase::step() {
-    bool result = mServerProxy->step(mStepCount);
-    if (!result) {
-        ALOGV("stepServer failed acquiring cblk mutex");
-        mStepServerFailed = true;
-    }
-    return result;
-}
-
-void AudioFlinger::ThreadBase::TrackBase::reset() {
-    audio_track_cblk_t* cblk = this->cblk();
-
-    cblk->user = 0;
-    cblk->server = 0;
-    cblk->userBase = 0;
-    cblk->serverBase = 0;
-    mStepServerFailed = false;
-    ALOGV("TrackBase::reset");
-}
-
-uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
-    return mServerProxy->getSampleRate();
-}
-
-void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
-    audio_track_cblk_t* cblk = this->cblk();
-    int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
-    int8_t *bufferEnd = bufferStart + frames * mFrameSize;
-
-    // Check validity of returned pointer in case the track control block would have been corrupted.
-    ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
-            "TrackBase::getBuffer buffer out of range:\n"
-                "    start: %p, end %p , mBuffer %p mBufferEnd %p\n"
-                "    server %u, serverBase %u, user %u, userBase %u, frameSize %u",
-                bufferStart, bufferEnd, mBuffer, mBufferEnd,
-                cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
-
-    return bufferStart;
+    buffer->raw = NULL;
+    mServerProxy->releaseBuffer(&buf);
 }
 
 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
@@ -327,6 +279,10 @@
         xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
 }
 
+status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
+    return mTrack->setParameters(keyValuePairs);
+}
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -360,20 +316,29 @@
     mPresentationCompleteFrames(0),
     mFlags(flags),
     mFastIndex(-1),
-    mUnderrunCount(0),
     mCachedVolume(1.0),
-    mIsInvalid(false)
+    mIsInvalid(false),
+    mAudioTrackServerProxy(NULL),
+    mResumeToStopping(false)
 {
     if (mCblk != NULL) {
+        if (sharedBuffer == 0) {
+            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                    mFrameSize);
+        } else {
+            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                    mFrameSize);
+        }
+        mServerProxy = mAudioTrackServerProxy;
         // to avoid leaking a track name, do not allocate one unless there is an mCblk
         mName = thread->getTrackName_l(channelMask, sessionId);
-        mCblk->mName = mName;
         if (mName < 0) {
             ALOGE("no more track names available");
             return;
         }
         // only allocate a fast track index if we were able to allocate a normal track name
         if (flags & IAudioFlinger::TRACK_FAST) {
+            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
             ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
             int i = __builtin_ctz(thread->mFastTrackAvailMask);
             ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
@@ -382,7 +347,6 @@
             //       this means we are potentially denying other more important fast tracks from
             //       being created.  It would be better to allocate the index dynamically.
             mFastIndex = i;
-            mCblk->mName = i;
             // Read the initial underruns because this field is never cleared by the fast mixer
             mObservedUnderruns = thread->getFastTrackUnderruns(i);
             thread->mFastTrackAvailMask &= ~(1 << i);
@@ -411,33 +375,25 @@
     { // scope for mLock
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            if (!isOutputTrack()) {
-                if (mState == ACTIVE || mState == RESUMING) {
-                    AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-
-#ifdef ADD_BATTERY_DATA
-                    // to track the speaker usage
-                    addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-                }
-                AudioSystem::releaseOutput(thread->id());
-            }
             Mutex::Autolock _l(thread->mLock);
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->destroyTrack_l(this);
+            bool wasActive = playbackThread->destroyTrack_l(this);
+            if (!isOutputTrack() && !wasActive) {
+                AudioSystem::releaseOutput(thread->id());
+            }
         }
     }
 }
 
 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.append("   Name Client Type Fmt Chn mask   Session StpCnt fCount S F SRate  "
-                  "L dB  R dB    Server      User     Main buf    Aux Buf  Flags Underruns\n");
+    result.append("   Name Client Type Fmt Chn mask Session fCount S F SRate  "
+                  "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
 }
 
 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
 {
-    uint32_t vlr = mServerProxy->getVolumeLR();
+    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
     if (isFastTrack()) {
         sprintf(buffer, "   F %2d", mFastIndex);
     } else {
@@ -445,40 +401,41 @@
     }
     track_state state = mState;
     char stateChar;
-    switch (state) {
-    case IDLE:
-        stateChar = 'I';
-        break;
-    case TERMINATED:
+    if (isTerminated()) {
         stateChar = 'T';
-        break;
-    case STOPPING_1:
-        stateChar = 's';
-        break;
-    case STOPPING_2:
-        stateChar = '5';
-        break;
-    case STOPPED:
-        stateChar = 'S';
-        break;
-    case RESUMING:
-        stateChar = 'R';
-        break;
-    case ACTIVE:
-        stateChar = 'A';
-        break;
-    case PAUSING:
-        stateChar = 'p';
-        break;
-    case PAUSED:
-        stateChar = 'P';
-        break;
-    case FLUSHED:
-        stateChar = 'F';
-        break;
-    default:
-        stateChar = '?';
-        break;
+    } else {
+        switch (state) {
+        case IDLE:
+            stateChar = 'I';
+            break;
+        case STOPPING_1:
+            stateChar = 's';
+            break;
+        case STOPPING_2:
+            stateChar = '5';
+            break;
+        case STOPPED:
+            stateChar = 'S';
+            break;
+        case RESUMING:
+            stateChar = 'R';
+            break;
+        case ACTIVE:
+            stateChar = 'A';
+            break;
+        case PAUSING:
+            stateChar = 'p';
+            break;
+        case PAUSED:
+            stateChar = 'P';
+            break;
+        case FLUSHED:
+            stateChar = 'F';
+            break;
+        default:
+            stateChar = '?';
+            break;
+        }
     }
     char nowInUnderrun;
     switch (mObservedUnderruns.mBitFields.mMostRecent) {
@@ -495,76 +452,45 @@
         nowInUnderrun = '?';
         break;
     }
-    snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
-            "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
+    snprintf(&buffer[7], size-7, " %6u %4u %3u %08X %7u %6u %1c %1d %5u %5.2g %5.2g  "
+                                 "%08X %08X %08X 0x%03X %9u%c\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
             mChannelMask,
             mSessionId,
-            mStepCount,
             mFrameCount,
             stateChar,
             mFillingUpStatus,
-            mServerProxy->getSampleRate(),
+            mAudioTrackServerProxy->getSampleRate(),
             20.0 * log10((vlr & 0xFFFF) / 4096.0),
             20.0 * log10((vlr >> 16) / 4096.0),
-            mCblk->server,
-            mCblk->user,
+            mCblk->mServer,
             (int)mMainBuffer,
             (int)mAuxBuffer,
-            mCblk->flags,
-            mUnderrunCount,
+            mCblk->mFlags,
+            mAudioTrackServerProxy->getUnderrunFrames(),
             nowInUnderrun);
 }
 
+uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
+    return mAudioTrackServerProxy->getSampleRate();
+}
+
 // AudioBufferProvider interface
 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
         AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
-    audio_track_cblk_t* cblk = this->cblk();
-    uint32_t framesReady;
-    uint32_t framesReq = buffer->frameCount;
-
-    // Check if last stepServer failed, try to step now
-    if (mStepServerFailed) {
-        // FIXME When called by fast mixer, this takes a mutex with tryLock().
-        //       Since the fast mixer is higher priority than client callback thread,
-        //       it does not result in priority inversion for client.
-        //       But a non-blocking solution would be preferable to avoid
-        //       fast mixer being unable to tryLock(), and
-        //       to avoid the extra context switches if the client wakes up,
-        //       discovers the mutex is locked, then has to wait for fast mixer to unlock.
-        if (!step())  goto getNextBuffer_exit;
-        ALOGV("stepServer recovered");
-        mStepServerFailed = false;
+    ServerProxy::Buffer buf;
+    size_t desiredFrames = buffer->frameCount;
+    buf.mFrameCount = desiredFrames;
+    status_t status = mServerProxy->obtainBuffer(&buf);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    if (buf.mFrameCount == 0) {
+        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
     }
-
-    // FIXME Same as above
-    framesReady = mServerProxy->framesReady();
-
-    if (CC_LIKELY(framesReady)) {
-        uint32_t s = cblk->server;
-        uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
-        bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
-        if (framesReq > framesReady) {
-            framesReq = framesReady;
-        }
-        if (framesReq > bufferEnd - s) {
-            framesReq = bufferEnd - s;
-        }
-
-        buffer->raw = getBuffer(s, framesReq);
-        buffer->frameCount = framesReq;
-        return NO_ERROR;
-    }
-
-getNextBuffer_exit:
-    buffer->raw = NULL;
-    buffer->frameCount = 0;
-    ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
-    return NOT_ENOUGH_DATA;
+    return status;
 }
 
 // Note that framesReady() takes a mutex on the control block using tryLock().
@@ -576,7 +502,7 @@
 // the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
 // FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
-    return mServerProxy->framesReady();
+    return mAudioTrackServerProxy->framesReady();
 }
 
 // Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -586,9 +512,9 @@
     }
 
     if (framesReady() >= mFrameCount ||
-            (mCblk->flags & CBLK_FORCEREADY)) {
+            (mCblk->mFlags & CBLK_FORCEREADY)) {
         mFillingUpStatus = FS_FILLED;
-        android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
+        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
         return true;
     }
     return false;
@@ -607,32 +533,33 @@
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
+
         if (state == PAUSED) {
-            mState = TrackBase::RESUMING;
-            ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            if (mResumeToStopping) {
+                // An offload drain was interrupted by pause; resume it in STOPPING_1
+                mState = TrackBase::STOPPING_1;
+                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
+            } else {
+                mState = TrackBase::RESUMING;
+                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            }
         } else {
             mState = TrackBase::ACTIVE;
             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
         }
 
-        if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
-            thread->mLock.unlock();
-            status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            if (status == NO_ERROR) {
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
-            }
-#endif
-        }
-        if (status == NO_ERROR) {
-            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->addTrack_l(this);
-        } else {
-            mState = state;
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        status = playbackThread->addTrack_l(this);
+        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+            //  restore previous state if start was rejected by policy manager
+            if (status == PERMISSION_DENIED) {
+                mState = state;
+            }
+        }
+        // track was already in the active list, not a problem
+        if (status == ALREADY_EXISTS) {
+            status = NO_ERROR;
         }
     } else {
         status = BAD_VALUE;
@@ -653,26 +580,18 @@
             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                 reset();
                 mState = STOPPED;
-            } else if (!isFastTrack()) {
+            } else if (!isFastTrack() && !isOffloaded()) {
                 mState = STOPPED;
             } else {
-                // prepareTracks_l() will set state to STOPPING_2 after next underrun,
-                // and then to STOPPED and reset() when presentation is complete
+                // For fast tracks prepareTracks_l() will set state to STOPPING_2
+                // when presentation is complete
+                // For an offloaded track this starts a drain and state will
+                // move to STOPPING_2 when drain completes and then STOPPED
                 mState = STOPPING_1;
             }
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                     playbackThread);
         }
-        if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
-            thread->mLock.unlock();
-            AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-        }
     }
 }
 
@@ -682,19 +601,27 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState == ACTIVE || mState == RESUMING) {
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        switch (mState) {
+        case STOPPING_1:
+        case STOPPING_2:
+            if (!isOffloaded()) {
+                /* nothing to do if track is not offloaded */
+                break;
+            }
+
+            // Offloaded track was draining, we need to carry on draining when resumed
+            mResumeToStopping = true;
+            // fall through...
+        case ACTIVE:
+        case RESUMING:
             mState = PAUSING;
             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
-            if (!isOutputTrack()) {
-                thread->mLock.unlock();
-                AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-                thread->mLock.lock();
+            playbackThread->signal_l();
+            break;
 
-#ifdef ADD_BATTERY_DATA
-                // to track the speaker usage
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-            }
+        default:
+            break;
         }
     }
 }
@@ -705,21 +632,52 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
-                mState != PAUSING && mState != IDLE && mState != FLUSHED) {
-            return;
-        }
-        // No point remaining in PAUSED state after a flush => go to
-        // FLUSHED state
-        mState = FLUSHED;
-        // do not reset the track if it is still in the process of being stopped or paused.
-        // this will be done by prepareTracks_l() when the track is stopped.
-        // prepareTracks_l() will see mState == FLUSHED, then
-        // remove from active track list, reset(), and trigger presentation complete
         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-        if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+
+        if (isOffloaded()) {
+            // If offloaded we allow flush during any state except terminated
+            // and keep the track active to avoid problems if user is seeking
+            // rapidly and underlying hardware has a significant delay handling
+            // a pause
+            if (isTerminated()) {
+                return;
+            }
+
+            ALOGV("flush: offload flush");
             reset();
+
+            if (mState == STOPPING_1 || mState == STOPPING_2) {
+                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
+                mState = ACTIVE;
+            }
+
+            if (mState == ACTIVE) {
+                ALOGV("flush called in active state, resetting buffer time out retry count");
+                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
+            }
+
+            mResumeToStopping = false;
+        } else {
+            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
+                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
+                return;
+            }
+            // No point remaining in PAUSED state after a flush => go to
+            // FLUSHED state
+            mState = FLUSHED;
+            // do not reset the track if it is still in the process of being stopped or paused.
+            // this will be done by prepareTracks_l() when the track is stopped.
+            // prepareTracks_l() will see mState == FLUSHED, then
+            // remove from active track list, reset(), and trigger presentation complete
+            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+                reset();
+            }
         }
+        // Prevent flush being lost if the track is flushed and then resumed
+        // before mixer thread can run. This is important when offloading
+        // because the hardware buffer could hold a large amount of audio
+        playbackThread->flushOutput_l();
+        playbackThread->signal_l();
     }
 }
 
@@ -728,11 +686,9 @@
     // Do not reset twice to avoid discarding data written just after a flush and before
     // the audioflinger thread detects the track is stopped.
     if (!mResetDone) {
-        TrackBase::reset();
         // Force underrun condition to avoid false underrun callback until first data is
         // written to buffer
-        android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
-        android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
+        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
         mFillingUpStatus = FS_FILLING;
         mResetDone = true;
         if (mState == FLUSHED) {
@@ -741,6 +697,20 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
+{
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == 0) {
+        ALOGE("thread is dead");
+        return FAILED_TRANSACTION;
+    } else if ((thread->type() == ThreadBase::DIRECT) ||
+                    (thread->type() == ThreadBase::OFFLOAD)) {
+        return thread->setParameters(keyValuePairs);
+    } else {
+        return PERMISSION_DENIED;
+    }
+}
+
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
 {
     status_t status = DEAD_OBJECT;
@@ -802,15 +772,23 @@
     // a track is considered presented when the total number of frames written to audio HAL
     // corresponds to the number of frames written when presentationComplete() is called for the
     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
+    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
+    // to detect when all frames have been played. In this case framesWritten isn't
+    // useful because it doesn't always reflect whether there is data in the h/w
+    // buffers, particularly if a track has been paused and resumed during draining
+    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
+                      mPresentationCompleteFrames, framesWritten);
     if (mPresentationCompleteFrames == 0) {
         mPresentationCompleteFrames = framesWritten + audioHalFrames;
         ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                   mPresentationCompleteFrames, audioHalFrames);
     }
-    if (framesWritten >= mPresentationCompleteFrames) {
+
+    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
         ALOGV("presentationComplete() session %d complete: framesWritten %d",
                   mSessionId, framesWritten);
         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+        mAudioTrackServerProxy->setStreamEndDone();
         return true;
     }
     return false;
@@ -833,7 +811,7 @@
 {
     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
-    uint32_t vlr = mServerProxy->getVolumeLR();
+    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
     uint32_t vl = vlr & 0xFFFF;
     uint32_t vr = vlr >> 16;
     // track volumes come from shared memory, so can't be trusted and must be clamped
@@ -856,7 +834,7 @@
 
 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
 {
-    if (mState == TERMINATED || mState == PAUSED ||
+    if (isTerminated() || mState == PAUSED ||
             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                       (mState == STOPPED)))) {
         ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
@@ -870,9 +848,12 @@
 
 void AudioFlinger::PlaybackThread::Track::invalidate()
 {
-    // FIXME should use proxy
-    android_atomic_or(CBLK_INVALID, &mCblk->flags);
-    mCblk->cv.signal();
+    // FIXME should use proxy, and needs work
+    audio_track_cblk_t* cblk = mCblk;
+    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
+    android_atomic_release_store(0x40000000, &cblk->mFutex);
+    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
+    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
     mIsInvalid = true;
 }
 
@@ -1185,10 +1166,12 @@
             }
         }
 
+        uint32_t sr = sampleRate();
+
         // adjust the head buffer's PTS to reflect the portion of the head buffer
         // that has already been consumed
         int64_t effectivePTS = headLocalPTS +
-                ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate());
+                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
 
         // Calculate the delta in samples between the head of the input buffer
         // queue and the start of the next output buffer that will be written.
@@ -1220,7 +1203,7 @@
         // the current output position is within this threshold, then we will
         // concatenate the next input samples to the previous output
         const int64_t kSampleContinuityThreshold =
-                (static_cast<int64_t>(sampleRate()) << 32) / 250;
+                (static_cast<int64_t>(sr) << 32) / 250;
 
         // if this is the first buffer of audio that we're emitting from this track
         // then it should be almost exactly on time.
@@ -1409,15 +1392,17 @@
         mOutBuffer.frameCount = 0;
         playbackThread->mTracks.add(this);
         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
-                "mCblk->frameCount_ %u, mChannelMask 0x%08x mBufferEnd %p",
+                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
                 mCblk, mBuffer,
-                mCblk->frameCount_, mChannelMask, mBufferEnd);
+                mCblk->frameCount_, mChannelMask);
         // since client and server are in the same process,
         // the buffer has the same virtual address on both sides
         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize);
         mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
         mClientProxy->setSendLevel(0.0);
         mClientProxy->setSampleRate(sampleRate);
+        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+                true /*clientInServer*/);
     } else {
         ALOGW("Error creating output track on thread %p", playbackThread);
     }
@@ -1477,7 +1462,7 @@
                     memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
                     mBufferQueue.add(pInBuffer);
                 } else {
-                    ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
+                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
                 }
             }
         }
@@ -1498,9 +1483,10 @@
         if (mOutBuffer.frameCount == 0) {
             mOutBuffer.frameCount = pInBuffer->frameCount;
             nsecs_t startTime = systemTime();
-            if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
-                ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
-                        mThread.unsafe_get());
+            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
+            if (status != NO_ERROR) {
+                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
+                        mThread.unsafe_get(), status);
                 outputBufferFull = true;
                 break;
             }
@@ -1515,7 +1501,10 @@
         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                 pInBuffer->frameCount;
         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
-        mClientProxy->stepUser(outFrames);
+        Proxy::Buffer buf;
+        buf.mFrameCount = outFrames;
+        buf.mRaw = NULL;
+        mClientProxy->releaseBuffer(&buf);
         pInBuffer->frameCount -= outFrames;
         pInBuffer->i16 += outFrames * channelCount;
         mOutBuffer.frameCount -= outFrames;
@@ -1559,8 +1548,10 @@
     // If no more buffers are pending, fill output track buffer to make sure it is started
     // by output mixer.
     if (frames == 0 && mBufferQueue.size() == 0) {
-        if (mCblk->user < mFrameCount) {
-            frames = mFrameCount - mCblk->user;
+        // FIXME broken, replace by getting framesReady() from proxy
+        size_t user = 0;    // was mCblk->user
+        if (user < mFrameCount) {
+            frames = mFrameCount - user;
             pInBuffer = new Buffer;
             pInBuffer->mBuffer = new int16_t[frames * channelCount];
             pInBuffer->frameCount = frames;
@@ -1578,46 +1569,17 @@
 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
 {
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = buffer->frameCount;
-
-    ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
-    buffer->frameCount  = 0;
-
-    size_t framesAvail;
-    {
-        Mutex::Autolock _l(cblk->lock);
-
-        // read the server count again
-        while (!(framesAvail = mClientProxy->framesAvailable_l())) {
-            if (CC_UNLIKELY(!mActive)) {
-                ALOGV("Not active and NO_MORE_BUFFERS");
-                return NO_MORE_BUFFERS;
-            }
-            status_t result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-            if (result != NO_ERROR) {
-                return NO_MORE_BUFFERS;
-            }
-        }
-    }
-
-    if (framesReq > framesAvail) {
-        framesReq = framesAvail;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    buffer->frameCount  = framesReq;
-    buffer->raw         = mClientProxy->buffer(u);
-    return NO_ERROR;
+    ClientProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    struct timespec timeout;
+    timeout.tv_sec = waitTimeMs / 1000;
+    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
+    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    return status;
 }
 
-
 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
 {
     size_t size = mBufferQueue.size();
@@ -1687,7 +1649,12 @@
                   channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/),
         mOverflow(false)
 {
-    ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+    ALOGV("RecordTrack constructor");
+    if (mCblk != NULL) {
+        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
+                mFrameSize);
+        mServerProxy = mAudioRecordServerProxy;
+    }
 }
 
 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1699,42 +1666,16 @@
 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
         int64_t pts)
 {
-    audio_track_cblk_t* cblk = this->cblk();
-    uint32_t framesAvail;
-    uint32_t framesReq = buffer->frameCount;
-
-    // Check if last stepServer failed, try to step now
-    if (mStepServerFailed) {
-        if (!step()) {
-            goto getNextBuffer_exit;
-        }
-        ALOGV("stepServer recovered");
-        mStepServerFailed = false;
+    ServerProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mServerProxy->obtainBuffer(&buf);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    if (buf.mFrameCount == 0) {
+        // FIXME also wake futex so that overrun is noticed more quickly
+        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
     }
-
-    // FIXME lock is not actually held, so overrun is possible
-    framesAvail = mServerProxy->framesAvailableIn_l();
-
-    if (CC_LIKELY(framesAvail)) {
-        uint32_t s = cblk->server;
-        uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
-        if (framesReq > framesAvail) {
-            framesReq = framesAvail;
-        }
-        if (framesReq > bufferEnd - s) {
-            framesReq = bufferEnd - s;
-        }
-
-        buffer->raw = getBuffer(s, framesReq);
-        buffer->frameCount = framesReq;
-        return NO_ERROR;
-    }
-
-getNextBuffer_exit:
-    buffer->raw = NULL;
-    buffer->frameCount = 0;
-    return NOT_ENOUGH_DATA;
+    return status;
 }
 
 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
@@ -1754,16 +1695,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        recordThread->mLock.lock();
-        bool doStop = recordThread->stop_l(this);
-        if (doStop) {
-            TrackBase::reset();
-            // Force overrun condition to avoid false overrun callback until first data is
-            // read from buffer
-            android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
-        }
-        recordThread->mLock.unlock();
-        if (doStop) {
+        if (recordThread->stop(this)) {
             AudioSystem::stopInput(recordThread->id());
         }
     }
@@ -1790,20 +1722,18 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("   Clien Fmt Chn mask   Session Step S Serv     User   FrameCount\n");
+    result.append("Client Fmt Chn mask Session S   Server fCount\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
 {
-    snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %08x %08x %05d\n",
+    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6u\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
             mChannelMask,
             mSessionId,
-            mStepCount,
             mState,
-            mCblk->server,
-            mCblk->user,
+            mCblk->mServer,
             mFrameCount);
 }
 
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 83d9ccd..d659ebb 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -8,29 +8,31 @@
 
 LOCAL_SRC_FILES:=               \
     CameraService.cpp \
-    CameraClient.cpp \
-    Camera2Client.cpp \
-    ProCamera2Client.cpp \
-    Camera2ClientBase.cpp \
-    CameraDeviceBase.cpp \
-    Camera2Device.cpp \
-    Camera3Device.cpp \
-    camera2/Parameters.cpp \
-    camera2/FrameProcessor.cpp \
-    camera2/StreamingProcessor.cpp \
-    camera2/JpegProcessor.cpp \
-    camera2/CallbackProcessor.cpp \
-    camera2/ZslProcessor.cpp \
-    camera2/BurstCapture.cpp \
-    camera2/JpegCompressor.cpp \
-    camera2/CaptureSequencer.cpp \
-    camera2/ProFrameProcessor.cpp \
-    camera2/ZslProcessor3.cpp \
-    camera3/Camera3Stream.cpp \
-    camera3/Camera3IOStreamBase.cpp \
-    camera3/Camera3InputStream.cpp \
-    camera3/Camera3OutputStream.cpp \
-    camera3/Camera3ZslStream.cpp \
+    CameraDeviceFactory.cpp \
+    common/Camera2ClientBase.cpp \
+    common/CameraDeviceBase.cpp \
+    common/FrameProcessorBase.cpp \
+    api1/CameraClient.cpp \
+    api1/Camera2Client.cpp \
+    api1/client2/Parameters.cpp \
+    api1/client2/FrameProcessor.cpp \
+    api1/client2/StreamingProcessor.cpp \
+    api1/client2/JpegProcessor.cpp \
+    api1/client2/CallbackProcessor.cpp \
+    api1/client2/ZslProcessor.cpp \
+    api1/client2/BurstCapture.cpp \
+    api1/client2/JpegCompressor.cpp \
+    api1/client2/CaptureSequencer.cpp \
+    api1/client2/ZslProcessor3.cpp \
+    api2/CameraDeviceClient.cpp \
+    api_pro/ProCamera2Client.cpp \
+    device2/Camera2Device.cpp \
+    device3/Camera3Device.cpp \
+    device3/Camera3Stream.cpp \
+    device3/Camera3IOStreamBase.cpp \
+    device3/Camera3InputStream.cpp \
+    device3/Camera3OutputStream.cpp \
+    device3/Camera3ZslStream.cpp \
     gui/RingBufferConsumer.cpp \
 
 LOCAL_SHARED_LIBRARIES:= \
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
new file mode 100644
index 0000000..7fdf304
--- /dev/null
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "CameraDeviceFactory"
+#include <utils/Log.h>
+
+#include "CameraService.h"
+#include "CameraDeviceFactory.h"
+#include "common/CameraDeviceBase.h"
+#include "device2/Camera2Device.h"
+#include "device3/Camera3Device.h"
+
+namespace android {
+
+wp<CameraService> CameraDeviceFactory::sService;
+
+sp<CameraDeviceBase> CameraDeviceFactory::createDevice(int cameraId) {
+
+    sp<CameraService> svc = sService.promote();
+    if (svc == 0) {
+        ALOGE("%s: No service registered", __FUNCTION__);
+        return NULL;
+    }
+
+    int deviceVersion = svc->getDeviceVersion(cameraId, /*facing*/NULL);
+
+    sp<CameraDeviceBase> device;
+
+    switch (deviceVersion) {
+        case CAMERA_DEVICE_API_VERSION_2_0:
+        case CAMERA_DEVICE_API_VERSION_2_1:
+            device = new Camera2Device(cameraId);
+            break;
+        case CAMERA_DEVICE_API_VERSION_3_0:
+            device = new Camera3Device(cameraId);
+            break;
+        default:
+            ALOGE("%s: Camera %d: Unknown HAL device version %d",
+                  __FUNCTION__, cameraId, deviceVersion);
+            device = NULL;
+            break;
+    }
+
+    ALOGV_IF(device != 0, "Created a new camera device for version %d",
+                          deviceVersion);
+
+    return device;
+}
+
+void CameraDeviceFactory::registerService(wp<CameraService> service) {
+    ALOGV("%s: Registered service %p", __FUNCTION__,
+          service.promote().get());
+
+    sService = service;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.h b/services/camera/libcameraservice/CameraDeviceFactory.h
new file mode 100644
index 0000000..236dc56
--- /dev/null
+++ b/services/camera/libcameraservice/CameraDeviceFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
+#define ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
+
+#include <utils/RefBase.h>
+
+namespace android {
+
+class CameraDeviceBase;
+class CameraService;
+
+/**
+ * Create the right instance of Camera2Device or Camera3Device
+ * automatically based on the device version.
+ */
+class CameraDeviceFactory : public virtual RefBase {
+  public:
+    static void registerService(wp<CameraService> service);
+
+    // Prerequisite: Call registerService.
+    static sp<CameraDeviceBase> createDevice(int cameraId);
+  private:
+    CameraDeviceFactory(wp<CameraService> service);
+
+    static wp<CameraService> sService;
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 757a781..bf9bc71 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -38,9 +38,11 @@
 #include <utils/String16.h>
 
 #include "CameraService.h"
-#include "CameraClient.h"
-#include "Camera2Client.h"
-#include "ProCamera2Client.h"
+#include "api1/CameraClient.h"
+#include "api1/Camera2Client.h"
+#include "api_pro/ProCamera2Client.h"
+#include "api2/CameraDeviceClient.h"
+#include "CameraDeviceFactory.h"
 
 namespace android {
 
@@ -126,6 +128,8 @@
                 CAMERA_MODULE_API_VERSION_2_1) {
             mModule->set_callbacks(this);
         }
+
+        CameraDeviceFactory::registerService(this);
     }
 }
 
@@ -164,7 +168,7 @@
            Mutex::Autolock al(mServiceLock);
 
            /* Find all clients that we need to disconnect */
-           sp<Client> client = mClient[cameraId].promote();
+           sp<BasicClient> client = mClient[cameraId].promote();
            if (client.get() != NULL) {
                clientsToDisconnect.push_back(client);
            }
@@ -207,7 +211,7 @@
 status_t CameraService::getCameraInfo(int cameraId,
                                       struct CameraInfo* cameraInfo) {
     if (!mModule) {
-        return NO_INIT;
+        return -ENODEV;
     }
 
     if (cameraId < 0 || cameraId >= mNumberOfCameras) {
@@ -258,7 +262,7 @@
     return false;
 }
 
-bool CameraService::validateConnect(int cameraId,
+status_t CameraService::validateConnect(int cameraId,
                                     /*inout*/
                                     int& clientUid) const {
 
@@ -271,19 +275,19 @@
         if (callingPid != getpid()) {
             ALOGE("CameraService::connect X (pid %d) rejected (don't trust clientUid)",
                     callingPid);
-            return false;
+            return PERMISSION_DENIED;
         }
     }
 
     if (!mModule) {
         ALOGE("Camera HAL module not loaded");
-        return false;
+        return -ENODEV;
     }
 
     if (cameraId < 0 || cameraId >= mNumberOfCameras) {
         ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
             callingPid, cameraId);
-        return false;
+        return -ENODEV;
     }
 
     char value[PROPERTY_VALUE_MAX];
@@ -291,36 +295,36 @@
     if (strcmp(value, "1") == 0) {
         // Camera is disabled by DevicePolicyManager.
         ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
-        return false;
+        return -EACCES;
     }
 
     ICameraServiceListener::Status currentStatus = getStatus(cameraId);
     if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
         ALOGI("Camera is not plugged in,"
                " connect X (pid %d) rejected", callingPid);
-        return false;
+        return -ENODEV;
     } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
         ALOGI("Camera is enumerating,"
                " connect X (pid %d) rejected", callingPid);
-        return false;
+        return -EBUSY;
     }
     // Else don't check for STATUS_NOT_AVAILABLE.
     //  -- It's done implicitly in canConnectUnsafe /w the mBusy array
 
-    return true;
+    return OK;
 }
 
 bool CameraService::canConnectUnsafe(int cameraId,
                                      const String16& clientPackageName,
                                      const sp<IBinder>& remoteCallback,
-                                     sp<Client> &client) {
+                                     sp<BasicClient> &client) {
     String8 clientName8(clientPackageName);
     int callingPid = getCallingPid();
 
     if (mClient[cameraId] != 0) {
         client = mClient[cameraId].promote();
         if (client != 0) {
-            if (remoteCallback == client->getRemoteCallback()->asBinder()) {
+            if (remoteCallback == client->getRemote()) {
                 LOG1("CameraService::connect X (pid %d) (the same client)",
                      callingPid);
                 return true;
@@ -354,11 +358,13 @@
     return true;
 }
 
-sp<ICamera> CameraService::connect(
+status_t CameraService::connect(
         const sp<ICameraClient>& cameraClient,
         int cameraId,
         const String16& clientPackageName,
-        int clientUid) {
+        int clientUid,
+        /*out*/
+        sp<ICamera>& device) {
 
     String8 clientName8(clientPackageName);
     int callingPid = getCallingPid();
@@ -366,20 +372,23 @@
     LOG1("CameraService::connect E (pid %d \"%s\", id %d)", callingPid,
             clientName8.string(), cameraId);
 
-    if (!validateConnect(cameraId, /*inout*/clientUid)) {
-        return NULL;
+    status_t status = validateConnect(cameraId, /*inout*/clientUid);
+    if (status != OK) {
+        return status;
     }
 
-    sp<Client> client;
 
+    sp<Client> client;
     {
         Mutex::Autolock lock(mServiceLock);
+        sp<BasicClient> clientTmp;
         if (!canConnectUnsafe(cameraId, clientPackageName,
                               cameraClient->asBinder(),
-                              /*out*/client)) {
-            return NULL;
+                              /*out*/clientTmp)) {
+            return -EBUSY;
         } else if (client.get() != NULL) {
-            return client;
+            device = static_cast<Client*>(clientTmp.get());
+            return OK;
         }
 
         int facing = -1;
@@ -409,18 +418,18 @@
             break;
           case -1:
             ALOGE("Invalid camera id %d", cameraId);
-            return NULL;
+            return BAD_VALUE;
           default:
             ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return NULL;
+            return INVALID_OPERATION;
         }
 
-        if (!connectFinishUnsafe(client, client->asBinder())) {
+        status_t status = connectFinishUnsafe(client, client->getRemote());
+        if (status != OK) {
             // this is probably not recoverable.. maybe the client can try again
             // OK: we can only get here if we were originally in PRESENT state
             updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
-
-            return NULL;
+            return status;
         }
 
         mClient[cameraId] = client;
@@ -430,45 +439,49 @@
     // important: release the mutex here so the client can call back
     //    into the service from its destructor (can be at the end of the call)
 
-    return client;
+    device = client;
+    return OK;
 }
 
-bool CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
-                                        const sp<IBinder>& clientBinder) {
-    if (client->initialize(mModule) != OK) {
-        return false;
+status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
+                                            const sp<IBinder>& remoteCallback) {
+    status_t status = client->initialize(mModule);
+    if (status != OK) {
+        return status;
     }
 
-    clientBinder->linkToDeath(this);
+    remoteCallback->linkToDeath(this);
 
-    return true;
+    return OK;
 }
 
-sp<IProCameraUser> CameraService::connect(
+status_t CameraService::connectPro(
                                         const sp<IProCameraCallbacks>& cameraCb,
                                         int cameraId,
                                         const String16& clientPackageName,
-                                        int clientUid)
+                                        int clientUid,
+                                        /*out*/
+                                        sp<IProCameraUser>& device)
 {
     String8 clientName8(clientPackageName);
     int callingPid = getCallingPid();
 
     LOG1("CameraService::connectPro E (pid %d \"%s\", id %d)", callingPid,
             clientName8.string(), cameraId);
-
-    if (!validateConnect(cameraId, /*inout*/clientUid)) {
-        return NULL;
+    status_t status = validateConnect(cameraId, /*inout*/clientUid);
+    if (status != OK) {
+        return status;
     }
 
     sp<ProClient> client;
     {
         Mutex::Autolock lock(mServiceLock);
         {
-            sp<Client> client;
+            sp<BasicClient> client;
             if (!canConnectUnsafe(cameraId, clientPackageName,
                                   cameraCb->asBinder(),
                                   /*out*/client)) {
-                return NULL;
+                return -EBUSY;
             }
         }
 
@@ -479,23 +492,25 @@
           case CAMERA_DEVICE_API_VERSION_1_0:
             ALOGE("Camera id %d uses HALv1, doesn't support ProCamera",
                   cameraId);
-            return NULL;
+            return -ENOTSUP;
             break;
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
             client = new ProCamera2Client(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
           case -1:
             ALOGE("Invalid camera id %d", cameraId);
-            return NULL;
+            return BAD_VALUE;
           default:
             ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return NULL;
+            return INVALID_OPERATION;
         }
 
-        if (!connectFinishUnsafe(client, client->asBinder())) {
-            return NULL;
+        status_t status = connectFinishUnsafe(client, client->getRemote());
+        if (status != OK) {
+            return status;
         }
 
         mProClientList[cameraId].push(client);
@@ -505,10 +520,93 @@
     }
     // important: release the mutex here so the client can call back
     //    into the service from its destructor (can be at the end of the call)
-
-    return client;
+    device = client;
+    return OK;
 }
 
+status_t CameraService::connectDevice(
+        const sp<ICameraDeviceCallbacks>& cameraCb,
+        int cameraId,
+        const String16& clientPackageName,
+        int clientUid,
+        /*out*/
+        sp<ICameraDeviceUser>& device)
+{
+
+    String8 clientName8(clientPackageName);
+    int callingPid = getCallingPid();
+
+    LOG1("CameraService::connectDevice E (pid %d \"%s\", id %d)", callingPid,
+            clientName8.string(), cameraId);
+
+    status_t status = validateConnect(cameraId, /*inout*/clientUid);
+    if (status != OK) {
+        return status;
+    }
+
+    sp<CameraDeviceClient> client;
+    {
+        Mutex::Autolock lock(mServiceLock);
+        {
+            sp<BasicClient> client;
+            if (!canConnectUnsafe(cameraId, clientPackageName,
+                                  cameraCb->asBinder(),
+                                  /*out*/client)) {
+                return -EBUSY;
+            }
+        }
+
+        int facing = -1;
+        int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+        // If there are other non-exclusive users of the camera,
+        //  this will tear them down before we can reuse the camera
+        if (isValidCameraId(cameraId)) {
+            // transition from PRESENT -> NOT_AVAILABLE
+            updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
+                         cameraId);
+        }
+
+        switch(deviceVersion) {
+          case CAMERA_DEVICE_API_VERSION_1_0:
+            ALOGW("Camera using old HAL version: %d", deviceVersion);
+            return -ENOTSUP;
+           // TODO: don't allow 2.0  Only allow 2.1 and higher
+          case CAMERA_DEVICE_API_VERSION_2_0:
+          case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
+            client = new CameraDeviceClient(this, cameraCb, String16(),
+                    cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+            break;
+          case -1:
+            ALOGE("Invalid camera id %d", cameraId);
+            return BAD_VALUE;
+          default:
+            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+            return INVALID_OPERATION;
+        }
+
+        status_t status = connectFinishUnsafe(client, client->getRemote());
+        if (status != OK) {
+            // this is probably not recoverable.. maybe the client can try again
+            // OK: we can only get here if we were originally in PRESENT state
+            updateStatus(ICameraServiceListener::STATUS_PRESENT, cameraId);
+            return status;
+        }
+
+        LOG1("CameraService::connectDevice X (id %d, this pid is %d)", cameraId,
+                getpid());
+
+        mClient[cameraId] = client;
+    }
+    // important: release the mutex here so the client can call back
+    //    into the service from its destructor (can be at the end of the call)
+
+    device = client;
+    return OK;
+}
+
+
 status_t CameraService::addListener(
                                 const sp<ICameraServiceListener>& listener) {
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -566,14 +664,14 @@
     Mutex::Autolock lock(mServiceLock);
 
     int outIndex;
-    sp<Client> client = findClientUnsafe(remoteBinder, outIndex);
+    sp<BasicClient> client = findClientUnsafe(remoteBinder, outIndex);
 
     if (client != 0) {
         // Found our camera, clear and leave.
         LOG1("removeClient: clear camera %d", outIndex);
         mClient[outIndex].clear();
 
-        client->unlinkToDeath(this);
+        client->getRemote()->unlinkToDeath(this);
     } else {
 
         sp<ProClient> clientPro = findProClientUnsafe(remoteBinder);
@@ -620,9 +718,9 @@
     return clientPro;
 }
 
-sp<CameraService::Client> CameraService::findClientUnsafe(
+sp<CameraService::BasicClient> CameraService::findClientUnsafe(
                         const wp<IBinder>& cameraClient, int& outIndex) {
-    sp<Client> client;
+    sp<BasicClient> client;
 
     for (int i = 0; i < mNumberOfCameras; i++) {
 
@@ -640,7 +738,7 @@
             continue;
         }
 
-        if (cameraClient == client->getRemoteCallback()->asBinder()) {
+        if (cameraClient == client->getRemote()) {
             // Found our camera
             outIndex = i;
             return client;
@@ -651,7 +749,7 @@
     return NULL;
 }
 
-CameraService::Client* CameraService::getClientByIdUnsafe(int cameraId) {
+CameraService::BasicClient* CameraService::getClientByIdUnsafe(int cameraId) {
     if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
     return mClient[cameraId].unsafe_get();
 }
@@ -906,7 +1004,9 @@
 // Provide client pointer for callbacks. Client lock returned from getClientLockFromCookie should
 // be acquired for this to be safe
 CameraService::Client* CameraService::Client::getClientFromCookie(void* user) {
-    Client* client = gCameraService->getClientByIdUnsafe((int) user);
+    BasicClient *basicClient = gCameraService->getClientByIdUnsafe((int) user);
+    // OK: only CameraClient calls this, and they already cast anyway.
+    Client* client = static_cast<Client*>(basicClient);
 
     // This could happen if the Client is in the process of shutting down (the
     // last strong reference is gone, but the destructor hasn't finished
@@ -1058,7 +1158,7 @@
                 }
             }
 
-            sp<Client> client = mClient[i].promote();
+            sp<BasicClient> client = mClient[i].promote();
             if (client == 0) {
                 result = String8::format("  Device is closed, no client instance\n");
                 write(fd, result.string(), result.size());
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 710f164..3921cbd 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -29,6 +29,8 @@
 #include <camera/ICameraClient.h>
 #include <camera/IProCameraUser.h>
 #include <camera/IProCameraCallbacks.h>
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
 
 #include <camera/ICameraServiceListener.h>
 
@@ -70,10 +72,23 @@
     virtual status_t    getCameraInfo(int cameraId,
                                       struct CameraInfo* cameraInfo);
 
-    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId,
-            const String16& clientPackageName, int clientUid);
-    virtual sp<IProCameraUser> connect(const sp<IProCameraCallbacks>& cameraCb,
-            int cameraId, const String16& clientPackageName, int clientUid);
+    virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
+            const String16& clientPackageName, int clientUid,
+            /*out*/
+            sp<ICamera>& device);
+
+    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
+            int cameraId, const String16& clientPackageName, int clientUid,
+            /*out*/
+            sp<IProCameraUser>& device);
+
+    virtual status_t connectDevice(
+            const sp<ICameraDeviceCallbacks>& cameraCb,
+            int cameraId,
+            const String16& clientPackageName,
+            int clientUid,
+            /*out*/
+            sp<ICameraDeviceUser>& device);
 
     virtual status_t    addListener(const sp<ICameraServiceListener>& listener);
     virtual status_t    removeListener(
@@ -99,13 +114,17 @@
     void                playSound(sound_kind kind);
     void                releaseSound();
 
+    /////////////////////////////////////////////////////////////////////
+    // CameraDeviceFactory functionality
+    int                 getDeviceVersion(int cameraId, int* facing = NULL);
+
 
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
 
     // returns plain pointer of client. Note that mClientLock should be acquired to
     // prevent the client from destruction. The result can be NULL.
-    virtual Client*     getClientByIdUnsafe(int cameraId);
+    virtual BasicClient* getClientByIdUnsafe(int cameraId);
     virtual Mutex*      getClientLockById(int cameraId);
 
     class BasicClient : public virtual RefBase {
@@ -114,11 +133,17 @@
 
         virtual void          disconnect() = 0;
 
+        // because we can't virtually inherit IInterface, which breaks
+        // virtual inheritance
+        virtual sp<IBinder> asBinderWrapper() = 0;
+
         // Return the remote callback binder object (e.g. IProCameraCallbacks)
-        wp<IBinder>     getRemote() {
+        sp<IBinder>     getRemote() {
             return mRemoteBinder;
         }
 
+        virtual status_t      dump(int fd, const Vector<String16>& args) = 0;
+
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -147,7 +172,7 @@
         pid_t                           mServicePid;     // immutable after constructor
 
         // - The app-side Binder interface to receive callbacks from us
-        wp<IBinder>                     mRemoteBinder;   // immutable after constructor
+        sp<IBinder>                     mRemoteBinder;   // immutable after constructor
 
         // permissions management
         status_t                        startCameraOps();
@@ -190,6 +215,8 @@
         virtual status_t      setPreviewDisplay(const sp<Surface>& surface) = 0;
         virtual status_t      setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)=0;
         virtual void          setPreviewCallbackFlag(int flag) = 0;
+        virtual status_t      setPreviewCallbackTarget(
+                const sp<IGraphicBufferProducer>& callbackProducer) = 0;
         virtual status_t      startPreview() = 0;
         virtual void          stopPreview() = 0;
         virtual bool          previewEnabled() = 0;
@@ -221,6 +248,10 @@
             return mRemoteCallback;
         }
 
+        virtual sp<IBinder> asBinderWrapper() {
+            return asBinder();
+        }
+
     protected:
         static Mutex*        getClientLockFromCookie(void* user);
         // convert client from cookie. Client lock should be acquired before getting Client.
@@ -285,7 +316,7 @@
     virtual void onFirstRef();
 
     // Step 1. Check if we can connect, before we acquire the service lock.
-    bool                validateConnect(int cameraId,
+    status_t            validateConnect(int cameraId,
                                         /*inout*/
                                         int& clientUid) const;
 
@@ -294,16 +325,17 @@
                                          const String16& clientPackageName,
                                          const sp<IBinder>& remoteCallback,
                                          /*out*/
-                                         sp<Client> &client);
+                                         sp<BasicClient> &client);
 
     // When connection is successful, initialize client and track its death
-    bool                connectFinishUnsafe(const sp<BasicClient>& client,
-                                            const sp<IBinder>& clientBinder);
+    status_t            connectFinishUnsafe(const sp<BasicClient>& client,
+                                            const sp<IBinder>& remoteCallback);
 
     virtual sp<BasicClient>  getClientByRemote(const wp<IBinder>& cameraClient);
 
     Mutex               mServiceLock;
-    wp<Client>          mClient[MAX_CAMERAS];  // protected by mServiceLock
+    // either a Client or CameraDeviceClient
+    wp<BasicClient>     mClient[MAX_CAMERAS];  // protected by mServiceLock
     Mutex               mClientLock[MAX_CAMERAS]; // prevent Client destruction inside callbacks
     int                 mNumberOfCameras;
 
@@ -311,7 +343,7 @@
     Vector<weak_pro_client_ptr> mProClientList[MAX_CAMERAS];
 
     // needs to be called with mServiceLock held
-    sp<Client>          findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
+    sp<BasicClient>     findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
     sp<ProClient>       findProClientUnsafe(
                                      const wp<IBinder>& cameraCallbacksRemote);
 
@@ -352,7 +384,6 @@
     virtual void        binderDied(const wp<IBinder> &who);
 
     // Helpers
-    int                 getDeviceVersion(int cameraId, int* facing);
 
     bool                isValidCameraId(int cameraId);
 };
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
similarity index 95%
rename from services/camera/libcameraservice/Camera2Client.cpp
rename to services/camera/libcameraservice/api1/Camera2Client.cpp
index 6942006..46aa60c 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -23,13 +23,15 @@
 
 #include <cutils/properties.h>
 #include <gui/Surface.h>
-#include "camera2/Parameters.h"
-#include "Camera2Client.h"
-#include "Camera2Device.h"
-#include "Camera3Device.h"
 
-#include "camera2/ZslProcessor.h"
-#include "camera2/ZslProcessor3.h"
+#include "api1/Camera2Client.h"
+
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/JpegProcessor.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/CallbackProcessor.h"
+#include "api1/client2/ZslProcessor.h"
+#include "api1/client2/ZslProcessor3.h"
 
 #define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
 #define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
@@ -58,22 +60,6 @@
         mDeviceVersion(deviceVersion)
 {
     ATRACE_CALL();
-    ALOGI("Camera %d: Opened", cameraId);
-
-    switch (mDeviceVersion) {
-        case CAMERA_DEVICE_API_VERSION_2_0:
-            mDevice = new Camera2Device(cameraId);
-            break;
-        case CAMERA_DEVICE_API_VERSION_3_0:
-            mDevice = new Camera3Device(cameraId);
-            break;
-        default:
-            ALOGE("Camera %d: Unknown HAL device version %d",
-                    cameraId, mDeviceVersion);
-            mDevice = NULL;
-            break;
-    }
-
 
     SharedParameters::Lock l(mParameters);
     l.mParameters.state = Parameters::DISCONNECTED;
@@ -632,6 +618,19 @@
         params.previewCallbackOneShot = true;
     }
     if (params.previewCallbackFlags != (uint32_t)flag) {
+
+        if (flag != CAMERA_FRAME_CALLBACK_FLAG_NOOP) {
+            // Disable any existing preview callback window when enabling
+            // preview callback flags
+            res = mCallbackProcessor->setCallbackWindow(NULL);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to clear preview callback surface:"
+                        " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
+                return;
+            }
+            params.previewCallbackSurface = false;
+        }
+
         params.previewCallbackFlags = flag;
 
         if (params.state == Parameters::PREVIEW) {
@@ -643,9 +642,61 @@
             }
         }
     }
-
 }
 
+status_t Camera2Client::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    sp<ANativeWindow> window;
+    if (callbackProducer != 0) {
+        window = new Surface(callbackProducer);
+    }
+
+    res = mCallbackProcessor->setCallbackWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview callback surface: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    SharedParameters::Lock l(mParameters);
+
+    if (window != NULL) {
+        // Disable traditional callbacks when a valid callback target is given
+        l.mParameters.previewCallbackFlags = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+        l.mParameters.previewCallbackOneShot = false;
+        l.mParameters.previewCallbackSurface = true;
+    } else {
+        // Disable callback target if given a NULL interface.
+        l.mParameters.previewCallbackSurface = false;
+    }
+
+    switch(l.mParameters.state) {
+        case Parameters::PREVIEW:
+            res = startPreviewL(l.mParameters, true);
+            break;
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            res = startRecordingL(l.mParameters, true);
+            break;
+        default:
+            break;
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+                __FUNCTION__, mCameraId,
+                Parameters::getStateName(l.mParameters.state));
+    }
+
+    return OK;
+}
+
+
 status_t Camera2Client::startPreview() {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
@@ -708,8 +759,10 @@
     }
 
     Vector<uint8_t> outputStreams;
-    bool callbacksEnabled = params.previewCallbackFlags &
-        CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+    bool callbacksEnabled = (params.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+            params.previewCallbackSurface;
+
     if (callbacksEnabled) {
         // Can't have recording stream hanging around when enabling callbacks,
         // since it exceeds the max stream count on some devices.
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
similarity index 90%
rename from services/camera/libcameraservice/Camera2Client.h
rename to services/camera/libcameraservice/api1/Camera2Client.h
index 8ab46b1..ed448f3 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -17,19 +17,29 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
 #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_H
 
-#include "CameraDeviceBase.h"
 #include "CameraService.h"
-#include "camera2/Parameters.h"
-#include "camera2/FrameProcessor.h"
-#include "camera2/StreamingProcessor.h"
-#include "camera2/JpegProcessor.h"
-#include "camera2/ZslProcessorInterface.h"
-#include "camera2/CaptureSequencer.h"
-#include "camera2/CallbackProcessor.h"
-#include "Camera2ClientBase.h"
+#include "common/CameraDeviceBase.h"
+#include "common/Camera2ClientBase.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/FrameProcessor.h"
+//#include "api1/client2/StreamingProcessor.h"
+//#include "api1/client2/JpegProcessor.h"
+//#include "api1/client2/ZslProcessorInterface.h"
+//#include "api1/client2/CaptureSequencer.h"
+//#include "api1/client2/CallbackProcessor.h"
 
 namespace android {
 
+namespace camera2 {
+
+class StreamingProcessor;
+class JpegProcessor;
+class ZslProcessorInterface;
+class CaptureSequencer;
+class CallbackProcessor;
+
+}
+
 class IMemory;
 /**
  * Interface between android.hardware.Camera API and Camera HAL device for versions
@@ -51,6 +61,9 @@
     virtual status_t        setPreviewTexture(
         const sp<IGraphicBufferProducer>& bufferProducer);
     virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer);
+
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
similarity index 98%
rename from services/camera/libcameraservice/CameraClient.cpp
rename to services/camera/libcameraservice/api1/CameraClient.cpp
index e577fa3..ad8856b 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -20,8 +20,8 @@
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 
-#include "CameraClient.h"
-#include "CameraHardwareInterface.h"
+#include "api1/CameraClient.h"
+#include "device1/CameraHardwareInterface.h"
 #include "CameraService.h"
 
 namespace android {
@@ -347,6 +347,13 @@
     }
 }
 
+status_t CameraClient::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    (void)callbackProducer;
+    ALOGE("%s: Unimplemented!", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 // start preview mode
 status_t CameraClient::startPreview() {
     LOG1("startPreview (pid %d)", getCallingPid());
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
similarity index 98%
rename from services/camera/libcameraservice/CameraClient.h
rename to services/camera/libcameraservice/api1/CameraClient.h
index 7f0cb29..abde75a 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -40,6 +40,8 @@
     virtual status_t        setPreviewDisplay(const sp<Surface>& surface);
     virtual status_t        setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
     virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer);
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
similarity index 97%
rename from services/camera/libcameraservice/camera2/BurstCapture.cpp
rename to services/camera/libcameraservice/api1/client2/BurstCapture.cpp
index 192d419..0bfdfd4 100644
--- a/services/camera/libcameraservice/camera2/BurstCapture.cpp
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
@@ -22,8 +22,8 @@
 
 #include "BurstCapture.h"
 
-#include "../Camera2Client.h"
-#include "JpegCompressor.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/JpegCompressor.h"
 
 namespace android {
 namespace camera2 {
diff --git a/services/camera/libcameraservice/camera2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
similarity index 96%
rename from services/camera/libcameraservice/camera2/BurstCapture.h
rename to services/camera/libcameraservice/api1/client2/BurstCapture.h
index a2cc893..ea321fd 100644
--- a/services/camera/libcameraservice/camera2/BurstCapture.h
+++ b/services/camera/libcameraservice/api1/client2/BurstCapture.h
@@ -17,11 +17,12 @@
 #ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
 #define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
 
-#include "camera/CameraMetadata.h"
+#include <camera/CameraMetadata.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <gui/CpuConsumer.h>
-#include "Camera2Device.h"
+
+#include "device2/Camera2Device.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
similarity index 90%
rename from services/camera/libcameraservice/camera2/CallbackProcessor.cpp
rename to services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 522f49a..9d8c4a1 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -20,11 +20,11 @@
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
-
-#include "CallbackProcessor.h"
 #include <gui/Surface.h>
-#include "../CameraDeviceBase.h"
-#include "../Camera2Client.h"
+
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CallbackProcessor.h"
 
 #define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
 
@@ -37,6 +37,7 @@
         mDevice(client->getCameraDevice()),
         mId(client->getCameraId()),
         mCallbackAvailable(false),
+        mCallbackToApp(false),
         mCallbackStreamId(NO_STREAM) {
 }
 
@@ -53,6 +54,35 @@
     }
 }
 
+status_t CallbackProcessor::setCallbackWindow(
+        sp<ANativeWindow> callbackWindow) {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return OK;
+    sp<CameraDeviceBase> device = client->getCameraDevice();
+
+    // If the window is changing, clear out stream if it already exists
+    if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) {
+        res = device->deleteStream(mCallbackStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete old stream "
+                    "for callbacks: %s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        mCallbackStreamId = NO_STREAM;
+        mCallbackConsumer.clear();
+    }
+    mCallbackWindow = callbackWindow;
+    mCallbackToApp = (mCallbackWindow != NULL);
+
+    return OK;
+}
+
 status_t CallbackProcessor::updateStream(const Parameters &params) {
     ATRACE_CALL();
     status_t res;
@@ -67,21 +97,24 @@
 
     // If possible, use the flexible YUV format
     int32_t callbackFormat = params.previewFormat;
-    if (params.fastInfo.useFlexibleYuv &&
+    if (mCallbackToApp) {
+        // TODO: etalvala: This should use the flexible YUV format as well, but
+        // need to reconcile HAL2/HAL3 requirements.
+        callbackFormat = HAL_PIXEL_FORMAT_YV12;
+    } else if(params.fastInfo.useFlexibleYuv &&
             (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
              params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) {
         callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
     }
 
-    if (mCallbackConsumer == 0) {
-        // Create CPU buffer queue endpoint. Make it async to avoid disconnect
-        // deadlocks.
-        mCallbackConsumer = new CpuConsumer(kCallbackHeapCount,
-                /*synchronized*/ false);
+    if (!mCallbackToApp && mCallbackConsumer == 0) {
+        // Create CPU buffer queue endpoint, since app hasn't given us one
+        // Make it async to avoid disconnect deadlocks
+        sp<BufferQueue> bq = new BufferQueue();
+        mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
         mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
-        mCallbackWindow = new Surface(
-            mCallbackConsumer->getProducerInterface());
+        mCallbackWindow = new Surface(bq);
     }
 
     if (mCallbackStreamId != NO_STREAM) {
@@ -106,8 +139,8 @@
             res = device->deleteStream(mCallbackStreamId);
             if (res != OK) {
                 ALOGE("%s: Camera %d: Unable to delete old output stream "
-                        "for callbacks: %s (%d)", __FUNCTION__, mId,
-                        strerror(-res), res);
+                        "for callbacks: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
                 return res;
             }
             mCallbackStreamId = NO_STREAM;
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
similarity index 88%
rename from services/camera/libcameraservice/camera2/CallbackProcessor.h
rename to services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index d851a84..613f5be 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -23,9 +23,8 @@
 #include <utils/Mutex.h>
 #include <utils/Condition.h>
 #include <gui/CpuConsumer.h>
-#include "Parameters.h"
-#include "camera/CameraMetadata.h"
-#include "Camera2Heap.h"
+
+#include "api1/client2/Camera2Heap.h"
 
 namespace android {
 
@@ -34,6 +33,8 @@
 
 namespace camera2 {
 
+class Parameters;
+
 /***
  * Still image capture output image processing
  */
@@ -45,6 +46,8 @@
 
     void onFrameAvailable();
 
+    // Set to NULL to disable the direct-to-app callback window
+    status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
     int getStreamId() const;
@@ -64,6 +67,9 @@
         NO_STREAM = -1
     };
 
+    // True if mCallbackWindow is a remote consumer, false if just the local
+    // mCallbackConsumer
+    bool mCallbackToApp;
     int mCallbackStreamId;
     static const size_t kCallbackHeapCount = 6;
     sp<CpuConsumer>    mCallbackConsumer;
diff --git a/services/camera/libcameraservice/camera2/Camera2Heap.h b/services/camera/libcameraservice/api1/client2/Camera2Heap.h
similarity index 100%
rename from services/camera/libcameraservice/camera2/Camera2Heap.h
rename to services/camera/libcameraservice/api1/client2/Camera2Heap.h
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
similarity index 98%
rename from services/camera/libcameraservice/camera2/CaptureSequencer.cpp
rename to services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index e5a011c..ad1590a 100644
--- a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -22,12 +22,11 @@
 #include <utils/Trace.h>
 #include <utils/Vector.h>
 
-#include "CaptureSequencer.h"
-#include "BurstCapture.h"
-#include "../Camera2Device.h"
-#include "../Camera2Client.h"
-#include "Parameters.h"
-#include "ZslProcessorInterface.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/BurstCapture.h"
+#include "api1/client2/Parameters.h"
+#include "api1/client2/ZslProcessorInterface.h"
 
 namespace android {
 namespace camera2 {
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
similarity index 100%
rename from services/camera/libcameraservice/camera2/CaptureSequencer.h
rename to services/camera/libcameraservice/api1/client2/CaptureSequencer.h
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
similarity index 97%
rename from services/camera/libcameraservice/camera2/FrameProcessor.cpp
rename to services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 114a7a8..c34cb12 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -21,16 +21,16 @@
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
-#include "FrameProcessor.h"
-#include "../CameraDeviceBase.h"
-#include "../Camera2Client.h"
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/FrameProcessor.h"
 
 namespace android {
 namespace camera2 {
 
 FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device,
                                wp<Camera2Client> client) :
-    ProFrameProcessor(device),
+    FrameProcessorBase(device),
     mClient(client),
     mLastFrameNumberOfFaces(0) {
 
@@ -58,7 +58,7 @@
         process3aState(frame, client);
     }
 
-    if (!ProFrameProcessor::processSingleFrame(frame, device)) {
+    if (!FrameProcessorBase::processSingleFrame(frame, device)) {
         return false;
     }
 
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
similarity index 96%
rename from services/camera/libcameraservice/camera2/FrameProcessor.h
rename to services/camera/libcameraservice/api1/client2/FrameProcessor.h
index f480c55..2a17d45 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -24,7 +24,7 @@
 #include <utils/List.h>
 #include <camera/CameraMetadata.h>
 
-#include "ProFrameProcessor.h"
+#include "common/FrameProcessorBase.h"
 
 struct camera_frame_metadata;
 
@@ -37,7 +37,7 @@
 /* Output frame metadata processing thread.  This thread waits for new
  * frames from the device, and analyzes them as necessary.
  */
-class FrameProcessor : public ProFrameProcessor {
+class FrameProcessor : public FrameProcessorBase {
   public:
     FrameProcessor(wp<CameraDeviceBase> device, wp<Camera2Client> client);
     ~FrameProcessor();
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
similarity index 98%
rename from services/camera/libcameraservice/camera2/JpegCompressor.cpp
rename to services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
index c9af71e..2f0c67d 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
@@ -210,7 +210,8 @@
     return true;
 }
 
-void JpegCompressor::jpegTermDestination(j_compress_ptr /*cinfo*/) {
+void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+    (void) cinfo; // TODO: clean up
     ALOGV("%s", __FUNCTION__);
     ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
             __FUNCTION__, cinfo->dest->free_in_buffer);
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
similarity index 100%
rename from services/camera/libcameraservice/camera2/JpegCompressor.h
rename to services/camera/libcameraservice/api1/client2/JpegCompressor.h
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
similarity index 97%
rename from services/camera/libcameraservice/camera2/JpegProcessor.cpp
rename to services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index f0a13ca..77d5c8a 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -24,12 +24,13 @@
 #include <binder/MemoryHeapBase.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
-
-#include "JpegProcessor.h"
 #include <gui/Surface.h>
-#include "../CameraDeviceBase.h"
-#include "../Camera2Client.h"
 
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/Camera2Heap.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/JpegProcessor.h"
 
 namespace android {
 namespace camera2 {
@@ -82,11 +83,11 @@
 
     if (mCaptureConsumer == 0) {
         // Create CPU buffer queue endpoint
-        mCaptureConsumer = new CpuConsumer(1);
+        sp<BufferQueue> bq = new BufferQueue();
+        mCaptureConsumer = new CpuConsumer(bq, 1);
         mCaptureConsumer->setFrameAvailableListener(this);
         mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
-        mCaptureWindow = new Surface(
-            mCaptureConsumer->getProducerInterface());
+        mCaptureWindow = new Surface(bq);
         // Create memory for API consumption
         mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
                                        "Camera2Client::CaptureHeap");
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
similarity index 98%
rename from services/camera/libcameraservice/camera2/JpegProcessor.h
rename to services/camera/libcameraservice/api1/client2/JpegProcessor.h
index a38611c..b2c05df 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -23,7 +23,7 @@
 #include <utils/Mutex.h>
 #include <utils/Condition.h>
 #include <gui/CpuConsumer.h>
-#include "Parameters.h"
+
 #include "camera/CameraMetadata.h"
 
 namespace android {
@@ -35,6 +35,7 @@
 namespace camera2 {
 
 class CaptureSequencer;
+class Parameters;
 
 /***
  * Still image capture output image processing
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
similarity index 96%
rename from services/camera/libcameraservice/camera2/Parameters.cpp
rename to services/camera/libcameraservice/api1/client2/Parameters.cpp
index a248b76..0459866 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -292,8 +292,11 @@
             CameraParameters::WHITE_BALANCE_AUTO);
 
     camera_metadata_ro_entry_t availableWhiteBalanceModes =
-        staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
-    {
+        staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+    if (!availableWhiteBalanceModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+                CameraParameters::WHITE_BALANCE_AUTO);
+    } else {
         String8 supportedWhiteBalance;
         bool addComma = false;
         for (size_t i=0; i < availableWhiteBalanceModes.count; i++) {
@@ -353,9 +356,11 @@
             CameraParameters::EFFECT_NONE);
 
     camera_metadata_ro_entry_t availableEffects =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS);
-    if (!availableEffects.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false);
+    if (!availableEffects.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+                CameraParameters::EFFECT_NONE);
+    } else {
         String8 supportedEffects;
         bool addComma = false;
         for (size_t i=0; i < availableEffects.count; i++) {
@@ -413,9 +418,11 @@
             CameraParameters::ANTIBANDING_AUTO);
 
     camera_metadata_ro_entry_t availableAntibandingModes =
-        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES);
-    if (!availableAntibandingModes.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false);
+    if (!availableAntibandingModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                CameraParameters::ANTIBANDING_OFF);
+    } else {
         String8 supportedAntibanding;
         bool addComma = false;
         for (size_t i=0; i < availableAntibandingModes.count; i++) {
@@ -455,9 +462,10 @@
             CameraParameters::SCENE_MODE_AUTO);
 
     camera_metadata_ro_entry_t availableSceneModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
-    if (!availableSceneModes.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+    if (!availableSceneModes.count) {
+        params.remove(CameraParameters::KEY_SCENE_MODE);
+    } else {
         String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO);
         bool addComma = true;
         bool noSceneModes = false;
@@ -548,15 +556,17 @@
         }
     }
 
+    bool isFlashAvailable = false;
     camera_metadata_ro_entry_t flashAvailable =
-        staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
-    if (!flashAvailable.count) return NO_INIT;
+        staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false);
+    if (flashAvailable.count) {
+        isFlashAvailable = flashAvailable.data.u8[0];
+    }
 
     camera_metadata_ro_entry_t availableAeModes =
-        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES);
-    if (!availableAeModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
 
-    if (flashAvailable.data.u8[0]) {
+    if (isFlashAvailable) {
         flashMode = Parameters::FLASH_MODE_OFF;
         params.set(CameraParameters::KEY_FLASH_MODE,
                 CameraParameters::FLASH_MODE_OFF);
@@ -585,14 +595,12 @@
     }
 
     camera_metadata_ro_entry_t minFocusDistance =
-        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 1, 1);
-    if (!minFocusDistance.count) return NO_INIT;
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false);
 
     camera_metadata_ro_entry_t availableAfModes =
-        staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES);
-    if (!availableAfModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false);
 
-    if (minFocusDistance.data.f[0] == 0) {
+    if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) {
         // Fixed-focus lens
         focusMode = Parameters::FOCUS_MODE_FIXED;
         params.set(CameraParameters::KEY_FOCUS_MODE,
@@ -662,7 +670,7 @@
     focusingAreas.add(Parameters::Area(0,0,0,0,0));
 
     camera_metadata_ro_entry_t availableFocalLengths =
-        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
     if (!availableFocalLengths.count) return NO_INIT;
 
     float minFocalLength = availableFocalLengths.data.f[0];
@@ -768,8 +776,8 @@
             CameraParameters::FALSE);
 
     camera_metadata_ro_entry_t availableVideoStabilizationModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
-    if (!availableVideoStabilizationModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
 
     if (availableVideoStabilizationModes.count > 1) {
         params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
@@ -794,9 +802,10 @@
 
     previewCallbackFlags = 0;
     previewCallbackOneShot = false;
+    previewCallbackSurface = false;
 
     camera_metadata_ro_entry_t supportedHardwareLevel =
-        staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+        staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, 0, 0, false);
     if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] ==
             ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) {
         ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId);
@@ -828,14 +837,23 @@
 status_t Parameters::buildFastInfo() {
 
     camera_metadata_ro_entry_t activeArraySize =
-        staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 2);
+        staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
     if (!activeArraySize.count) return NO_INIT;
-    int32_t arrayWidth = activeArraySize.data.i32[0];
-    int32_t arrayHeight = activeArraySize.data.i32[1];
+    int32_t arrayWidth;
+    int32_t arrayHeight;
+    if (activeArraySize.count == 2) {
+        ALOGW("%s: Camera %d: activeArraySize is missing xmin/ymin!",
+                __FUNCTION__, cameraId);
+        arrayWidth = activeArraySize.data.i32[0];
+        arrayHeight = activeArraySize.data.i32[1];
+    } else if (activeArraySize.count == 4) {
+        arrayWidth = activeArraySize.data.i32[2];
+        arrayHeight = activeArraySize.data.i32[3];
+    } else return NO_INIT;
 
     camera_metadata_ro_entry_t availableFaceDetectModes =
-        staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
-    if (!availableFaceDetectModes.count) return NO_INIT;
+        staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
+                false);
 
     uint8_t bestFaceDetectMode =
         ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
@@ -862,19 +880,21 @@
         }
     }
 
+    int32_t maxFaces = 0;
     camera_metadata_ro_entry_t maxFacesDetected =
-        staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 1, 1);
-    if (!maxFacesDetected.count) return NO_INIT;
-
-    int32_t maxFaces = maxFacesDetected.data.i32[0];
+        staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false);
+    if (maxFacesDetected.count) {
+        maxFaces = maxFacesDetected.data.i32[0];
+    }
 
     camera_metadata_ro_entry_t availableSceneModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
     camera_metadata_ro_entry_t sceneModeOverrides =
-        staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES);
+        staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false);
     camera_metadata_ro_entry_t minFocusDistance =
-        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
-    bool fixedLens = (minFocusDistance.data.f[0] == 0);
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false);
+    bool fixedLens = minFocusDistance.count == 0 ||
+        minFocusDistance.data.f[0] == 0;
 
     camera_metadata_ro_entry_t availableFocalLengths =
         staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
@@ -1465,7 +1485,7 @@
     }
     if (validatedParams.wbMode != wbMode) {
         camera_metadata_ro_entry_t availableWbModes =
-            staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+            staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
         for (i = 0; i < availableWbModes.count; i++) {
             if (validatedParams.wbMode == availableWbModes.data.u8[i]) break;
         }
@@ -1496,8 +1516,9 @@
         validatedParams.currentAfTriggerId = -1;
         if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) {
             camera_metadata_ro_entry_t minFocusDistance =
-                staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
-            if (minFocusDistance.data.f[0] == 0) {
+                staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0,
+                        false);
+            if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) {
                 ALOGE("%s: Requested focus mode \"%s\" is not available: "
                         "fixed focus lens",
                         __FUNCTION__,
@@ -1617,7 +1638,8 @@
     validatedParams.videoStabilization = boolFromString(
         newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) );
     camera_metadata_ro_entry_t availableVideoStabilizationModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
     if (validatedParams.videoStabilization &&
             availableVideoStabilizationModes.count == 1) {
         ALOGE("%s: Video stabilization not supported", __FUNCTION__);
@@ -2544,10 +2566,6 @@
             staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
     if (!sensorSize.count) return NO_INIT;
 
-    camera_metadata_ro_entry_t availableFocalLengths =
-            staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
-    if (!availableFocalLengths.count) return NO_INIT;
-
     float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
             fastInfo.arrayHeight;
     float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
similarity index 99%
rename from services/camera/libcameraservice/camera2/Parameters.h
rename to services/camera/libcameraservice/api1/client2/Parameters.h
index be05b54..464830c 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -142,6 +142,7 @@
 
     uint32_t previewCallbackFlags;
     bool previewCallbackOneShot;
+    bool previewCallbackSurface;
 
     bool zslMode;
 
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
similarity index 98%
rename from services/camera/libcameraservice/camera2/StreamingProcessor.cpp
rename to services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index f7a6be7..dfe8580 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -30,10 +30,10 @@
 #include <gui/Surface.h>
 #include <media/hardware/MetadataBufferType.h>
 
-#include "StreamingProcessor.h"
-#include "Camera2Heap.h"
-#include "../Camera2Client.h"
-#include "../CameraDeviceBase.h"
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/StreamingProcessor.h"
+#include "api1/client2/Camera2Heap.h"
 
 namespace android {
 namespace camera2 {
@@ -319,14 +319,13 @@
         // Create CPU buffer queue endpoint. We need one more buffer here so that we can
         // always acquire and free a buffer when the heap is full; otherwise the consumer
         // will have buffers in flight we'll never clear out.
-        mRecordingConsumer = new BufferItemConsumer(
+        sp<BufferQueue> bq = new BufferQueue();
+        mRecordingConsumer = new BufferItemConsumer(bq,
                 GRALLOC_USAGE_HW_VIDEO_ENCODER,
-                mRecordingHeapCount + 1,
-                true);
+                mRecordingHeapCount + 1);
         mRecordingConsumer->setFrameAvailableListener(this);
         mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
-        mRecordingWindow = new Surface(
-            mRecordingConsumer->getProducerInterface());
+        mRecordingWindow = new Surface(bq);
         newConsumer = true;
         // Allocate memory later, since we don't know buffer size until receipt
     }
@@ -617,7 +616,7 @@
     if (client == 0) {
         // Discard frames during shutdown
         BufferItemConsumer::BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
         if (res != OK) {
             if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                 ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
@@ -635,7 +634,7 @@
         SharedParameters::Lock l(client->getParameters());
         Mutex::Autolock m(mMutex);
         BufferItemConsumer::BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer);
+        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
         if (res != OK) {
             if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                 ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
similarity index 99%
rename from services/camera/libcameraservice/camera2/StreamingProcessor.h
rename to services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index 3ec2df7..d879b83 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -21,7 +21,6 @@
 #include <utils/String16.h>
 #include <gui/BufferItemConsumer.h>
 
-#include "Parameters.h"
 #include "camera/CameraMetadata.h"
 
 namespace android {
@@ -32,6 +31,7 @@
 
 namespace camera2 {
 
+class Parameters;
 class Camera2Heap;
 
 /**
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
similarity index 97%
rename from services/camera/libcameraservice/camera2/ZslProcessor.cpp
rename to services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 94059cd..3b118f4 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -27,12 +27,12 @@
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
-
-#include "ZslProcessor.h"
 #include <gui/Surface.h>
-#include "../CameraDeviceBase.h"
-#include "../Camera2Client.h"
 
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor.h"
 
 namespace android {
 namespace camera2 {
@@ -128,14 +128,13 @@
 
     if (mZslConsumer == 0) {
         // Create CPU buffer queue endpoint
-        mZslConsumer = new BufferItemConsumer(
+        sp<BufferQueue> bq = new BufferQueue();
+        mZslConsumer = new BufferItemConsumer(bq,
             GRALLOC_USAGE_HW_CAMERA_ZSL,
-            kZslBufferDepth,
-            true);
+            kZslBufferDepth);
         mZslConsumer->setFrameAvailableListener(this);
         mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
-        mZslWindow = new Surface(
-            mZslConsumer->getProducerInterface());
+        mZslWindow = new Surface(bq);
     }
 
     if (mZslStreamId != NO_STREAM) {
@@ -426,7 +425,7 @@
     }
     ALOGVV("Trying to get next buffer");
     BufferItemConsumer::BufferItem item;
-    res = zslConsumer->acquireBuffer(&item);
+    res = zslConsumer->acquireBuffer(&item, 0);
     if (res != OK) {
         if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
             ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
similarity index 94%
rename from services/camera/libcameraservice/camera2/ZslProcessor.h
rename to services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 27b597e..5fb178f 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -23,12 +23,11 @@
 #include <utils/Mutex.h>
 #include <utils/Condition.h>
 #include <gui/BufferItemConsumer.h>
-#include "Parameters.h"
-#include "FrameProcessor.h"
-#include "camera/CameraMetadata.h"
-#include "Camera2Heap.h"
-#include "../CameraDeviceBase.h"
-#include "ZslProcessorInterface.h"
+#include <camera/CameraMetadata.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "api1/client2/FrameProcessor.h"
 
 namespace android {
 
@@ -37,6 +36,7 @@
 namespace camera2 {
 
 class CaptureSequencer;
+class Parameters;
 
 /***
  * ZSL queue processing
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
similarity index 98%
rename from services/camera/libcameraservice/camera2/ZslProcessor3.cpp
rename to services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 40c77df..7c4da50 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -27,13 +27,13 @@
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
-
-#include "ZslProcessor3.h"
 #include <gui/Surface.h>
-#include "../CameraDeviceBase.h"
-#include "../Camera3Device.h"
-#include "../Camera2Client.h"
 
+#include "common/CameraDeviceBase.h"
+#include "api1/Camera2Client.h"
+#include "api1/client2/CaptureSequencer.h"
+#include "api1/client2/ZslProcessor3.h"
+#include "device3/Camera3Device.h"
 
 namespace android {
 namespace camera2 {
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
similarity index 93%
rename from services/camera/libcameraservice/camera2/ZslProcessor3.h
rename to services/camera/libcameraservice/api1/client2/ZslProcessor3.h
index cb98b99..35b85f5 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor3.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
@@ -23,13 +23,11 @@
 #include <utils/Mutex.h>
 #include <utils/Condition.h>
 #include <gui/BufferItemConsumer.h>
-#include "Parameters.h"
-#include "FrameProcessor.h"
-#include "camera/CameraMetadata.h"
-#include "Camera2Heap.h"
-#include "../CameraDeviceBase.h"
-#include "ZslProcessorInterface.h"
-#include "../camera3/Camera3ZslStream.h"
+#include <camera/CameraMetadata.h>
+
+#include "api1/client2/FrameProcessor.h"
+#include "api1/client2/ZslProcessorInterface.h"
+#include "device3/Camera3ZslStream.h"
 
 namespace android {
 
@@ -38,6 +36,7 @@
 namespace camera2 {
 
 class CaptureSequencer;
+class Parameters;
 
 /***
  * ZSL queue processing
diff --git a/services/camera/libcameraservice/camera2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
similarity index 100%
rename from services/camera/libcameraservice/camera2/ZslProcessorInterface.h
rename to services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
new file mode 100644
index 0000000..414316d
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraDeviceClient"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <cutils/properties.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <gui/Surface.h>
+#include <camera/camera2/CaptureRequest.h>
+
+#include "common/CameraDeviceBase.h"
+#include "api2/CameraDeviceClient.h"
+
+
+
+namespace android {
+using namespace camera2;
+
+CameraDeviceClientBase::CameraDeviceClientBase(
+        const sp<CameraService>& cameraService,
+        const sp<ICameraDeviceCallbacks>& remoteCallback,
+        const String16& clientPackageName,
+        int cameraId,
+        int cameraFacing,
+        int clientPid,
+        uid_t clientUid,
+        int servicePid) :
+    BasicClient(cameraService, remoteCallback->asBinder(), clientPackageName,
+                cameraId, cameraFacing, clientPid, clientUid, servicePid),
+    mRemoteCallback(remoteCallback) {
+}
+void CameraDeviceClientBase::notifyError() {
+    // Thread safe. Don't bother locking.
+    sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
+
+    if (remoteCb != 0) {
+        remoteCb->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+    }
+}
+
+// Interface used by CameraService
+
+CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
+                                   const sp<ICameraDeviceCallbacks>& remoteCallback,
+                                   const String16& clientPackageName,
+                                   int cameraId,
+                                   int cameraFacing,
+                                   int clientPid,
+                                   uid_t clientUid,
+                                   int servicePid) :
+    Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
+                cameraId, cameraFacing, clientPid, clientUid, servicePid),
+    mRequestIdCounter(0) {
+
+    ATRACE_CALL();
+    ALOGI("CameraDeviceClient %d: Opened", cameraId);
+}
+
+status_t CameraDeviceClient::initialize(camera_module_t *module)
+{
+    ATRACE_CALL();
+    status_t res;
+
+    res = Camera2ClientBase::initialize(module);
+    if (res != OK) {
+        return res;
+    }
+
+    String8 threadName;
+    mFrameProcessor = new FrameProcessorBase(mDevice);
+    threadName = String8::format("CDU-%d-FrameProc", mCameraId);
+    mFrameProcessor->run(threadName.string());
+
+    mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+                                      FRAME_PROCESSOR_LISTENER_MAX_ID,
+                                      /*listener*/this);
+
+    return OK;
+}
+
+CameraDeviceClient::~CameraDeviceClient() {
+}
+
+status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request,
+                                         bool streaming) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res;
+
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (request == 0) {
+        ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
+              __FUNCTION__, mCameraId);
+        return BAD_VALUE;
+    }
+
+    CameraMetadata metadata(request->mMetadata);
+
+    if (metadata.isEmpty()) {
+        ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
+               __FUNCTION__, mCameraId);
+        return BAD_VALUE;
+    } else if (request->mSurfaceList.size() == 0) {
+        ALOGE("%s: Camera %d: Requests must have at least one surface target. "
+              "Rejecting request.", __FUNCTION__, mCameraId);
+        return BAD_VALUE;
+    }
+
+    if (!enforceRequestPermissions(metadata)) {
+        // Callee logs
+        return PERMISSION_DENIED;
+    }
+
+    /**
+     * Write in the output stream IDs which we calculate from
+     * the capture request's list of surface targets
+     */
+    Vector<uint8_t> outputStreamIds;
+    outputStreamIds.setCapacity(request->mSurfaceList.size());
+    for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
+        sp<Surface> surface = request->mSurfaceList[i];
+
+        if (surface == 0) continue;
+
+        sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
+        int idx = mStreamMap.indexOfKey(gbp->asBinder());
+
+        // Trying to submit request with surface that wasn't created
+        if (idx == NAME_NOT_FOUND) {
+            ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+                  " we have not called createStream on",
+                  __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        }
+
+        int streamId = mStreamMap.valueAt(idx);
+        outputStreamIds.push_back(streamId);
+        ALOGV("%s: Camera %d: Appending output stream %d to request",
+              __FUNCTION__, mCameraId, streamId);
+    }
+
+    metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
+                    outputStreamIds.size());
+
+    // TODO: @hide ANDROID_REQUEST_ID, or use another request token
+    int32_t requestId = mRequestIdCounter++;
+    metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+    ALOGV("%s: Camera %d: Submitting request with ID %d",
+          __FUNCTION__, mCameraId, requestId);
+
+    if (streaming) {
+        res = mDevice->setStreamingRequest(metadata);
+        if (res != OK) {
+            ALOGE("%s: Camera %d:  Got error %d after trying to set streaming "
+                  "request", __FUNCTION__, mCameraId, res);
+        } else {
+            mStreamingRequestList.push_back(requestId);
+        }
+    } else {
+        res = mDevice->capture(metadata);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Got error %d after trying to set capture",
+                  __FUNCTION__, mCameraId, res);
+        }
+    }
+
+    ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
+    if (res == OK) {
+        return requestId;
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::cancelRequest(int requestId) {
+    ATRACE_CALL();
+    ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
+
+    status_t res;
+
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    Vector<int>::iterator it, end;
+    for (it = mStreamingRequestList.begin(), end = mStreamingRequestList.end();
+         it != end; ++it) {
+        if (*it == requestId) {
+            break;
+        }
+    }
+
+    if (it == end) {
+        ALOGE("%s: Camera%d: Did not find request id %d in list of streaming "
+              "requests", __FUNCTION__, mCameraId, requestId);
+        return BAD_VALUE;
+    }
+
+    res = mDevice->clearStreamingRequest();
+
+    if (res == OK) {
+        ALOGV("%s: Camera %d: Successfully cleared streaming request",
+              __FUNCTION__, mCameraId);
+        mStreamingRequestList.erase(it);
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::deleteStream(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    // Guard against trying to delete non-created streams
+    ssize_t index = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mStreamMap.size(); ++i) {
+        if (streamId == mStreamMap.valueAt(i)) {
+            index = i;
+            break;
+        }
+    }
+
+    if (index == NAME_NOT_FOUND) {
+        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+              "created yet", __FUNCTION__, mCameraId, streamId);
+        return BAD_VALUE;
+    }
+
+    // Also returns BAD_VALUE if stream ID was not valid
+    res = mDevice->deleteStream(streamId);
+
+    if (res == BAD_VALUE) {
+        ALOGE("%s: Camera %d: Unexpected BAD_VALUE when deleting stream, but we"
+              " already checked and the stream ID (%d) should be valid.",
+              __FUNCTION__, mCameraId, streamId);
+    } else if (res == OK) {
+        mStreamMap.removeItemsAt(index);
+
+        ALOGV("%s: Camera %d: Successfully deleted stream ID (%d)",
+              __FUNCTION__, mCameraId, streamId);
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::createStream(int width, int height, int format,
+                      const sp<IGraphicBufferProducer>& bufferProducer)
+{
+    ATRACE_CALL();
+    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    // Don't create multiple streams for the same target surface
+    {
+        ssize_t index = mStreamMap.indexOfKey(bufferProducer->asBinder());
+        if (index != NAME_NOT_FOUND) {
+            ALOGW("%s: Camera %d: Buffer producer already has a stream for it "
+                  "(ID %d)",
+                  __FUNCTION__, mCameraId, index);
+            return ALREADY_EXISTS;
+        }
+    }
+
+    sp<IBinder> binder;
+    sp<ANativeWindow> anw;
+    if (bufferProducer != 0) {
+        binder = bufferProducer->asBinder();
+        anw = new Surface(bufferProducer);
+    }
+
+    // TODO: remove w,h,f since we are ignoring them
+
+    if ((res = anw->query(anw.get(), NATIVE_WINDOW_WIDTH, &width)) != OK) {
+        ALOGE("%s: Camera %d: Failed to query Surface width", __FUNCTION__,
+              mCameraId);
+        return res;
+    }
+    if ((res = anw->query(anw.get(), NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+        ALOGE("%s: Camera %d: Failed to query Surface height", __FUNCTION__,
+              mCameraId);
+        return res;
+    }
+    if ((res = anw->query(anw.get(), NATIVE_WINDOW_FORMAT, &format)) != OK) {
+        ALOGE("%s: Camera %d: Failed to query Surface format", __FUNCTION__,
+              mCameraId);
+        return res;
+    }
+
+    // FIXME: remove this override since the default format should be
+    //       IMPLEMENTATION_DEFINED. b/9487482
+    if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+        format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+        ALOGW("%s: Camera %d: Overriding format 0x%x to IMPLEMENTATION_DEFINED",
+              __FUNCTION__, mCameraId, format);
+        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    }
+
+    // TODO: add startConfigure/stopConfigure call to CameraDeviceBase
+    // this will make it so Camera3Device doesn't call configure_streams
+    // after each call, but only once we are done with all.
+
+    int streamId = -1;
+    if (format == HAL_PIXEL_FORMAT_BLOB) {
+        // JPEG buffers need to be sized for maximum possible compressed size
+        CameraMetadata staticInfo = mDevice->info();
+        camera_metadata_entry_t entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+        if (entry.count == 0) {
+            ALOGE("%s: Camera %d: Can't find maximum JPEG size in "
+                    "static metadata!", __FUNCTION__, mCameraId);
+            return INVALID_OPERATION;
+        }
+        int32_t maxJpegSize = entry.data.i32[0];
+        res = mDevice->createStream(anw, width, height, format, maxJpegSize,
+                &streamId);
+    } else {
+        // All other streams are a known size
+        res = mDevice->createStream(anw, width, height, format, /*size*/0,
+                &streamId);
+    }
+
+    if (res == OK) {
+        mStreamMap.add(bufferProducer->asBinder(), streamId);
+
+        ALOGV("%s: Camera %d: Successfully created a new stream ID %d",
+              __FUNCTION__, mCameraId, streamId);
+        return streamId;
+    }
+
+    return res;
+}
+
+// Create a request object from a template.
+status_t CameraDeviceClient::createDefaultRequest(int templateId,
+                                                  /*out*/
+                                                  CameraMetadata* request)
+{
+    ATRACE_CALL();
+    ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    CameraMetadata metadata;
+    if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
+        request != NULL) {
+
+        request->swap(metadata);
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
+{
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (info != NULL) {
+        *info = mDevice->info(); // static camera metadata
+        // TODO: merge with device-specific camera metadata
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::waitUntilIdle()
+{
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    // FIXME: Also need check repeating burst.
+    if (!mStreamingRequestList.isEmpty()) {
+        ALOGE("%s: Camera %d: Try to waitUntilIdle when there are active streaming requests",
+              __FUNCTION__, mCameraId);
+        return INVALID_OPERATION;
+    }
+    res = mDevice->waitUntilDrained();
+    ALOGV("%s Done", __FUNCTION__);
+
+    return res;
+}
+
+status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
+    String8 result;
+    result.appendFormat("CameraDeviceClient[%d] (%p) PID: %d, dump:\n",
+            mCameraId,
+            getRemoteCallback()->asBinder().get(),
+            mClientPid);
+    result.append("  State: ");
+
+    // TODO: print dynamic/request section from most recent requests
+    mFrameProcessor->dump(fd, args);
+
+    return dumpDevice(fd, args);
+}
+
+// TODO: refactor the code below this with IProCameraUser.
+// it's 100% copy-pasted, so lets not change it right now to make it easier.
+
+void CameraDeviceClient::detachDevice() {
+    if (mDevice == 0) return;
+
+    ALOGV("Camera %d: Stopping processors", mCameraId);
+
+    mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
+                                    FRAME_PROCESSOR_LISTENER_MAX_ID,
+                                    /*listener*/this);
+    mFrameProcessor->requestExit();
+    ALOGV("Camera %d: Waiting for threads", mCameraId);
+    mFrameProcessor->join();
+    ALOGV("Camera %d: Disconnecting device", mCameraId);
+
+    // WORKAROUND: HAL refuses to disconnect while there's streams in flight
+    {
+        mDevice->clearStreamingRequest();
+
+        status_t code;
+        if ((code = mDevice->waitUntilDrained()) != OK) {
+            ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
+                  code);
+        }
+    }
+
+    Camera2ClientBase::detachDevice();
+}
+
+/** Device-related methods */
+void CameraDeviceClient::onFrameAvailable(int32_t frameId,
+                                        const CameraMetadata& frame) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+
+    if (mRemoteCallback != NULL) {
+        ALOGV("%s: frame = %p ", __FUNCTION__, &frame);
+        mRemoteCallback->onResultReceived(frameId, frame);
+    }
+
+}
+
+// TODO: move to Camera2ClientBase
+bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) {
+
+    const int pid = IPCThreadState::self()->getCallingPid();
+    const int selfPid = getpid();
+    camera_metadata_entry_t entry;
+
+    /**
+     * Mixin default important security values
+     * - android.led.transmit = defaulted ON
+     */
+    CameraMetadata staticInfo = mDevice->info();
+    entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
+    for(size_t i = 0; i < entry.count; ++i) {
+        uint8_t led = entry.data.u8[i];
+
+        switch(led) {
+            case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
+                uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
+                if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
+                    metadata.update(ANDROID_LED_TRANSMIT,
+                                    &transmitDefault, 1);
+                }
+                break;
+            }
+        }
+    }
+
+    // We can do anything!
+    if (pid == selfPid) {
+        return true;
+    }
+
+    /**
+     * Permission check special fields in the request
+     * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
+     */
+    entry = metadata.find(ANDROID_LED_TRANSMIT);
+    if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
+        String16 permissionString =
+            String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
+        if (!checkCallingPermission(permissionString)) {
+            const int uid = IPCThreadState::self()->getCallingUid();
+            ALOGE("Permission Denial: "
+                  "can't disable transmit LED pid=%d, uid=%d", pid, uid);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
new file mode 100644
index 0000000..21d633c
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
+#define ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
+
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
+
+#include "CameraService.h"
+#include "common/FrameProcessorBase.h"
+#include "common/Camera2ClientBase.h"
+
+namespace android {
+
+struct CameraDeviceClientBase :
+        public CameraService::BasicClient, public BnCameraDeviceUser
+{
+    typedef ICameraDeviceCallbacks TCamCallbacks;
+
+    const sp<ICameraDeviceCallbacks>& getRemoteCallback() {
+        return mRemoteCallback;
+    }
+
+protected:
+    CameraDeviceClientBase(const sp<CameraService>& cameraService,
+            const sp<ICameraDeviceCallbacks>& remoteCallback,
+            const String16& clientPackageName,
+            int cameraId,
+            int cameraFacing,
+            int clientPid,
+            uid_t clientUid,
+            int servicePid);
+
+    virtual void notifyError();
+
+    sp<ICameraDeviceCallbacks> mRemoteCallback;
+};
+
+/**
+ * Implements the binder ICameraDeviceUser API,
+ * meant for HAL3-public implementation of
+ * android.hardware.photography.CameraDevice
+ */
+class CameraDeviceClient :
+        public Camera2ClientBase<CameraDeviceClientBase>,
+        public camera2::FrameProcessorBase::FilteredListener
+{
+public:
+    /**
+     * ICameraDeviceUser interface (see ICameraDeviceUser for details)
+     */
+
+    // Note that the callee gets a copy of the metadata.
+    virtual int           submitRequest(sp<CaptureRequest> request,
+                                        bool streaming = false);
+    virtual status_t      cancelRequest(int requestId);
+
+    // Returns -EBUSY if device is not idle
+    virtual status_t      deleteStream(int streamId);
+
+    virtual status_t      createStream(
+            int width,
+            int height,
+            int format,
+            const sp<IGraphicBufferProducer>& bufferProducer);
+
+    // Create a request object from a template.
+    virtual status_t      createDefaultRequest(int templateId,
+                                               /*out*/
+                                               CameraMetadata* request);
+
+    // Get the static metadata for the camera
+    // -- Caller owns the newly allocated metadata
+    virtual status_t      getCameraInfo(/*out*/CameraMetadata* info);
+
+    // Wait until all the submitted requests have finished processing
+    virtual status_t      waitUntilIdle();
+    /**
+     * Interface used by CameraService
+     */
+
+    CameraDeviceClient(const sp<CameraService>& cameraService,
+            const sp<ICameraDeviceCallbacks>& remoteCallback,
+            const String16& clientPackageName,
+            int cameraId,
+            int cameraFacing,
+            int clientPid,
+            uid_t clientUid,
+            int servicePid);
+    virtual ~CameraDeviceClient();
+
+    virtual status_t      initialize(camera_module_t *module);
+
+    virtual status_t      dump(int fd, const Vector<String16>& args);
+
+    /**
+     * Interface used by independent components of CameraDeviceClient.
+     */
+protected:
+    /** FilteredListener implementation **/
+    virtual void          onFrameAvailable(int32_t frameId,
+                                           const CameraMetadata& frame);
+    virtual void          detachDevice();
+
+private:
+    /** ICameraDeviceUser interface-related private members */
+
+    /** Preview callback related members */
+    sp<camera2::FrameProcessorBase> mFrameProcessor;
+    static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
+    static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
+
+    /** Utility members */
+    bool enforceRequestPermissions(CameraMetadata& metadata);
+
+    // IGraphicsBufferProducer binder -> Stream ID
+    KeyedVector<sp<IBinder>, int> mStreamMap;
+
+    // Stream ID
+    Vector<int> mStreamingRequestList;
+
+    int32_t mRequestIdCounter;
+};
+
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
similarity index 98%
rename from services/camera/libcameraservice/ProCamera2Client.cpp
rename to services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
index 251fdab..2b583e5 100644
--- a/services/camera/libcameraservice/ProCamera2Client.cpp
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
@@ -24,10 +24,9 @@
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <gui/Surface.h>
-#include "camera2/Parameters.h"
-#include "ProCamera2Client.h"
-#include "camera2/ProFrameProcessor.h"
-#include "CameraDeviceBase.h"
+
+#include "api_pro/ProCamera2Client.h"
+#include "common/CameraDeviceBase.h"
 
 namespace android {
 using namespace camera2;
@@ -62,7 +61,7 @@
     }
 
     String8 threadName;
-    mFrameProcessor = new ProFrameProcessor(mDevice);
+    mFrameProcessor = new FrameProcessorBase(mDevice);
     threadName = String8::format("PC2-%d-FrameProc", mCameraId);
     mFrameProcessor->run(threadName.string());
 
@@ -218,6 +217,7 @@
 }
 
 status_t ProCamera2Client::cancelRequest(int requestId) {
+    (void)requestId;
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
diff --git a/services/camera/libcameraservice/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
similarity index 94%
rename from services/camera/libcameraservice/ProCamera2Client.h
rename to services/camera/libcameraservice/api_pro/ProCamera2Client.h
index faee9f9..0bf6784 100644
--- a/services/camera/libcameraservice/ProCamera2Client.h
+++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
@@ -17,10 +17,10 @@
 #ifndef ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
 #define ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
 
-#include "Camera2Device.h"
 #include "CameraService.h"
-#include "camera2/ProFrameProcessor.h"
-#include "Camera2ClientBase.h"
+#include "common/FrameProcessorBase.h"
+#include "common/Camera2ClientBase.h"
+#include "device2/Camera2Device.h"
 
 namespace android {
 
@@ -31,7 +31,7 @@
  */
 class ProCamera2Client :
         public Camera2ClientBase<CameraService::ProClient>,
-        public camera2::ProFrameProcessor::FilteredListener
+        public camera2::FrameProcessorBase::FilteredListener
 {
 public:
     /**
@@ -105,7 +105,7 @@
     /** IProCameraUser interface-related private members */
 
     /** Preview callback related members */
-    sp<camera2::ProFrameProcessor> mFrameProcessor;
+    sp<camera2::FrameProcessorBase> mFrameProcessor;
     static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
     static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
 
diff --git a/services/camera/libcameraservice/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
similarity index 96%
rename from services/camera/libcameraservice/Camera2ClientBase.cpp
rename to services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 0623b89..060e2a2 100644
--- a/services/camera/libcameraservice/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -24,11 +24,12 @@
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <gui/Surface.h>
-#include "camera2/Parameters.h"
-#include "Camera2ClientBase.h"
-#include "camera2/ProFrameProcessor.h"
 
-#include "Camera2Device.h"
+#include "common/Camera2ClientBase.h"
+
+#include "api2/CameraDeviceClient.h"
+
+#include "CameraDeviceFactory.h"
 
 namespace android {
 using namespace camera2;
@@ -54,7 +55,9 @@
         mSharedCameraCallbacks(remoteCallback)
 {
     ALOGI("Camera %d: Opened", cameraId);
-    mDevice = new Camera2Device(cameraId);
+
+    mDevice = CameraDeviceFactory::createDevice(cameraId);
+    LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
 }
 
 template <typename TClientBase>
@@ -325,5 +328,6 @@
 
 template class Camera2ClientBase<CameraService::ProClient>;
 template class Camera2ClientBase<CameraService::Client>;
+template class Camera2ClientBase<CameraDeviceClientBase>;
 
 } // namespace android
diff --git a/services/camera/libcameraservice/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
similarity index 96%
rename from services/camera/libcameraservice/Camera2ClientBase.h
rename to services/camera/libcameraservice/common/Camera2ClientBase.h
index 9001efb..d23197c 100644
--- a/services/camera/libcameraservice/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -17,13 +17,14 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
 #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
 
-#include "CameraDeviceBase.h"
-#include "CameraService.h"
+#include "common/CameraDeviceBase.h"
 
 namespace android {
 
 class IMemory;
 
+class CameraService;
+
 template <typename TClientBase>
 class Camera2ClientBase :
         public TClientBase,
@@ -101,6 +102,10 @@
 
 protected:
 
+    virtual sp<IBinder> asBinderWrapper() {
+        return IInterface::asBinder();
+    }
+
     virtual status_t      dumpDevice(int fd, const Vector<String16>& args);
 
     /** Binder client interface-related private members */
diff --git a/services/camera/libcameraservice/CameraDeviceBase.cpp b/services/camera/libcameraservice/common/CameraDeviceBase.cpp
similarity index 100%
rename from services/camera/libcameraservice/CameraDeviceBase.cpp
rename to services/camera/libcameraservice/common/CameraDeviceBase.cpp
diff --git a/services/camera/libcameraservice/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
similarity index 100%
rename from services/camera/libcameraservice/CameraDeviceBase.h
rename to services/camera/libcameraservice/common/CameraDeviceBase.h
diff --git a/services/camera/libcameraservice/camera2/ProFrameProcessor.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
similarity index 82%
rename from services/camera/libcameraservice/camera2/ProFrameProcessor.cpp
rename to services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 4012fc5..e7b440a 100644
--- a/services/camera/libcameraservice/camera2/ProFrameProcessor.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -14,29 +14,29 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "Camera2-ProFrameProcessor"
+#define LOG_TAG "Camera2-FrameProcessorBase"
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
-#include "ProFrameProcessor.h"
-#include "../CameraDeviceBase.h"
+#include "common/FrameProcessorBase.h"
+#include "common/CameraDeviceBase.h"
 
 namespace android {
 namespace camera2 {
 
-ProFrameProcessor::ProFrameProcessor(wp<CameraDeviceBase> device) :
+FrameProcessorBase::FrameProcessorBase(wp<CameraDeviceBase> device) :
     Thread(/*canCallJava*/false),
     mDevice(device) {
 }
 
-ProFrameProcessor::~ProFrameProcessor() {
+FrameProcessorBase::~FrameProcessorBase() {
     ALOGV("%s: Exit", __FUNCTION__);
 }
 
-status_t ProFrameProcessor::registerListener(int32_t minId,
+status_t FrameProcessorBase::registerListener(int32_t minId,
         int32_t maxId, wp<FilteredListener> listener) {
     Mutex::Autolock l(mInputMutex);
     ALOGV("%s: Registering listener for frame id range %d - %d",
@@ -46,7 +46,7 @@
     return OK;
 }
 
-status_t ProFrameProcessor::removeListener(int32_t minId,
+status_t FrameProcessorBase::removeListener(int32_t minId,
                                            int32_t maxId,
                                            wp<FilteredListener> listener) {
     Mutex::Autolock l(mInputMutex);
@@ -63,13 +63,13 @@
     return OK;
 }
 
-void ProFrameProcessor::dump(int fd, const Vector<String16>& /*args*/) {
+void FrameProcessorBase::dump(int fd, const Vector<String16>& /*args*/) {
     String8 result("    Latest received frame:\n");
     write(fd, result.string(), result.size());
     mLastFrame.dump(fd, 2, 6);
 }
 
-bool ProFrameProcessor::threadLoop() {
+bool FrameProcessorBase::threadLoop() {
     status_t res;
 
     sp<CameraDeviceBase> device;
@@ -82,14 +82,14 @@
     if (res == OK) {
         processNewFrames(device);
     } else if (res != TIMED_OUT) {
-        ALOGE("ProFrameProcessor: Error waiting for new "
+        ALOGE("FrameProcessorBase: Error waiting for new "
                 "frames: %s (%d)", strerror(-res), res);
     }
 
     return true;
 }
 
-void ProFrameProcessor::processNewFrames(const sp<CameraDeviceBase> &device) {
+void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
     status_t res;
     ATRACE_CALL();
     CameraMetadata frame;
@@ -125,14 +125,14 @@
     return;
 }
 
-bool ProFrameProcessor::processSingleFrame(CameraMetadata &frame,
+bool FrameProcessorBase::processSingleFrame(CameraMetadata &frame,
                                            const sp<CameraDeviceBase> &device) {
     ALOGV("%s: Camera %d: Process single frame (is empty? %d)",
           __FUNCTION__, device->getId(), frame.isEmpty());
     return processListeners(frame, device) == OK;
 }
 
-status_t ProFrameProcessor::processListeners(const CameraMetadata &frame,
+status_t FrameProcessorBase::processListeners(const CameraMetadata &frame,
         const sp<CameraDeviceBase> &device) {
     ATRACE_CALL();
     camera_metadata_ro_entry_t entry;
@@ -143,7 +143,7 @@
                 __FUNCTION__, device->getId());
         return BAD_VALUE;
     }
-    int32_t frameId = entry.data.i32[0];
+    int32_t requestId = entry.data.i32[0];
 
     List<sp<FilteredListener> > listeners;
     {
@@ -151,8 +151,8 @@
 
         List<RangeListener>::iterator item = mRangeListeners.begin();
         while (item != mRangeListeners.end()) {
-            if (frameId >= item->minId &&
-                    frameId < item->maxId) {
+            if (requestId >= item->minId &&
+                    requestId < item->maxId) {
                 sp<FilteredListener> listener = item->listener.promote();
                 if (listener == 0) {
                     item = mRangeListeners.erase(item);
@@ -167,7 +167,7 @@
     ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
     List<sp<FilteredListener> >::iterator item = listeners.begin();
     for (; item != listeners.end(); item++) {
-        (*item)->onFrameAvailable(frameId, frame);
+        (*item)->onFrameAvailable(requestId, frame);
     }
     return OK;
 }
diff --git a/services/camera/libcameraservice/camera2/ProFrameProcessor.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
similarity index 94%
rename from services/camera/libcameraservice/camera2/ProFrameProcessor.h
rename to services/camera/libcameraservice/common/FrameProcessorBase.h
index b82942c..1e46beb 100644
--- a/services/camera/libcameraservice/camera2/ProFrameProcessor.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -33,10 +33,10 @@
 /* Output frame metadata processing thread.  This thread waits for new
  * frames from the device, and analyzes them as necessary.
  */
-class ProFrameProcessor: public Thread {
+class FrameProcessorBase: public Thread {
   public:
-    ProFrameProcessor(wp<CameraDeviceBase> device);
-    virtual ~ProFrameProcessor();
+    FrameProcessorBase(wp<CameraDeviceBase> device);
+    virtual ~FrameProcessorBase();
 
     struct FilteredListener: virtual public RefBase {
         virtual void onFrameAvailable(int32_t frameId,
diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
similarity index 100%
rename from services/camera/libcameraservice/CameraHardwareInterface.h
rename to services/camera/libcameraservice/device1/CameraHardwareInterface.h
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
similarity index 100%
rename from services/camera/libcameraservice/Camera2Device.cpp
rename to services/camera/libcameraservice/device2/Camera2Device.cpp
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
similarity index 99%
rename from services/camera/libcameraservice/Camera2Device.h
rename to services/camera/libcameraservice/device2/Camera2Device.h
index 372ce9f..8945ec2 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -22,7 +22,7 @@
 #include <utils/List.h>
 #include <utils/Mutex.h>
 
-#include "CameraDeviceBase.h"
+#include "common/CameraDeviceBase.h"
 
 namespace android {
 
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
similarity index 97%
rename from services/camera/libcameraservice/Camera3Device.cpp
rename to services/camera/libcameraservice/device3/Camera3Device.cpp
index 353fe74..0a4a24c 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -40,9 +40,11 @@
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <utils/Timers.h>
-#include "Camera3Device.h"
-#include "camera3/Camera3OutputStream.h"
-#include "camera3/Camera3InputStream.h"
+
+#include "device3/Camera3Device.h"
+#include "device3/Camera3OutputStream.h"
+#include "device3/Camera3InputStream.h"
+#include "device3/Camera3ZslStream.h"
 
 using namespace android::camera3;
 
@@ -128,7 +130,10 @@
 
     /** Initialize device with callback functions */
 
+    ATRACE_BEGIN("camera3->initialize");
     res = device->ops->initialize(device, this);
+    ATRACE_END();
+
     if (res != OK) {
         SET_ERR_L("Unable to initialize HAL device: %s (%d)",
                 strerror(-res), res);
@@ -140,7 +145,9 @@
 
     mVendorTagOps.get_camera_vendor_section_name = NULL;
 
+    ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
     device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
+    ATRACE_END();
 
     if (mVendorTagOps.get_camera_vendor_section_name != NULL) {
         res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps);
@@ -736,7 +743,7 @@
 status_t Camera3Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
+    ALOGV("%s: for template %d", __FUNCTION__, templateId);
     Mutex::Autolock l(mLock);
 
     switch (mStatus) {
@@ -756,8 +763,10 @@
     }
 
     const camera_metadata_t *rawRequest;
+    ATRACE_BEGIN("camera3->construct_default_request_settings");
     rawRequest = mHal3Device->ops->construct_default_request_settings(
         mHal3Device, templateId);
+    ATRACE_END();
     if (rawRequest == NULL) {
         SET_ERR_L("HAL is unable to construct default settings for template %d",
                 templateId);
@@ -1072,8 +1081,9 @@
 
     // Do the HAL configuration; will potentially touch stream
     // max_buffers, usage, priv fields.
-
+    ATRACE_BEGIN("camera3->configure_streams");
     res = mHal3Device->ops->configure_streams(mHal3Device, &config);
+    ATRACE_END();
 
     if (res != OK) {
         SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
@@ -1228,6 +1238,7 @@
         }
 
         if (request.haveResultMetadata && request.numBuffersLeft == 0) {
+            ATRACE_ASYNC_END("frame capture", frameNumber);
             mInFlightMap.removeItemsAt(idx, 1);
         }
 
@@ -1271,8 +1282,7 @@
         if (entry.count == 0) {
             SET_ERR("No timestamp provided by HAL for frame %d!",
                     frameNumber);
-        }
-        if (timestamp != entry.data.i64[0]) {
+        } else if (timestamp != entry.data.i64[0]) {
             SET_ERR("Timestamp mismatch between shutter notify and result"
                     " metadata for frame %d (%lld vs %lld respectively)",
                     frameNumber, timestamp, entry.data.i64[0]);
@@ -1304,6 +1314,7 @@
 
 
 void Camera3Device::notify(const camera3_notify_msg *msg) {
+    ATRACE_CALL();
     NotificationListener *listener;
     {
         Mutex::Autolock l(mOutputLock);
@@ -1324,6 +1335,9 @@
                                   msg->message.error.error_stream);
                 streamId = stream->getId();
             }
+            ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+                    mId, __FUNCTION__, msg->message.error.frame_number,
+                    streamId, msg->message.error.error_code);
             if (listener != NULL) {
                 listener->notifyError(msg->message.error.error_code,
                         msg->message.error.frame_number, streamId);
@@ -1359,7 +1373,8 @@
                         frameNumber);
                 break;
             }
-
+            ALOGVV("Camera %d: %s: Shutter fired for frame %d at %lld",
+                    mId, __FUNCTION__, frameNumber, timestamp);
             // Call listener, if any
             if (listener != NULL) {
                 listener->notifyShutter(frameNumber, timestamp);
@@ -1480,6 +1495,7 @@
 }
 
 status_t Camera3Device::RequestThread::waitUntilPaused(nsecs_t timeout) {
+    ATRACE_CALL();
     status_t res;
     Mutex::Autolock l(mPauseLock);
     while (!mPaused) {
@@ -1626,8 +1642,11 @@
     }
 
     // Submit request and block until ready for next one
-
+    ATRACE_ASYNC_BEGIN("frame capture", request.frame_number);
+    ATRACE_BEGIN("camera3->process_capture_request");
     res = mHal3Device->ops->process_capture_request(mHal3Device, &request);
+    ATRACE_END();
+
     if (res != OK) {
         SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
                 " device: %s (%d)", request.frame_number, strerror(-res), res);
diff --git a/services/camera/libcameraservice/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
similarity index 98%
rename from services/camera/libcameraservice/Camera3Device.h
rename to services/camera/libcameraservice/device3/Camera3Device.h
index 2328f89..76c08ae 100644
--- a/services/camera/libcameraservice/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -22,13 +22,10 @@
 #include <utils/List.h>
 #include <utils/Mutex.h>
 #include <utils/Thread.h>
+#include <utils/KeyedVector.h>
+#include <hardware/camera3.h>
 
-#include "CameraDeviceBase.h"
-#include "camera3/Camera3Stream.h"
-#include "camera3/Camera3OutputStream.h"
-#include "camera3/Camera3ZslStream.h"
-
-#include "hardware/camera3.h"
+#include "common/CameraDeviceBase.h"
 
 /**
  * Function pointer types with C calling convention to
@@ -46,6 +43,15 @@
 
 namespace android {
 
+namespace camera3 {
+
+class Camera3Stream;
+class Camera3ZslStream;
+class Camera3OutputStreamInterface;
+class Camera3StreamInterface;
+
+}
+
 /**
  * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0
  */
diff --git a/services/camera/libcameraservice/camera3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
similarity index 100%
rename from services/camera/libcameraservice/camera3/Camera3IOStreamBase.cpp
rename to services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
diff --git a/services/camera/libcameraservice/camera3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
similarity index 97%
rename from services/camera/libcameraservice/camera3/Camera3IOStreamBase.h
rename to services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 74c4484..9432a59 100644
--- a/services/camera/libcameraservice/camera3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -77,6 +77,8 @@
 
     virtual size_t   getBufferCountLocked();
 
+    virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+
     status_t getBufferPreconditionCheckLocked() const;
     status_t returnBufferPreconditionCheckLocked() const;
 
diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
similarity index 94%
rename from services/camera/libcameraservice/camera3/Camera3InputStream.cpp
rename to services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 13e9c83..c80f512 100644
--- a/services/camera/libcameraservice/camera3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -182,10 +182,6 @@
     return OK;
 }
 
-sp<IGraphicBufferProducer> Camera3InputStream::getProducerInterface() const {
-    return mConsumer->getProducerInterface();
-}
-
 void Camera3InputStream::dump(int fd, const Vector<String16> &args) const {
     (void) args;
     String8 lines;
@@ -211,9 +207,9 @@
     mFrameCount = 0;
 
     if (mConsumer.get() == 0) {
-        mConsumer = new BufferItemConsumer(camera3_stream::usage,
-                                           mTotalBufferCount,
-                                           /*synchronousMode*/true);
+        sp<BufferQueue> bq = new BufferQueue();
+        mConsumer = new BufferItemConsumer(bq, camera3_stream::usage,
+                                           mTotalBufferCount);
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
     }
 
@@ -234,6 +230,12 @@
     return OK;
 }
 
+status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) {
+    // Per HAL3 spec, input streams have 0 for their initial usage field.
+    *usage = 0;
+    return OK;
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/camera3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
similarity index 89%
rename from services/camera/libcameraservice/camera3/Camera3InputStream.h
rename to services/camera/libcameraservice/device3/Camera3InputStream.h
index 8adda88..681d684 100644
--- a/services/camera/libcameraservice/camera3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -44,13 +44,6 @@
 
     virtual void     dump(int fd, const Vector<String16> &args) const;
 
-    /**
-     * Get the producer interface for this stream, to hand off to a producer.
-     * The producer must be connected to the provided interface before
-     * finishConfigure is called on this stream.
-     */
-    sp<IGraphicBufferProducer> getProducerInterface() const;
-
   private:
 
     typedef BufferItemConsumer::BufferItem BufferItem;
@@ -79,6 +72,8 @@
 
     virtual status_t configureQueueLocked();
 
+    virtual status_t getEndpointUsage(uint32_t *usage);
+
 }; // class Camera3InputStream
 
 }; // namespace camera3
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
similarity index 94%
rename from services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
rename to services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 2efeede..35cb5ba 100644
--- a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -301,8 +301,13 @@
         return res;
     }
 
-    ALOGV("%s: Consumer wants %d buffers", __FUNCTION__,
-            maxConsumerBuffers);
+    ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
+            maxConsumerBuffers, camera3_stream::max_buffers);
+    if (camera3_stream::max_buffers == 0) {
+        ALOGE("%s: Camera HAL requested max_buffer count: %d, requires at least 1",
+                __FUNCTION__, camera3_stream::max_buffers);
+        return INVALID_OPERATION;
+    }
 
     mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers;
     mDequeuedBufferCount = 0;
@@ -359,6 +364,17 @@
     return OK;
 }
 
+status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) {
+
+    status_t res;
+    int32_t u = 0;
+    res = mConsumer->query(mConsumer.get(),
+            NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
+    *usage = u;
+
+    return res;
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
similarity index 97%
rename from services/camera/libcameraservice/camera3/Camera3OutputStream.h
rename to services/camera/libcameraservice/device3/Camera3OutputStream.h
index 774fbdd..6cbb9f4 100644
--- a/services/camera/libcameraservice/camera3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -92,6 +92,9 @@
 
     virtual status_t configureQueueLocked();
     virtual status_t disconnectLocked();
+
+    virtual status_t getEndpointUsage(uint32_t *usage);
+
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
similarity index 100%
rename from services/camera/libcameraservice/camera3/Camera3OutputStreamInterface.h
rename to services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
similarity index 95%
rename from services/camera/libcameraservice/camera3/Camera3Stream.cpp
rename to services/camera/libcameraservice/device3/Camera3Stream.cpp
index f05658a..a6872aa 100644
--- a/services/camera/libcameraservice/camera3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -77,7 +77,9 @@
 }
 
 camera3_stream* Camera3Stream::startConfiguration() {
+    ATRACE_CALL();
     Mutex::Autolock l(mLock);
+    status_t res;
 
     switch (mState) {
         case STATE_ERROR:
@@ -107,8 +109,15 @@
             return NULL;
     }
 
-    oldUsage = usage;
-    oldMaxBuffers = max_buffers;
+    oldUsage = camera3_stream::usage;
+    oldMaxBuffers = camera3_stream::max_buffers;
+
+    res = getEndpointUsage(&(camera3_stream::usage));
+    if (res != OK) {
+        ALOGE("%s: Cannot query consumer endpoint usage!",
+                __FUNCTION__);
+        return NULL;
+    }
 
     if (mState == STATE_CONSTRUCTED) {
         mState = STATE_IN_CONFIG;
@@ -125,6 +134,7 @@
 }
 
 status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) {
+    ATRACE_CALL();
     Mutex::Autolock l(mLock);
     switch (mState) {
         case STATE_ERROR:
@@ -147,8 +157,8 @@
     // Check if the stream configuration is unchanged, and skip reallocation if
     // so. As documented in hardware/camera3.h:configure_streams().
     if (mState == STATE_IN_RECONFIG &&
-            oldUsage == usage &&
-            oldMaxBuffers == max_buffers) {
+            oldUsage == camera3_stream::usage &&
+            oldMaxBuffers == camera3_stream::max_buffers) {
         mState = STATE_CONFIGURED;
         return OK;
     }
@@ -312,8 +322,10 @@
         // Got all buffers, register with HAL
         ALOGV("%s: Registering %d buffers with camera HAL",
                 __FUNCTION__, bufferCount);
+        ATRACE_BEGIN("camera3->register_stream_buffers");
         res = hal3Device->ops->register_stream_buffers(hal3Device,
                 &bufferSet);
+        ATRACE_END();
     }
 
     // Return all valid buffers to stream, in ERROR state to indicate
diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
similarity index 98%
rename from services/camera/libcameraservice/camera3/Camera3Stream.h
rename to services/camera/libcameraservice/device3/Camera3Stream.h
index 69d81e4..b64fd86 100644
--- a/services/camera/libcameraservice/camera3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -263,6 +263,10 @@
     // Get the total number of buffers in the queue
     virtual size_t   getBufferCountLocked() = 0;
 
+    // Get the usage flags for the other endpoint, or return
+    // INVALID_OPERATION if they cannot be obtained.
+    virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+
   private:
     uint32_t oldUsage;
     uint32_t oldMaxBuffers;
diff --git a/services/camera/libcameraservice/camera3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
similarity index 100%
rename from services/camera/libcameraservice/camera3/Camera3StreamBufferListener.h
rename to services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
diff --git a/services/camera/libcameraservice/camera3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
similarity index 100%
rename from services/camera/libcameraservice/camera3/Camera3StreamInterface.h
rename to services/camera/libcameraservice/device3/Camera3StreamInterface.h
diff --git a/services/camera/libcameraservice/camera3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
similarity index 97%
rename from services/camera/libcameraservice/camera3/Camera3ZslStream.cpp
rename to services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 8790c8c..04f5dc5 100644
--- a/services/camera/libcameraservice/camera3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -113,11 +113,11 @@
         Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
                             width, height,
                             HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED),
-        mDepth(depth),
-        mProducer(new RingBufferConsumer(GRALLOC_USAGE_HW_CAMERA_ZSL,
-                                         depth)) {
+        mDepth(depth) {
 
-        mConsumer = new Surface(mProducer->getProducerInterface());
+    sp<BufferQueue> bq = new BufferQueue();
+    mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, depth);
+    mConsumer = new Surface(bq);
 }
 
 Camera3ZslStream::~Camera3ZslStream() {
diff --git a/services/camera/libcameraservice/camera3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
similarity index 100%
rename from services/camera/libcameraservice/camera3/Camera3ZslStream.h
rename to services/camera/libcameraservice/device3/Camera3ZslStream.h
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index cd39bad..ebc7ea7 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -34,14 +34,14 @@
 
 namespace android {
 
-RingBufferConsumer::RingBufferConsumer(uint32_t consumerUsage,
+RingBufferConsumer::RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer,
+        uint32_t consumerUsage,
         int bufferCount) :
-    ConsumerBase(new BufferQueue(true)),
+    ConsumerBase(consumer),
     mBufferCount(bufferCount)
 {
-    mBufferQueue->setConsumerUsageBits(consumerUsage);
-    mBufferQueue->setSynchronousMode(true);
-    mBufferQueue->setMaxAcquiredBufferCount(bufferCount);
+    mConsumer->setConsumerUsageBits(consumerUsage);
+    mConsumer->setMaxAcquiredBufferCount(bufferCount);
 
     assert(bufferCount > 0);
 }
@@ -52,7 +52,7 @@
 void RingBufferConsumer::setName(const String8& name) {
     Mutex::Autolock _l(mMutex);
     mName = name;
-    mBufferQueue->setConsumerName(name);
+    mConsumer->setConsumerName(name);
 }
 
 sp<PinnedBufferItem> RingBufferConsumer::pinSelectedBuffer(
@@ -214,7 +214,11 @@
         // In case the object was never pinned, pass the acquire fence
         // back to the release fence. If the fence was already waited on,
         // it'll just be a no-op to wait on it again.
-        err = addReleaseFenceLocked(item.mBuf, item.mFence);
+
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = addReleaseFenceLocked(item.mBuf,
+                item.mGraphicBuffer, item.mFence);
 
         if (err != OK) {
             BI_LOGE("Failed to add release fence to buffer "
@@ -226,7 +230,9 @@
         BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld",
                 item.mTimestamp, item.mFrameNumber);
 
-        err = releaseBufferLocked(item.mBuf,
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = releaseBufferLocked(item.mBuf, item.mGraphicBuffer,
                                   EGL_NO_DISPLAY,
                                   EGL_NO_SYNC_KHR);
         if (err != OK) {
@@ -278,7 +284,7 @@
         /**
          * Acquire new frame
          */
-        err = acquireBufferLocked(&item);
+        err = acquireBufferLocked(&item, 0);
         if (err != OK) {
             if (err != NO_BUFFER_AVAILABLE) {
                 BI_LOGE("Error acquiring buffer: %s (%d)", strerror(err), err);
@@ -310,7 +316,8 @@
 
         RingBufferItem& find = *it;
         if (item.mGraphicBuffer == find.mGraphicBuffer) {
-            status_t res = addReleaseFenceLocked(item.mBuf, item.mFence);
+            status_t res = addReleaseFenceLocked(item.mBuf,
+                    item.mGraphicBuffer, item.mFence);
 
             if (res != OK) {
                 BI_LOGE("Failed to add release fence to buffer "
@@ -336,17 +343,17 @@
 
 status_t RingBufferConsumer::setDefaultBufferSize(uint32_t w, uint32_t h) {
     Mutex::Autolock _l(mMutex);
-    return mBufferQueue->setDefaultBufferSize(w, h);
+    return mConsumer->setDefaultBufferSize(w, h);
 }
 
 status_t RingBufferConsumer::setDefaultBufferFormat(uint32_t defaultFormat) {
     Mutex::Autolock _l(mMutex);
-    return mBufferQueue->setDefaultBufferFormat(defaultFormat);
+    return mConsumer->setDefaultBufferFormat(defaultFormat);
 }
 
 status_t RingBufferConsumer::setConsumerUsage(uint32_t usage) {
     Mutex::Autolock _l(mMutex);
-    return mBufferQueue->setConsumerUsageBits(usage);
+    return mConsumer->setConsumerUsageBits(usage);
 }
 
 } // namespace android
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index 454fbae..b4ad824 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -63,7 +63,7 @@
     // the consumer usage flags passed to the graphics allocator. The
     // bufferCount parameter specifies how many buffers can be pinned for user
     // access at the same time.
-    RingBufferConsumer(uint32_t consumerUsage,
+    RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer, uint32_t consumerUsage,
             int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS);
 
     virtual ~RingBufferConsumer();
@@ -72,8 +72,6 @@
     // log messages.
     void setName(const String8& name);
 
-    sp<IGraphicBufferProducer> getProducerInterface() const { return getBufferQueue(); }
-
     // setDefaultBufferSize is used to set the size of buffers returned by
     // requestBuffers when a with and height of zero is requested.
     status_t setDefaultBufferSize(uint32_t w, uint32_t h);