Merge "libstagefright: export profiling results directly to xml." into mnc-dev
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 3a9fb4c..84e0d1c 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -71,14 +71,13 @@
     // deadlock if we call any method of ICamera here.
 }
 
-sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName,
-        int clientUid)
+sp<Camera> Camera::connect(int cameraId, const String16& opPackageName, int clientUid)
 {
-    return CameraBaseT::connect(cameraId, clientPackageName, clientUid);
+    return CameraBaseT::connect(cameraId, opPackageName, clientUid);
 }
 
 status_t Camera::connectLegacy(int cameraId, int halVersion,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int clientUid,
         sp<Camera>& camera)
 {
@@ -89,7 +88,7 @@
     const sp<ICameraService>& cs = CameraBaseT::getCameraService();
 
     if (cs != 0) {
-        status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
+        status = cs.get()->connectLegacy(cl, cameraId, halVersion, opPackageName,
                                         clientUid, /*out*/c->mCamera);
     }
     if (status == OK && c->mCamera != 0) {
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 5d50aa8..0dc0276 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -91,7 +91,7 @@
 
 template <typename TCam, typename TCamTraits>
 sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId,
-                                               const String16& clientPackageName,
+                                               const String16& opPackageName,
                                                int clientUid)
 {
     ALOGV("%s: connect", __FUNCTION__);
@@ -102,7 +102,7 @@
 
     if (cs != 0) {
         TCamConnectService fnConnectService = TCamTraits::fnConnectService;
-        status = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
+        status = (cs.get()->*fnConnectService)(cl, cameraId, opPackageName, clientUid,
                                              /*out*/ c->mCamera);
     }
     if (status == OK && c->mCamera != 0) {
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index 51a775b..192e40d 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -164,7 +164,7 @@
 
     // connect to camera service (android.hardware.Camera)
     virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
-                             const String16 &clientPackageName, int clientUid,
+                             const String16& opPackageName, int clientUid,
                              /*out*/
                              sp<ICamera>& device)
     {
@@ -172,7 +172,7 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(cameraClient));
         data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
+        data.writeString16(opPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT, data, &reply);
 
@@ -187,7 +187,7 @@
     // connect to camera service (android.hardware.Camera)
     virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
                              int halVersion,
-                             const String16 &clientPackageName, int clientUid,
+                             const String16& opPackageName, int clientUid,
                              /*out*/sp<ICamera>& device)
     {
         Parcel data, reply;
@@ -195,7 +195,7 @@
         data.writeStrongBinder(IInterface::asBinder(cameraClient));
         data.writeInt32(cameraId);
         data.writeInt32(halVersion);
-        data.writeString16(clientPackageName);
+        data.writeString16(opPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
 
@@ -225,7 +225,7 @@
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
-            const String16& clientPackageName,
+            const String16& opPackageName,
             int clientUid,
             /*out*/
             sp<ICameraDeviceUser>& device)
@@ -234,7 +234,7 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(cameraCb));
         data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
+        data.writeString16(opPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
 
@@ -374,11 +374,11 @@
             sp<ICameraClient> cameraClient =
                     interface_cast<ICameraClient>(data.readStrongBinder());
             int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
+            const String16 opPackageName = data.readString16();
             int32_t clientUid = data.readInt32();
             sp<ICamera> camera;
             status_t status = connect(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
+                    opPackageName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
@@ -394,11 +394,11 @@
             sp<ICameraDeviceCallbacks> cameraClient =
                 interface_cast<ICameraDeviceCallbacks>(data.readStrongBinder());
             int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
+            const String16 opPackageName = data.readString16();
             int32_t clientUid = data.readInt32();
             sp<ICameraDeviceUser> camera;
             status_t status = connectDevice(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
+                    opPackageName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
@@ -454,11 +454,11 @@
                     interface_cast<ICameraClient>(data.readStrongBinder());
             int32_t cameraId = data.readInt32();
             int32_t halVersion = data.readInt32();
-            const String16 clientName = data.readString16();
+            const String16 opPackageName = data.readString16();
             int32_t clientUid = data.readInt32();
             sp<ICamera> camera;
             status_t status = connectLegacy(cameraClient, cameraId, halVersion,
-                    clientName, clientUid, /*out*/camera);
+                    opPackageName, clientUid, /*out*/camera);
             reply->writeNoException();
             reply->writeInt32(status);
             if (camera != NULL) {
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 0e3bc68..20c0094 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -17,7 +17,8 @@
 	$(TOP)/frameworks/native/include/media/openmax \
 	external/jpeg \
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -40,7 +41,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -63,7 +65,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -87,7 +90,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -110,7 +114,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -133,7 +138,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -157,7 +163,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -199,7 +206,8 @@
 LOCAL_STATIC_LIBRARIES:= \
 	libstagefright_mediafilter
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -222,7 +230,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index ac1a547..50913cd 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -21,6 +21,7 @@
 #include "SimplePlayer.h"
 
 #include <gui/Surface.h>
+
 #include <media/AudioTrack.h>
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
@@ -29,7 +30,6 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/NuMediaExtractor.h>
 
 namespace android {
@@ -73,8 +73,7 @@
         surface = new Surface(bufferProducer);
     }
 
-    msg->setObject(
-            "native-window", new NativeWindowWrapper(surface));
+    msg->setObject("surface", surface);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
@@ -132,10 +131,8 @@
                 err = INVALID_OPERATION;
             } else {
                 sp<RefBase> obj;
-                CHECK(msg->findObject("native-window", &obj));
-
-                mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get());
-
+                CHECK(msg->findObject("surface", &obj));
+                mSurface = static_cast<Surface *>(obj.get());
                 err = OK;
             }
 
@@ -324,7 +321,7 @@
 
         err = state->mCodec->configure(
                 format,
-                isVideo ? mNativeWindow->getSurfaceTextureClient() : NULL,
+                isVideo ? mSurface : NULL,
                 NULL /* crypto */,
                 0 /* flags */);
 
@@ -411,7 +408,7 @@
     mStateByTrackIndex.clear();
     mCodecLooper.clear();
     mExtractor.clear();
-    mNativeWindow.clear();
+    mSurface.clear();
     mPath.clear();
 
     return OK;
@@ -428,12 +425,12 @@
             err = state->mCodec->dequeueInputBuffer(&index);
 
             if (err == OK) {
-                ALOGV("dequeued input buffer on track %d",
+                ALOGV("dequeued input buffer on track %zu",
                       mStateByTrackIndex.keyAt(i));
 
                 state->mAvailInputBufferIndices.push_back(index);
             } else {
-                ALOGV("dequeueInputBuffer on track %d returned %d",
+                ALOGV("dequeueInputBuffer on track %zu returned %d",
                       mStateByTrackIndex.keyAt(i), err);
             }
         } while (err == OK);
@@ -448,7 +445,7 @@
                     &info.mFlags);
 
             if (err == OK) {
-                ALOGV("dequeued output buffer on track %d",
+                ALOGV("dequeued output buffer on track %zu",
                       mStateByTrackIndex.keyAt(i));
 
                 state->mAvailOutputBufferInfos.push_back(info);
@@ -459,7 +456,7 @@
                 err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
                 CHECK_EQ(err, (status_t)OK);
             } else {
-                ALOGV("dequeueOutputBuffer on track %d returned %d",
+                ALOGV("dequeueOutputBuffer on track %zu returned %d",
                       mStateByTrackIndex.keyAt(i), err);
             }
         } while (err == OK
@@ -502,7 +499,7 @@
                     0);
             CHECK_EQ(err, (status_t)OK);
 
-            ALOGV("enqueued input data on track %d", trackIndex);
+            ALOGV("enqueued input data on track %zu", trackIndex);
 
             err = mExtractor->advance();
             CHECK_EQ(err, (status_t)OK);
@@ -528,8 +525,8 @@
                 bool release = true;
 
                 if (lateByUs > 30000ll) {
-                    ALOGI("track %d buffer late by %lld us, dropping.",
-                          mStateByTrackIndex.keyAt(i), lateByUs);
+                    ALOGI("track %zu buffer late by %lld us, dropping.",
+                          mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                     state->mCodec->releaseOutputBuffer(info->mIndex);
                 } else {
                     if (state->mAudioTrack != NULL) {
@@ -558,8 +555,8 @@
                     break;
                 }
             } else {
-                ALOGV("track %d buffer early by %lld us.",
-                      mStateByTrackIndex.keyAt(i), -lateByUs);
+                ALOGV("track %zu buffer early by %lld us.",
+                      mStateByTrackIndex.keyAt(i), (long long)-lateByUs);
                 break;
             }
         }
@@ -569,7 +566,7 @@
 }
 
 status_t SimplePlayer::onOutputFormatChanged(
-        size_t trackIndex, CodecState *state) {
+        size_t trackIndex __unused, CodecState *state) {
     sp<AMessage> format;
     status_t err = state->mCodec->getOutputFormat(&format);
 
@@ -640,7 +637,7 @@
     if (delayUs > 2000ll) {
         ALOGW("AudioTrack::write took %lld us, numFramesAvailableToWrite=%u, "
               "numFramesWritten=%u",
-              delayUs, numFramesAvailableToWrite, numFramesWritten);
+              (long long)delayUs, numFramesAvailableToWrite, numFramesWritten);
     }
 
     info->mOffset += nbytes;
diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h
index ce993e8..ae9dfd2 100644
--- a/cmds/stagefright/SimplePlayer.h
+++ b/cmds/stagefright/SimplePlayer.h
@@ -25,8 +25,8 @@
 struct AudioTrack;
 class IGraphicBufferProducer;
 struct MediaCodec;
-struct NativeWindowWrapper;
 struct NuMediaExtractor;
+class Surface;
 
 struct SimplePlayer : public AHandler {
     SimplePlayer();
@@ -84,7 +84,7 @@
 
     State mState;
     AString mPath;
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
 
     sp<NuMediaExtractor> mExtractor;
     sp<ALooper> mCodecLooper;
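
The SimplePlayer and sf2 hunks above drop the NativeWindowWrapper indirection and carry a plain sp<Surface> in the AMessage under the "surface" key. A minimal sketch of that pattern, assuming the stagefright foundation headers; the helper names below are illustrative and not part of the patch:

    #include <gui/IGraphicBufferProducer.h>
    #include <gui/Surface.h>
    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Sender side: wrap the producer in a Surface and attach it to the message.
    static void attachSurface(const sp<AMessage> &msg,
                              const sp<IGraphicBufferProducer> &bufferProducer) {
        sp<Surface> surface = new Surface(bufferProducer);
        msg->setObject("surface", surface);      // was "native-window" + NativeWindowWrapper
    }

    // Receiver side: recover the Surface with a plain static_cast.
    static sp<Surface> extractSurface(const sp<AMessage> &msg) {
        sp<RefBase> obj;
        CHECK(msg->findObject("surface", &obj)); // aborts if the key is missing
        return static_cast<Surface *>(obj.get());
    }
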
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 7b0de24..6e9e6ec 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -18,6 +18,8 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 
+#include <utils/String16.h>
+
 #include <binder/ProcessState.h>
 #include <media/mediarecorder.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -34,7 +36,7 @@
 
 static void usage(const char* name)
 {
-    fprintf(stderr, "Usage: %s [-d duration] [-m] [-w] [<output-file>]\n", name);
+    fprintf(stderr, "Usage: %s [-d du.ration] [-m] [-w] [<output-file>]\n", name);
     fprintf(stderr, "Encodes either a sine wave or microphone input to AMR format\n");
     fprintf(stderr, "    -d    duration in seconds, default 5 seconds\n");
     fprintf(stderr, "    -m    use microphone for input, default sine source\n");
@@ -85,6 +87,7 @@
         // talk into the appropriate microphone for the duration
         source = new AudioSource(
                 AUDIO_SOURCE_MIC,
+                String16(),
                 kSampleRate,
                 channels);
     } else {
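
The extra String16() argument above is the op package name that AudioSource now takes for app-op checks; the shell tool has no package, so it passes an empty string. A caller with a real package would pass it instead (sketch only; the package string below is illustrative):

    #include <media/stagefright/AudioSource.h>
    #include <utils/String16.h>

    using namespace android;

    // Sketch: "com.example.recorder" stands in for the caller's real package.
    static sp<AudioSource> makeMicSource() {
        return new AudioSource(
                AUDIO_SOURCE_MIC,
                String16("com.example.recorder"),  // opPackageName for app-op checks
                48000 /* sampleRate */,
                2 /* channelCount */);
    }
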
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index d987250..dae9bbe 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -108,7 +108,7 @@
             continue;
         }
 
-        ALOGV("selecting track %d", i);
+        ALOGV("selecting track %zu", i);
 
         err = extractor->selectTrack(i);
         CHECK_EQ(err, (status_t)OK);
@@ -151,7 +151,7 @@
         CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
         CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
 
-        ALOGV("got %d input and %d output buffers",
+        ALOGV("got %zu input and %zu output buffers",
               state->mInBuffers.size(), state->mOutBuffers.size());
     }
 
@@ -172,7 +172,7 @@
                 err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
 
                 if (err == OK) {
-                    ALOGV("filling input buffer %d", index);
+                    ALOGV("filling input buffer %zu", index);
 
                     const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
 
@@ -209,7 +209,7 @@
                         state->mCodec->dequeueInputBuffer(&index, kTimeout);
 
                     if (err == OK) {
-                        ALOGV("signalling input EOS on track %d", i);
+                        ALOGV("signalling input EOS on track %zu", i);
 
                         err = state->mCodec->queueInputBuffer(
                                 index,
@@ -258,8 +258,8 @@
                     kTimeout);
 
             if (err == OK) {
-                ALOGV("draining output buffer %d, time = %lld us",
-                      index, presentationTimeUs);
+                ALOGV("draining output buffer %zu, time = %lld us",
+                      index, (long long)presentationTimeUs);
 
                 ++state->mNumBuffersDecoded;
                 state->mNumBytesDecoded += size;
@@ -293,7 +293,7 @@
                 CHECK_EQ((status_t)OK,
                          state->mCodec->getOutputBuffers(&state->mOutBuffers));
 
-                ALOGV("got %d output buffers", state->mOutBuffers.size());
+                ALOGV("got %zu output buffers", state->mOutBuffers.size());
             } else if (err == INFO_FORMAT_CHANGED) {
                 sp<AMessage> format;
                 CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
@@ -313,17 +313,17 @@
         CHECK_EQ((status_t)OK, state->mCodec->release());
 
         if (state->mIsAudio) {
-            printf("track %zu: %" PRId64 " bytes received. %.2f KB/sec\n",
+            printf("track %zu: %lld bytes received. %.2f KB/sec\n",
                    i,
-                   state->mNumBytesDecoded,
+                   (long long)state->mNumBytesDecoded,
                    state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
         } else {
-            printf("track %zu: %" PRId64 " frames decoded, %.2f fps. %" PRId64
+            printf("track %zu: %lld frames decoded, %.2f fps. %lld"
                     " bytes received. %.2f KB/sec\n",
                    i,
-                   state->mNumBuffersDecoded,
+                   (long long)state->mNumBuffersDecoded,
                    state->mNumBuffersDecoded * 1E6 / elapsedTimeUs,
-                   state->mNumBytesDecoded,
+                   (long long)state->mNumBytesDecoded,
                    state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
         }
     }
@@ -418,7 +418,7 @@
         ssize_t displayWidth = info.w;
         ssize_t displayHeight = info.h;
 
-        ALOGV("display is %ld x %ld\n", displayWidth, displayHeight);
+        ALOGV("display is %zd x %zd\n", displayWidth, displayHeight);
 
         control = composerClient->createSurface(
                 String8("A Surface"),
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index f77b38b..1183112 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -81,7 +81,7 @@
         return OK;
     }
 
-    status_t handleSetParameters(const sp<AMessage> &msg) {
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
         return OK;
     }
 
@@ -101,7 +101,7 @@
         return OK;
     }
 
-    status_t handleSetParameters(const sp<AMessage> &msg) {
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
         return OK;
     }
 
@@ -121,7 +121,7 @@
         return OK;
     }
 
-    status_t handleSetParameters(const sp<AMessage> &msg) {
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
         return OK;
     }
 
@@ -597,7 +597,7 @@
 
             if (err == OK) {
                 ALOGV("draining decoded buffer %zu, time = %lld us",
-                        frame.index, frame.presentationTimeUs);
+                        frame.index, (long long)frame.presentationTimeUs);
 
                 ++(state->mNumBuffersDecoded);
 
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 461b56c..36fa3b5 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -53,7 +53,6 @@
 using namespace android;
 
 static int muxing(
-        const android::sp<android::ALooper> &looper,
         const char *path,
         bool useAudio,
         bool useVideo,
@@ -137,14 +136,19 @@
             }
         }
 
-        ALOGV("selecting track %d", i);
+        ALOGV("selecting track %zu", i);
 
         err = extractor->selectTrack(i);
         CHECK_EQ(err, (status_t)OK);
 
         ssize_t newTrackIndex = muxer->addTrack(format);
-        CHECK_GE(newTrackIndex, 0);
-        trackIndexMap.add(i, newTrackIndex);
+        if (newTrackIndex < 0) {
+            fprintf(stderr, "%s track (%zu) unsupported by muxer\n",
+                    isAudio ? "audio" : "video",
+                    i);
+        } else {
+            trackIndexMap.add(i, newTrackIndex);
+        }
     }
 
     int64_t muxerStartTimeUs = ALooper::GetNowUs();
@@ -163,7 +167,12 @@
             ALOGV("saw input eos, err %d", err);
             sawInputEOS = true;
             break;
+        } else if (trackIndexMap.indexOfKey(trackIndex) < 0) {
+            // ALOGV("skipping input from unsupported track %zu", trackIndex);
+            extractor->advance();
+            continue;
         } else {
+            // ALOGV("reading sample from track index %zu\n", trackIndex);
             err = extractor->readSampleData(newBuffer);
             CHECK_EQ(err, (status_t)OK);
 
@@ -308,7 +317,7 @@
     sp<ALooper> looper = new ALooper;
     looper->start();
 
-    int result = muxing(looper, argv[0], useAudio, useVideo, outputFileName,
+    int result = muxing(argv[0], useAudio, useVideo, outputFileName,
                         enableTrim, trimStartTimeMs, trimEndTimeMs, rotationDegrees);
 
     looper->stop();
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index fdc352e..594c933 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -32,13 +32,13 @@
 
 using namespace android;
 
+static const int32_t kAudioBitRate = 12200;
+#if 0
 static const int32_t kFramerate = 24;  // fps
 static const int32_t kIFramesIntervalSec = 1;
 static const int32_t kVideoBitRate = 512 * 1024;
-static const int32_t kAudioBitRate = 12200;
 static const int64_t kDurationUs = 10000000LL;  // 10 seconds
 
-#if 0
 class DummySource : public MediaSource {
 
 public:
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index 172dc36..0d64d2f 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -38,10 +38,10 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/Utils.h>
 
 #include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
 
 #include "include/ESDS.h"
 
@@ -154,8 +154,7 @@
                 sp<AMessage> format = makeFormat(mSource->getFormat());
 
                 if (mSurface != NULL) {
-                    format->setObject(
-                            "native-window", new NativeWindowWrapper(mSurface));
+                    format->setObject("surface", mSurface);
                 }
 
                 mCodec->initiateSetup(format);
@@ -328,14 +327,14 @@
 
             CHECK(size >= 7);
             CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1
-            uint8_t profile = ptr[1];
-            uint8_t level = ptr[3];
+            uint8_t profile __unused = ptr[1];
+            uint8_t level __unused = ptr[3];
 
             // There is decodable content out there that fails the following
             // assertion, let's be lenient for now...
             // CHECK((ptr[4] >> 2) == 0x3f);  // reserved
 
-            size_t lengthSize = 1 + (ptr[4] & 3);
+            size_t lengthSize __unused = 1 + (ptr[4] & 3);
 
             // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
             // violates it...
@@ -491,7 +490,7 @@
 
                 if (sizeNeeded > sizeLeft) {
                     if (outBuffer->size() == 0) {
-                        ALOGE("Unable to fit even a single input buffer of size %d.",
+                        ALOGE("Unable to fit even a single input buffer of size %zu.",
                              sizeNeeded);
                     }
                     CHECK_GT(outBuffer->size(), 0u);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 318b56d..a9c6eda 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -965,7 +965,7 @@
     OMXClient client;
     status_t err = client.connect();
 
-    for (int k = 0; k < argc; ++k) {
+    for (int k = 0; k < argc && err == OK; ++k) {
         bool syncInfoPresent = true;
 
         const char *filename = argv[k];
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 0566d14..1a40e53 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -269,7 +269,7 @@
         : mEOS(false) {
     }
 
-    virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) {
+    virtual void notify(int msg, int ext1 __unused, int ext2 __unused, const Parcel *obj __unused) {
         Mutex::Autolock autoLock(mLock);
 
         if (msg == MEDIA_ERROR || msg == MEDIA_PLAYBACK_COMPLETE) {
@@ -318,7 +318,7 @@
     ssize_t displayWidth = info.w;
     ssize_t displayHeight = info.h;
 
-    ALOGV("display is %d x %d\n", displayWidth, displayHeight);
+    ALOGV("display is %zd x %zd\n", displayWidth, displayHeight);
 
     sp<SurfaceControl> control =
         composerClient->createSurface(
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index 3f62ed7..b90da1b 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -34,6 +34,7 @@
 #include "IDrmManagerService.h"
 
 #define INVALID_BUFFER_LENGTH -1
+#define MAX_BINDER_TRANSACTION_SIZE ((1*1024*1024)-(4096*2))
 
 using namespace android;
 
@@ -933,7 +934,12 @@
 
         //Filling DRM info
         const int infoType = data.readInt32();
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+
+        if (bufferSize > data.dataAvail()) {
+            return BAD_VALUE;
+        }
+
         char* buffer = NULL;
         if (0 < bufferSize) {
             buffer = (char *)data.readInplace(bufferSize);
@@ -986,6 +992,9 @@
 
         const int size = data.readInt32();
         for (int index = 0; index < size; ++index) {
+            if (!data.dataAvail()) {
+                break;
+            }
             const String8 key(data.readString8());
             if (key == String8("FileDescriptorKey")) {
                 char buffer[16];
@@ -1035,7 +1044,12 @@
         const int uniqueId = data.readInt32();
 
         //Filling DRM Rights
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            reply->writeInt32(BAD_VALUE);
+            return DRM_NO_ERROR;
+        }
+
         const DrmBuffer drmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
         const String8 mimeType(data.readString8());
@@ -1206,10 +1220,13 @@
         const int convertId = data.readInt32();
 
         //Filling input data
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            return BAD_VALUE;
+        }
         DrmBuffer* inputData = new DrmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
-        DrmConvertedStatus*    drmConvertedStatus = convertData(uniqueId, convertId, inputData);
+        DrmConvertedStatus* drmConvertedStatus = convertData(uniqueId, convertId, inputData);
 
         if (NULL != drmConvertedStatus) {
             //Filling Drm Converted Status
@@ -1393,7 +1410,12 @@
         const int decryptUnitId = data.readInt32();
 
         //Filling Header info
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            reply->writeInt32(BAD_VALUE);
+            clearDecryptHandle(&handle);
+            return DRM_NO_ERROR;
+        }
         DrmBuffer* headerInfo = NULL;
         headerInfo = new DrmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
@@ -1417,9 +1439,17 @@
         readDecryptHandleFromParcelData(&handle, data);
 
         const int decryptUnitId = data.readInt32();
-        const int decBufferSize = data.readInt32();
+        const uint32_t decBufferSize = data.readInt32();
+        const uint32_t encBufferSize = data.readInt32();
 
-        const int encBufferSize = data.readInt32();
+        if (encBufferSize > data.dataAvail() ||
+            decBufferSize > MAX_BINDER_TRANSACTION_SIZE) {
+            reply->writeInt32(BAD_VALUE);
+            reply->writeInt32(0);
+            clearDecryptHandle(&handle);
+            return DRM_NO_ERROR;
+        }
+
         DrmBuffer* encBuffer
             = new DrmBuffer((char *)data.readInplace(encBufferSize), encBufferSize);
 
@@ -1429,8 +1459,10 @@
 
         DrmBuffer* IV = NULL;
         if (0 != data.dataAvail()) {
-            const int ivBufferlength = data.readInt32();
-            IV = new DrmBuffer((char *)data.readInplace(ivBufferlength), ivBufferlength);
+            const uint32_t ivBufferlength = data.readInt32();
+            if (ivBufferlength <= data.dataAvail()) {
+                IV = new DrmBuffer((char *)data.readInplace(ivBufferlength), ivBufferlength);
+            }
         }
 
         const status_t status
@@ -1477,7 +1509,11 @@
         DecryptHandle handle;
         readDecryptHandleFromParcelData(&handle, data);
 
-        const int numBytes = data.readInt32();
+        const uint32_t numBytes = data.readInt32();
+        if (numBytes > MAX_BINDER_TRANSACTION_SIZE) {
+            reply->writeInt32(BAD_VALUE);
+            return DRM_NO_ERROR;
+        }
         char* buffer = new char[numBytes];
 
         const off64_t offset = data.readInt64();
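
The checks added above all apply one rule: a size read out of a Parcel is caller-controlled, so it must be bounded by what the parcel actually holds (or by the binder transaction limit) before it feeds readInplace() or an allocation. A condensed sketch of that rule, with MAX_BINDER_TRANSACTION_SIZE mirroring the macro added at the top of this file; the helper name is illustrative:

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    #define MAX_BINDER_TRANSACTION_SIZE ((1*1024*1024)-(4096*2))

    using namespace android;

    // Reads a length-prefixed blob, rejecting sizes the parcel cannot back.
    static status_t readSizedBlob(const Parcel &data, const char **out, uint32_t *outSize) {
        const uint32_t bufferSize = data.readInt32();
        if (bufferSize > data.dataAvail() || bufferSize > MAX_BINDER_TRANSACTION_SIZE) {
            return BAD_VALUE;
        }
        *out = static_cast<const char *>(data.readInplace(bufferSize));
        *outSize = bufferSize;
        return (*out != NULL || bufferSize == 0) ? OK : BAD_VALUE;
    }
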
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 2b60842..25d75f7 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -71,11 +71,11 @@
             // construct a camera client from an existing remote
     static  sp<Camera>  create(const sp<ICamera>& camera);
     static  sp<Camera>  connect(int cameraId,
-                                const String16& clientPackageName,
+                                const String16& opPackageName,
                                 int clientUid);
 
     static  status_t  connectLegacy(int cameraId, int halVersion,
-                                     const String16& clientPackageName,
+                                     const String16& opPackageName,
                                      int clientUid, sp<Camera>& camera);
 
             virtual     ~Camera();
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index cad275e..38bff3e 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -109,7 +109,7 @@
      */
     virtual status_t connect(const sp<ICameraClient>& cameraClient,
             int cameraId,
-            const String16& clientPackageName,
+            const String16& opPackageName,
             int clientUid,
             /*out*/
             sp<ICamera>& device) = 0;
@@ -117,7 +117,7 @@
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
-            const String16& clientPackageName,
+            const String16& opPackageName,
             int clientUid,
             /*out*/
             sp<ICameraDeviceUser>& device) = 0;
@@ -141,7 +141,7 @@
      */
     virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient,
             int cameraId, int halVersion,
-            const String16& clientPackageName,
+            const String16& opPackageName,
             int clientUid,
             /*out*/
             sp<ICamera>& device) = 0;
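
Throughout the camera changes, clientPackageName becomes opPackageName: the string now names the identity charged for AppOps checks rather than serving as a plain label. A caller-side sketch (package name and uid handling are illustrative, error handling trimmed):

    #include <camera/Camera.h>
    #include <utils/String16.h>
    #include <unistd.h>

    using namespace android;

    static sp<Camera> openBackCamera() {
        // opPackageName: the package the camera service charges for the app op.
        sp<Camera> camera = Camera::connect(
                0 /* cameraId */,
                String16("com.example.camera"),
                getuid() /* clientUid */);
        return camera;  // NULL if the connection was refused
    }
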
diff --git a/include/media/AVSyncSettings.h b/include/media/AVSyncSettings.h
new file mode 100644
index 0000000..10e3bcc
--- /dev/null
+++ b/include/media/AVSyncSettings.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AV_SYNC_SETTINGS_H
+#define ANDROID_AV_SYNC_SETTINGS_H
+
+namespace android {
+
+enum AVSyncSource : unsigned {
+    // let the system decide the best sync source
+    AVSYNC_SOURCE_DEFAULT = 0,
+    // sync to the system clock
+    AVSYNC_SOURCE_SYSTEM_CLOCK = 1,
+    // sync to the audio track
+    AVSYNC_SOURCE_AUDIO = 2,
+    // sync to the display vsync
+    AVSYNC_SOURCE_VSYNC = 3,
+    AVSYNC_SOURCE_MAX,
+};
+
+enum AVSyncAudioAdjustMode : unsigned {
+    // let the system decide the best audio adjust mode
+    AVSYNC_AUDIO_ADJUST_MODE_DEFAULT = 0,
+    // adjust audio by time stretching
+    AVSYNC_AUDIO_ADJUST_MODE_STRETCH = 1,
+    // adjust audio by resampling
+    AVSYNC_AUDIO_ADJUST_MODE_RESAMPLE = 2,
+    AVSYNC_AUDIO_ADJUST_MODE_MAX,
+};
+
+// max tolerance when adjusting playback speed to desired playback speed
+#define AVSYNC_TOLERANCE_MAX 1.0f
+
+struct AVSyncSettings {
+    AVSyncSource mSource;
+    AVSyncAudioAdjustMode mAudioAdjustMode;
+    float mTolerance;
+    AVSyncSettings()
+        : mSource(AVSYNC_SOURCE_DEFAULT),
+          mAudioAdjustMode(AVSYNC_AUDIO_ADJUST_MODE_DEFAULT),
+          mTolerance(.044f) { }
+};
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // ANDROID_AV_SYNC_SETTINGS_H
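
AVSyncSettings is a plain value struct; its constructor picks the "let the system decide" defaults and a 4.4% speed tolerance. A sketch of how a caller might override them (the consumer of these settings sits outside this diff):

    #include <media/AVSyncSettings.h>

    using namespace android;

    static AVSyncSettings makeAudioMasterSync() {
        AVSyncSettings sync;                        // defaults: DEFAULT source/mode, 0.044 tolerance
        sync.mSource = AVSYNC_SOURCE_AUDIO;         // slave video to the audio clock
        sync.mAudioAdjustMode = AVSYNC_AUDIO_ADJUST_MODE_STRETCH;
        sync.mTolerance = 0.125f;                   // must stay below AVSYNC_TOLERANCE_MAX (1.0)
        return sync;
    }
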
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 583695d..61da4f2 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -201,8 +201,12 @@
      */
 
     /* Simple Constructor.
+     *
+     * Parameters:
+     *
+     * opPackageName:      The package name used for app op checks.
      */
-    AudioEffect();
+    AudioEffect(const String16& opPackageName);
 
 
     /* Constructor.
@@ -211,6 +215,7 @@
      *
      * type:  type of effect created: can be null if uuid is specified. This corresponds to
      *        the OpenSL ES interface implemented by this effect.
+     * opPackageName:  The package name used for app op checks.
      * uuid:  Uuid of effect created: can be null if type is specified. This uuid corresponds to
      *        a particular implementation of an effect type.
      * priority:    requested priority for effect control: the priority level corresponds to the
@@ -227,6 +232,7 @@
      */
 
     AudioEffect(const effect_uuid_t *type,
+                const String16& opPackageName,
                 const effect_uuid_t *uuid = NULL,
                   int32_t priority = 0,
                   effect_callback_t cbf = NULL,
@@ -239,6 +245,7 @@
      *      Same as above but with type and uuid specified by character strings
      */
     AudioEffect(const char *typeStr,
+                    const String16& opPackageName,
                     const char *uuidStr = NULL,
                     int32_t priority = 0,
                     effect_callback_t cbf = NULL,
@@ -406,7 +413,9 @@
      void*                   mUserData;          // client context for callback function
      effect_descriptor_t     mDescriptor;        // effect descriptor
      int32_t                 mId;                // system wide unique effect engine instance ID
-     Mutex                   mLock;               // Mutex for mEnabled access
+     Mutex                   mLock;              // Mutex for mEnabled access
+
+     String16                mOpPackageName;     // The package name used for app op checks.
 
      // IEffectClient
      virtual void controlStatusChanged(bool controlGranted);
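
Every AudioEffect constructor now takes the op package name as its first extra argument. A minimal sketch using the string-based constructor; the type-UUID string is assumed to come from an effect descriptor queried elsewhere, and the package name is illustrative:

    #include <media/AudioEffect.h>
    #include <utils/String16.h>

    using namespace android;

    // typeStr: an effect type UUID string obtained elsewhere (e.g. from a queried descriptor).
    static sp<AudioEffect> makeEffect(const char *typeStr) {
        sp<AudioEffect> effect = new AudioEffect(
                typeStr,
                String16("com.example.player"),  // opPackageName, the new mandatory argument
                NULL /* uuidStr: any implementation of the type */,
                0 /* priority */);
        if (effect->initCheck() != NO_ERROR) {   // engine creation is checked after construction
            return NULL;
        }
        return effect;
    }
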
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
new file mode 100644
index 0000000..c94b738
--- /dev/null
+++ b/include/media/AudioIoDescriptor.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
+#define ANDROID_AUDIO_IO_DESCRIPTOR_H
+
+namespace android {
+
+enum audio_io_config_event {
+    AUDIO_OUTPUT_OPENED,
+    AUDIO_OUTPUT_CLOSED,
+    AUDIO_OUTPUT_CONFIG_CHANGED,
+    AUDIO_INPUT_OPENED,
+    AUDIO_INPUT_CLOSED,
+    AUDIO_INPUT_CONFIG_CHANGED,
+};
+
+// audio input/output descriptor used to cache output configurations in client process to avoid
+// frequent calls through IAudioFlinger
+class AudioIoDescriptor : public RefBase {
+public:
+    AudioIoDescriptor() :
+        mIoHandle(AUDIO_IO_HANDLE_NONE),
+        mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
+        mFrameCount(0), mLatency(0)
+    {
+        memset(&mPatch, 0, sizeof(struct audio_patch));
+    }
+
+    virtual ~AudioIoDescriptor() {}
+
+    audio_port_handle_t getDeviceId() {
+        if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
+            if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
+                // this is an output mix
+                // FIXME: the API only returns the first device in case of multiple device selection
+                return mPatch.sinks[0].id;
+            } else {
+                // this is an input mix
+                return mPatch.sources[0].id;
+            }
+        }
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+
+    audio_io_handle_t mIoHandle;
+    struct audio_patch mPatch;
+    uint32_t mSamplingRate;
+    audio_format_t mFormat;
+    audio_channel_mask_t mChannelMask;
+    size_t mFrameCount;
+    uint32_t mLatency;
+};
+
+
+};  // namespace android
+
+#endif  /*ANDROID_AUDIO_IO_DESCRIPTOR_H*/
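
AudioIoDescriptor replaces the old AudioSystem::OutputDescriptor (removed from AudioSystem.h further down) and covers inputs as well; getDeviceId() walks the cached audio_patch to answer which device an io handle is currently routed to. A sketch of filling a cache entry from an ioConfigChanged() callback (field values are illustrative; since the header relies on its includers for the audio types and RefBase, those headers are pulled in first):

    #include <system/audio.h>
    #include <utils/RefBase.h>
    #include <media/AudioIoDescriptor.h>

    using namespace android;

    static audio_port_handle_t cacheAndResolve(audio_io_handle_t ioHandle,
                                               const struct audio_patch &patch) {
        sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
        desc->mIoHandle     = ioHandle;            // handle reported by AudioFlinger
        desc->mPatch        = patch;               // current routing for that io
        desc->mSamplingRate = 48000;
        desc->mFormat       = AUDIO_FORMAT_PCM_16_BIT;
        desc->mChannelMask  = AUDIO_CHANNEL_OUT_STEREO;
        // A mix source means an output patch (first sink device is returned);
        // otherwise it is an input and the source device id is returned.
        return desc->getDeviceId();                // AUDIO_PORT_HANDLE_NONE if no patch yet
    }
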
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index 800b27b..feed402 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -38,14 +38,17 @@
 #define MIX_TYPE_PLAYERS 0
 #define MIX_TYPE_RECORDERS 1
 
+// definition of the different events that can be reported on a dynamic policy from
+//   AudioSystem's implementation of the AudioPolicyClient interface
+// keep in sync with AudioSystem.java
+#define DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE 0
+
 #define MIX_STATE_DISABLED -1
 #define MIX_STATE_IDLE 0
 #define MIX_STATE_MIXING 1
 
-#define ROUTE_FLAG_RENDER 0x1
-#define ROUTE_FLAG_LOOP_BACK (0x1 << 1)
-
-#define MIX_FLAG_NOTIFY_ACTIVITY 0x1
+#define MIX_ROUTE_FLAG_RENDER 0x1
+#define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
 
 #define MAX_MIXES_PER_POLICY 10
 #define MAX_CRITERIA_PER_MIX 20
@@ -67,11 +70,15 @@
 
 class AudioMix {
 public:
+    // flag on an AudioMix indicating the activity on this mix (IDLE, MIXING)
+    //   must be reported through the AudioPolicyClient interface
+    static const uint32_t kCbFlagNotifyActivity = 0x1;
+
     AudioMix() {}
     AudioMix(Vector<AttributeMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
              uint32_t routeFlags, String8 registrationId, uint32_t flags) :
         mCriteria(criteria), mMixType(mixType), mFormat(format),
-        mRouteFlags(routeFlags), mRegistrationId(registrationId), mFlags(flags){}
+        mRouteFlags(routeFlags), mRegistrationId(registrationId), mCbFlags(flags){}
 
     status_t readFromParcel(Parcel *parcel);
     status_t writeToParcel(Parcel *parcel) const;
@@ -81,7 +88,7 @@
     audio_config_t  mFormat;
     uint32_t        mRouteFlags;
     String8         mRegistrationId;
-    uint32_t        mFlags;
+    uint32_t        mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
 };
 
 }; // namespace android
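
With MIX_FLAG_NOTIFY_ACTIVITY gone, activity reporting is requested per mix through AudioMix::kCbFlagNotifyActivity, and the route flags gain a MIX_ prefix. A sketch of building such a mix (the criteria and registration id are placeholders):

    #include <system/audio.h>
    #include <media/AudioPolicy.h>

    using namespace android;

    static AudioMix makeLoopbackMix(const Vector<AttributeMatchCriterion> &criteria) {
        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
        config.sample_rate  = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format       = AUDIO_FORMAT_PCM_16_BIT;
        return AudioMix(criteria, MIX_TYPE_PLAYERS, config,
                        MIX_ROUTE_FLAG_LOOP_BACK,            // renamed from ROUTE_FLAG_LOOP_BACK
                        String8("example-policy-mix"),       // registrationId, illustrative
                        AudioMix::kCbFlagNotifyActivity);    // ask for MIX_STATE_* updates
    }
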
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index dbe2788..c4c7b0e 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -129,8 +129,12 @@
 
     /* Constructs an uninitialized AudioRecord. No connection with
      * AudioFlinger takes place.  Use set() after this.
+     *
+     * Parameters:
+     *
+     * opPackageName:      The package name used for app ops.
      */
-                        AudioRecord();
+                        AudioRecord(const String16& opPackageName);
 
     /* Creates an AudioRecord object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
@@ -143,6 +147,7 @@
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask, such that audio_is_input_channel(channelMask) is true.
+     * opPackageName:      The package name used for app ops.
      * frameCount:         Minimum size of track PCM buffer in frames. This defines the
      *                     application's contribution to the
      *                     latency of the track.  The actual size selected by the AudioRecord could
@@ -165,6 +170,7 @@
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
+                                    const String16& opPackageName,
                                     size_t frameCount = 0,
                                     callback_t cbf = NULL,
                                     void* user = NULL,
@@ -172,6 +178,8 @@
                                     int sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                                    int uid = -1,
+                                    pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL);
 
     /* Terminates the AudioRecord and unregisters it from AudioFlinger.
@@ -208,6 +216,8 @@
                             int sessionId = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                            int uid = -1,
+                            pid_t pid = -1,
                             const audio_attributes_t* pAttributes = NULL);
 
     /* Result of constructing the AudioRecord. This must be checked for successful initialization
@@ -374,6 +384,49 @@
             status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
                                 size_t *nonContig = NULL);
 
+            // Explicit Routing
+    /**
+     * TODO Document this method.
+     */
+            status_t setInputDevice(audio_port_handle_t deviceId);
+
+    /**
+     * TODO Document this method.
+     */
+            audio_port_handle_t getInputDevice();
+
+     /* Returns the ID of the audio device actually used by the input to which this AudioRecord
+      * is attached.
+      * A value of AUDIO_PORT_HANDLE_NONE indicates the AudioRecord is not attached to any input.
+      *
+      * Parameters:
+      *  none.
+      */
+     audio_port_handle_t getRoutedDeviceId();
+
+    /* Add an AudioDeviceCallback. The caller will be notified when the audio device
+     * to which this AudioRecord is routed is updated.
+     * Replaces any previously installed callback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the same callback is already installed.
+     *         NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t addAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+    /* remove an AudioDeviceCallback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the callback is not installed
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t removeAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
+
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
      * additional non-contiguous frames that are predicted to be available immediately,
@@ -473,7 +526,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t openRecord_l(size_t epoch);
+            status_t openRecord_l(size_t epoch, const String16& opPackageName);
 
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreRecord_l(const char *from);
@@ -510,6 +563,8 @@
 
     status_t                mStatus;
 
+    String16                mOpPackageName;         // The package name used for app ops.
+
     size_t                  mFrameCount;            // corresponds to current IAudioRecord, value is
                                                     // reported back by AudioFlinger to the client
     size_t                  mReqFrameCount;         // frame count to request the first or next time
@@ -559,7 +614,14 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioRecord attempt
+    int                     mClientUid;
+    pid_t                   mClientPid;
     audio_attributes_t      mAttributes;
+
+    // For Device Selection API
+    //  a value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+    audio_port_handle_t    mSelectedDeviceId;
+    sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 };
 
 }; // namespace android
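
For AudioRecord the op package name lands right after the channel mask, and the new explicit-routing calls (setInputDevice, getRoutedDeviceId, the device callbacks) mirror what AudioTrack already exposes. A construction sketch; the package name and stream parameters are illustrative:

    #include <media/AudioRecord.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<AudioRecord> makeRecorder() {
        sp<AudioRecord> record = new AudioRecord(
                AUDIO_SOURCE_MIC,
                48000,
                AUDIO_FORMAT_PCM_16_BIT,
                AUDIO_CHANNEL_IN_MONO,
                String16("com.example.recorder"));       // opPackageName used for app ops
        if (record->initCheck() != NO_ERROR) {
            return NULL;
        }
        record->setInputDevice(AUDIO_PORT_HANDLE_NONE);   // NONE = default (AudioPolicyManager) routing
        record->start();
        return record;
    }
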
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 182133c..3241e9c 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -19,6 +19,7 @@
 
 #include <hardware/audio_effect.h>
 #include <media/AudioPolicy.h>
+#include <media/AudioIoDescriptor.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioPolicyServiceClient.h>
 #include <system/audio.h>
@@ -29,6 +30,7 @@
 namespace android {
 
 typedef void (*audio_error_callback)(status_t err);
+typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
 
 class IAudioFlinger;
 class IAudioPolicyService;
@@ -89,6 +91,7 @@
     static String8  getParameters(const String8& keys);
 
     static void setErrorCallback(audio_error_callback cb);
+    static void setDynPolicyCallback(dynamic_policy_callback cb);
 
     // helper function to obtain AudioFlinger service handle
     static const sp<IAudioFlinger> get_audio_flinger();
@@ -155,33 +158,6 @@
     // or no HW sync source is used.
     static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
 
-    // types of io configuration change events received with ioConfigChanged()
-    enum io_config_event {
-        OUTPUT_OPENED,
-        OUTPUT_CLOSED,
-        OUTPUT_CONFIG_CHANGED,
-        INPUT_OPENED,
-        INPUT_CLOSED,
-        INPUT_CONFIG_CHANGED,
-        STREAM_CONFIG_CHANGED,
-        NUM_CONFIG_EVENTS
-    };
-
-    // audio output descriptor used to cache output configurations in client process to avoid
-    // frequent calls through IAudioFlinger
-    class OutputDescriptor {
-    public:
-        OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)
-            {}
-
-        uint32_t samplingRate;
-        audio_format_t format;
-        audio_channel_mask_t channelMask;
-        size_t frameCount;
-        uint32_t latency;
-    };
-
     // Events used to synchronize actions between audio sessions.
     // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
     // playback is complete on another audio session.
@@ -224,6 +200,7 @@
                                      audio_io_handle_t *output,
                                      audio_session_t session,
                                      audio_stream_type_t *stream,
+                                     uid_t uid,
                                      uint32_t samplingRate = 0,
                                      audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                      audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
@@ -245,10 +222,12 @@
     static status_t getInputForAttr(const audio_attributes_t *attr,
                                     audio_io_handle_t *input,
                                     audio_session_t session,
+                                    uid_t uid,
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_input_flags_t flags);
+                                    audio_input_flags_t flags,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
 
     static status_t startInput(audio_io_handle_t input,
                                audio_session_t session);
@@ -353,17 +332,42 @@
 
     };
 
-    static status_t addAudioPortCallback(const sp<AudioPortCallback>& callBack);
-    static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callBack);
+    static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
+    static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+
+    class AudioDeviceCallback : public RefBase
+    {
+    public:
+
+                AudioDeviceCallback() {}
+        virtual ~AudioDeviceCallback() {}
+
+        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+                                         audio_port_handle_t deviceId) = 0;
+    };
+
+    static status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                           audio_io_handle_t audioIo);
+    static status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                              audio_io_handle_t audioIo);
+
+    static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
 
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
     {
     public:
-        AudioFlingerClient() {
+        AudioFlingerClient() :
+            mInBuffSize(0), mInSamplingRate(0),
+            mInFormat(AUDIO_FORMAT_DEFAULT), mInChannelMask(AUDIO_CHANNEL_NONE) {
         }
 
+        void clearIoCache();
+        status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+                                    audio_channel_mask_t channelMask, size_t* buffSize);
+        sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
 
@@ -371,7 +375,27 @@
 
         // indicate a change in the configuration of an output or input: keeps the cached
         // values for output/input parameters up-to-date in client process
-        virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+        virtual void ioConfigChanged(audio_io_config_event event,
+                                     const sp<AudioIoDescriptor>& ioDesc);
+
+
+        status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                               audio_io_handle_t audioIo);
+        status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                           audio_io_handle_t audioIo);
+
+        audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+
+    private:
+        Mutex                               mLock;
+        DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> >   mIoDescriptors;
+        DefaultKeyedVector<audio_io_handle_t, Vector < sp<AudioDeviceCallback> > >
+                                                                        mAudioDeviceCallbacks;
+        // cached values for recording getInputBufferSize() queries
+        size_t                              mInBuffSize;    // zero indicates cache is invalid
+        uint32_t                            mInSamplingRate;
+        audio_format_t                      mInFormat;
+        audio_channel_mask_t                mInChannelMask;
     };
 
     class AudioPolicyServiceClient: public IBinder::DeathRecipient,
@@ -381,8 +405,8 @@
         AudioPolicyServiceClient() {
         }
 
-        status_t addAudioPortCallback(const sp<AudioPortCallback>& callBack);
-        status_t removeAudioPortCallback(const sp<AudioPortCallback>& callBack);
+        status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
+        status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
 
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
@@ -397,17 +421,19 @@
         Vector <sp <AudioPortCallback> >    mAudioPortCallbacks;
     };
 
+    static const sp<AudioFlingerClient> getAudioFlingerClient();
+    static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
     static sp<AudioFlingerClient> gAudioFlingerClient;
     static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
     friend class AudioFlingerClient;
     friend class AudioPolicyServiceClient;
 
     static Mutex gLock;      // protects gAudioFlinger and gAudioErrorCallback,
-    static Mutex gLockCache; // protects gOutputs, gPrevInSamplingRate, gPrevInFormat,
-                             // gPrevInChannelMask and gInBuffSize
     static Mutex gLockAPS;   // protects gAudioPolicyService and gAudioPolicyServiceClient
     static sp<IAudioFlinger> gAudioFlinger;
     static audio_error_callback gAudioErrorCallback;
+    static dynamic_policy_callback gDynPolicyCallback;
 
     static size_t gInBuffSize;
     // previous parameters for recording buffer size queries
@@ -416,10 +442,6 @@
     static audio_channel_mask_t gPrevInChannelMask;
 
     static sp<IAudioPolicyService> gAudioPolicyService;
-
-    // list of output descriptors containing cached parameters
-    // (sampling rate, framecount, channel count...)
-    static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
 };
 
 };  // namespace android
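
AudioSystem::AudioDeviceCallback is the client-side notification fed by the routing cache kept in AudioFlingerClient above; AudioTrack and AudioRecord forward their registrations to it. A sketch of a listener (the watcher name and logging are illustrative):

    #define LOG_TAG "DeviceWatcher"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>

    using namespace android;

    struct DeviceWatcher : public AudioSystem::AudioDeviceCallback {
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("io %d now routed to device %d", audioIo, deviceId);
        }
    };

    static sp<DeviceWatcher> watchIo(audio_io_handle_t audioIo) {
        sp<DeviceWatcher> watcher = new DeviceWatcher();
        if (AudioSystem::addAudioDeviceCallback(watcher, audioIo) != NO_ERROR) {
            return NULL;
        }
        return watcher;  // later: AudioSystem::removeAudioDeviceCallback(watcher, audioIo)
    }
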
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 2d34c02..458f4b4 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -360,6 +360,11 @@
     /* Return current source sample rate in Hz */
             uint32_t    getSampleRate() const;
 
+    /* Return the original source sample rate in Hz. This corresponds to the sample rate
+     * that would apply if the playback rate had normal speed and pitch.
+     */
+            uint32_t    getOriginalSampleRate() const;
+
     /* Set source playback rate for timestretch
      * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
      * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
@@ -505,7 +510,7 @@
      */
             status_t    setOutputDevice(audio_port_handle_t deviceId);
 
-    /* Returns the ID of the audio device used for output of this AudioTrack.
+    /* Returns the ID of the audio device selected for this AudioTrack.
      * A value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
      *
      * Parameters:
@@ -513,6 +518,15 @@
      */
      audio_port_handle_t getOutputDevice();
 
+     /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
+      * attached.
+      * A value of AUDIO_PORT_HANDLE_NONE indicates the audio track is not attached to any output.
+      *
+      * Parameters:
+      *  none.
+      */
+     audio_port_handle_t getRoutedDeviceId();
+
     /* Returns the unique session ID associated with this track.
      *
      * Parameters:
@@ -659,6 +673,28 @@
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
 
+    /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
+     * AudioTrack is routed is updated.
+     * Replaces any previously installed callback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the same callback is already installed.
+     *         NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+    /* Remove an AudioDeviceCallback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the callback is not installed
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t removeAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
+
 protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
@@ -749,6 +785,7 @@
     float                   mVolume[2];
     float                   mSendLevel;
     mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
+    uint32_t                mOriginalSampleRate;
     AudioPlaybackRate       mPlaybackRate;
     size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
                                                     // reported back by AudioFlinger to the client
@@ -832,6 +869,10 @@
     int64_t                 mStartUs;               // the start time after flush or stop.
                                                     // only used for offloaded and direct tracks.
 
+    bool                    mPreviousTimestampValid;// true if mPreviousTimestamp is valid
+    bool                    mRetrogradeMotionReported; // reduce log spam
+    AudioTimestamp          mPreviousTimestamp;     // used to detect retrograde motion
+
     audio_output_flags_t    mFlags;
         // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
         // mLock must be held to read or write those bits reliably.
@@ -859,7 +900,7 @@
 
     // For Device Selection API
     //  a value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
-    int                     mSelectedDeviceId;
+    audio_port_handle_t     mSelectedDeviceId;
 
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
@@ -875,6 +916,8 @@
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
     int                     mClientUid;
     pid_t                   mClientPid;
+
+    sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 };
 
 class TimedAudioTrack : public AudioTrack
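
The hunk above adds explicit-routing hooks to AudioTrack. As a rough illustration (not part of
the patch), a client could observe routing changes along these lines; it assumes
AudioSystem::AudioDeviceCallback exposes an onAudioDeviceUpdate(audioIo, deviceId) notification,
which is declared in AudioSystem.h but not visible in this hunk:

    #define LOG_TAG "TrackRoutingExample"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>
    #include <media/AudioTrack.h>

    using namespace android;

    // Assumed shape of the callback interface declared in AudioSystem.h.
    struct TrackRoutingListener : public AudioSystem::AudioDeviceCallback {
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("output io %d now routed to device %d", audioIo, deviceId);
        }
    };

    void watchRouting(const sp<AudioTrack>& track) {
        sp<TrackRoutingListener> listener = new TrackRoutingListener();
        // Replaces any previously installed callback (see AudioTrack.h above).
        if (track->addAudioDeviceCallback(listener) == NO_ERROR) {
            ALOGI("initially routed to device %d", track->getRoutedDeviceId());
        }
        // ... later, before the listener is destroyed:
        track->removeAudioDeviceCallback(listener);
    }
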
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index f927a80..3f7fd09 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -85,9 +85,11 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& callingPackage,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
@@ -198,6 +200,7 @@
                                     // AudioFlinger doesn't take over handle reference from client
                                     audio_io_handle_t output,
                                     int sessionId,
+                                    const String16& callingPackage,
                                     status_t *status,
                                     int *id,
                                     int *enabled) = 0;
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
index 75a9971..0080bc9 100644
--- a/include/media/IAudioFlingerClient.h
+++ b/include/media/IAudioFlingerClient.h
@@ -22,6 +22,7 @@
 #include <binder/IInterface.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include <media/AudioIoDescriptor.h>
 
 namespace android {
 
@@ -33,7 +34,8 @@
     DECLARE_META_INTERFACE(AudioFlingerClient);
 
     // Notifies a change of audio input/output configuration.
-    virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) = 0;
+    virtual void ioConfigChanged(audio_io_config_event event,
+                                 const sp<AudioIoDescriptor>& ioDesc) = 0;
 
 };
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 413267b..ee462a0 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -62,6 +62,7 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
@@ -78,12 +79,14 @@
                                audio_stream_type_t stream,
                                audio_session_t session) = 0;
     virtual status_t  getInputForAttr(const audio_attributes_t *attr,
-                                      audio_io_handle_t *input,
-                                      audio_session_t session,
-                                      uint32_t samplingRate,
-                                      audio_format_t format,
-                                      audio_channel_mask_t channelMask,
-                                      audio_input_flags_t flags) = 0;
+                              audio_io_handle_t *input,
+                              audio_session_t session,
+                              uid_t uid,
+                              uint32_t samplingRate,
+                              audio_format_t format,
+                              audio_channel_mask_t channelMask,
+                              audio_input_flags_t flags,
+                              audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE) = 0;
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session) = 0;
     virtual status_t stopInput(audio_io_handle_t input,
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index df6130d..0fd8933 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -35,6 +35,8 @@
 struct IStreamSource;
 class IGraphicBufferProducer;
 struct IMediaHTTPService;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
 
 class IMediaPlayer: public IInterface
 {
@@ -58,7 +60,11 @@
     virtual status_t        stop() = 0;
     virtual status_t        pause() = 0;
     virtual status_t        isPlaying(bool* state) = 0;
-    virtual status_t        setPlaybackRate(float rate) = 0;
+    virtual status_t        setPlaybackSettings(const AudioPlaybackRate& rate) = 0;
+    virtual status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) = 0;
+    virtual status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
+    virtual status_t        getSyncSettings(AVSyncSettings* sync /* nonnull */,
+                                            float* videoFps /* nonnull */) = 0;
     virtual status_t        seekTo(int msec) = 0;
     virtual status_t        getCurrentPosition(int* msec) = 0;
     virtual status_t        getDuration(int* msec) = 0;
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 49a3d61..a316ce2 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -47,7 +47,7 @@
 public:
     DECLARE_META_INTERFACE(MediaPlayerService);
 
-    virtual sp<IMediaRecorder> createMediaRecorder() = 0;
+    virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
     virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0)
             = 0;
@@ -65,8 +65,8 @@
     // display client when display connection, disconnection or errors occur.
     // The assumption is that at most one remote display will be connected to the
     // provided interface at a time.
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface) = 0;
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface) = 0;
 
     // codecs and audio devices usage tracking for the battery app
     enum BatteryDataBits {
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 509c06b..47de0ca 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -26,6 +26,7 @@
 class ICamera;
 class ICameraRecordingProxy;
 class IMediaRecorderClient;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 
 class IMediaRecorder: public IInterface
@@ -55,6 +56,7 @@
     virtual status_t init() = 0;
     virtual status_t close() = 0;
     virtual status_t release() = 0;
+    virtual status_t usePersistentSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
 };
 
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 6def65b..df3aeca 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -20,6 +20,7 @@
 
 #include <binder/IInterface.h>
 #include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/List.h>
 #include <utils/String8.h>
@@ -113,6 +114,14 @@
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer) = 0;
 
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer) = 0;
+
+    virtual status_t usePersistentInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer) = 0;
+
     virtual status_t signalEndOfInputStream(node_id node) = 0;
 
     // This API clearly only makes sense if the caller lives in the
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 824762a..fa917f9 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -26,8 +26,10 @@
 #include <utils/RefBase.h>
 
 #include <media/mediaplayer.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/AudioTimestamp.h>
+#include <media/AVSyncSettings.h>
 #include <media/Metadata.h>
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -132,7 +134,8 @@
         virtual void        pause() = 0;
         virtual void        close() = 0;
 
-        virtual status_t    setPlaybackRatePermille(int32_t /* rate */) { return INVALID_OPERATION;}
+        virtual status_t    setPlaybackRate(const AudioPlaybackRate& rate) = 0;
+        virtual status_t    getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
         virtual bool        needsTrailingPadding() { return true; }
 
         virtual status_t    setParameters(const String8& /* keyValuePairs */) { return NO_ERROR; }
@@ -173,7 +176,31 @@
     virtual status_t    stop() = 0;
     virtual status_t    pause() = 0;
     virtual bool        isPlaying() = 0;
-    virtual status_t    setPlaybackRate(float /* rate */) { return INVALID_OPERATION; }
+    virtual status_t    setPlaybackSettings(const AudioPlaybackRate& rate) {
+        // by default, players only support setting rate to the default
+        if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
+            return BAD_VALUE;
+        }
+        return OK;
+    }
+    virtual status_t    getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
+        *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+        return OK;
+    }
+    virtual status_t    setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
+        // By default, players only support setting sync source to default; all other sync
+        // settings are ignored. There is no requirement for getters to return set values.
+        if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+            return BAD_VALUE;
+        }
+        return OK;
+    }
+    virtual status_t    getSyncSettings(
+                                AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
+        *sync = AVSyncSettings();
+        *videoFps = -1.f;
+        return OK;
+    }
     virtual status_t    seekTo(int msec) = 0;
     virtual status_t    getCurrentPosition(int *msec) = 0;
     virtual status_t    getDuration(int *msec) = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index f55063e..9947309 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -26,10 +26,12 @@
 
 class ICameraRecordingProxy;
 class Surface;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 
 struct MediaRecorderBase {
-    MediaRecorderBase() {}
+    MediaRecorderBase(const String16 &opPackageName)
+        : mOpPackageName(opPackageName) {}
     virtual ~MediaRecorderBase() {}
 
     virtual status_t init() = 0;
@@ -55,8 +57,13 @@
     virtual status_t reset() = 0;
     virtual status_t getMaxAmplitude(int *max) = 0;
     virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
+    virtual status_t usePersistentSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
 
+
+protected:
+    String16 mOpPackageName;
+
 private:
     MediaRecorderBase(const MediaRecorderBase &);
     MediaRecorderBase &operator=(const MediaRecorderBase &);
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
index 0b57c84..20f2cad 100644
--- a/include/media/MediaResource.h
+++ b/include/media/MediaResource.h
@@ -25,6 +25,8 @@
 
 extern const char kResourceSecureCodec[];
 extern const char kResourceNonSecureCodec[];
+extern const char kResourceAudioCodec[];
+extern const char kResourceVideoCodec[];
 extern const char kResourceGraphicMemory[];
 
 class MediaResource {
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 6167dd6..b92f816 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -65,7 +65,8 @@
     /* Constructor.
      * See AudioEffect constructor for details on parameters.
      */
-                        Visualizer(int32_t priority = 0,
+                        Visualizer(const String16& opPackageName,
+                                   int32_t priority = 0,
                                    effect_callback_t cbf = NULL,
                                    void* user = NULL,
                                    int sessionId = 0);
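
For illustration only: after this change a Visualizer is constructed with the caller's app-op
package name (the package string below is hypothetical); the remaining parameters keep their
previous defaults:

    #include <media/Visualizer.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<Visualizer> makeVisualizer() {
        // sessionId 0 attaches to the output mix, as before.
        return new Visualizer(String16("com.example.music"),
                              0 /* priority */, NULL /* cbf */, NULL /* user */,
                              0 /* sessionId */);
    }
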
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 256fa9a..3fe749c 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -20,6 +20,8 @@
 #include <arpa/inet.h>
 
 #include <binder/IMemory.h>
+
+#include <media/AudioResamplerPublic.h>
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaPlayer.h>
 #include <media/IMediaDeathNotifier.h>
@@ -32,8 +34,9 @@
 
 namespace android {
 
-class Surface;
+struct AVSyncSettings;
 class IGraphicBufferProducer;
+class Surface;
 
 enum media_event_type {
     MEDIA_NOP               = 0, // interface test message
@@ -223,7 +226,12 @@
             status_t        stop();
             status_t        pause();
             bool            isPlaying();
-            status_t        setPlaybackRate(float rate);
+            status_t        setPlaybackSettings(const AudioPlaybackRate& rate);
+            status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+            status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
+            status_t        getSyncSettings(
+                                    AVSyncSettings* sync /* nonnull */,
+                                    float* videoFps /* nonnull */);
             status_t        getVideoWidth(int *w);
             status_t        getVideoHeight(int *h);
             status_t        seekTo(int msec);
@@ -278,7 +286,6 @@
     int                         mVideoWidth;
     int                         mVideoHeight;
     int                         mAudioSessionId;
-    float                       mPlaybackRate;
     float                       mSendLevel;
     struct sockaddr_in          mRetransmitEndpoint;
     bool                        mRetransmitEndpointValid;
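
A minimal client-side sketch of the new settings API that replaces setPlaybackRate(float). It
assumes AudioPlaybackRate carries mSpeed/mPitch fields as defined in AudioResamplerPublic.h
(included above but not shown in this diff):

    #include <media/AVSyncSettings.h>
    #include <media/AudioResamplerPublic.h>
    #include <media/mediaplayer.h>

    using namespace android;

    static void playAtDoubleSpeed(const sp<MediaPlayer>& player) {
        AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
        rate.mSpeed = 2.0f;   // assumed field from AudioResamplerPublic.h
        rate.mPitch = 1.0f;   // keep normal pitch while timestretching
        if (player->setPlaybackSettings(rate) != OK) {
            return;  // unsupported rates are rejected with BAD_VALUE
        }

        AVSyncSettings sync;
        float videoFps = -1.f;
        player->getSyncSettings(&sync, &videoFps);  // videoFps stays < 0 if unknown
    }
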
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 74a6469..9210feb 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -32,6 +32,7 @@
 class ICamera;
 class ICameraRecordingProxy;
 class IGraphicBufferProducer;
+struct PersistentSurface;
 class Surface;
 
 typedef void (*media_completion_f)(status_t status, void *cookie);
@@ -209,7 +210,7 @@
                       public virtual IMediaDeathNotifier
 {
 public:
-    MediaRecorder();
+    MediaRecorder(const String16& opPackageName);
     ~MediaRecorder();
 
     void        died();
@@ -236,6 +237,7 @@
     status_t    close();
     status_t    release();
     void        notify(int msg, int ext1, int ext2);
+    status_t    usePersistentSurface(const sp<PersistentSurface>& surface);
     sp<IGraphicBufferProducer>     querySurfaceMediaSourceFromMediaServer();
 
 private:
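
An illustrative sketch of the two additions to MediaRecorder: the app-op package name passed at
construction and the persistent input surface. The package string is hypothetical, the usual
setVideoSource()/prepare() sequence is elided, and the surface is obtained from
MediaCodec::CreatePersistentInputSurface(), introduced later in this patch:

    #include <media/mediarecorder.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/PersistentSurface.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<MediaRecorder> makeRecorder(const sp<PersistentSurface>& persistent) {
        sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.camera"));
        // ... setVideoSource()/setOutputFormat()/etc. elided ...
        if (persistent != NULL) {
            // Feed frames from a surface that outlives this recorder instance.
            recorder->usePersistentSurface(persistent);
        }
        return recorder;
    }
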
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index a8d0fcb..0a54df9 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -44,9 +44,13 @@
     virtual void initiateAllocateComponent(const sp<AMessage> &msg);
     virtual void initiateConfigureComponent(const sp<AMessage> &msg);
     virtual void initiateCreateInputSurface();
+    virtual void initiateUsePersistentInputSurface(
+            const sp<PersistentSurface> &surface);
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
 
+    virtual status_t setSurface(const sp<Surface> &surface);
+
     virtual void signalFlush();
     virtual void signalResume();
 
@@ -113,7 +117,9 @@
         kWhatDrainDeferredMessages   = 'drai',
         kWhatAllocateComponent       = 'allo',
         kWhatConfigureComponent      = 'conf',
+        kWhatSetSurface              = 'setS',
         kWhatCreateInputSurface      = 'cisf',
+        kWhatUsePersistentInputSurface = 'pisf',
         kWhatSignalEndOfInputStream  = 'eois',
         kWhatStart                   = 'star',
         kWhatRequestIDRFrame         = 'ridr',
@@ -192,6 +198,7 @@
     List<sp<AMessage> > mDeferredQueue;
 
     bool mSentFormat;
+    bool mIsVideo;
     bool mIsEncoder;
     bool mUseMetadataOnEncoderOutput;
     bool mShutdownInProgress;
@@ -228,6 +235,9 @@
     status_t freeBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffer(OMX_U32 portIndex, size_t i);
 
+    status_t handleSetSurface(const sp<Surface> &surface);
+    status_t setupNativeWindowSizeFormatAndUsage(ANativeWindow *nativeWindow /* nonnull */);
+
     status_t configureOutputBuffersFromNativeWindow(
             OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
             OMX_U32 *nMinUndequeuedBuffers);
@@ -319,8 +329,6 @@
 
     status_t initNativeWindow();
 
-    status_t pushBlankBuffersToNativeWindow();
-
     // Returns true iff all buffers on the given port have status
     // OWNED_BY_US or OWNED_BY_NATIVE_WINDOW.
     bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 98c4fa7..e0cd965 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -25,9 +25,10 @@
 
 namespace android {
 
-class MediaSource;
+struct AudioPlaybackRate;
 class AudioTrack;
 struct AwesomePlayer;
+class MediaSource;
 
 class AudioPlayer : public TimeSource {
 public:
@@ -73,7 +74,8 @@
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
 
-    status_t setPlaybackRatePermille(int32_t ratePermille);
+    status_t setPlaybackRate(const AudioPlaybackRate &rate);
+    status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
 
     void notifyAudioEOS();
 
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 4c9aaad..50cf371 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -35,6 +35,7 @@
     // _not_ a bitmask of audio_channels_t constants.
     AudioSource(
             audio_source_t inputSource,
+            const String16 &opPackageName,
             uint32_t sampleRate,
             uint32_t channels = 1);
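
For reference, the audio capture source now also carries the app-op package name (hypothetical
package string below); sample rate and channel count keep their existing meaning:

    #include <media/stagefright/AudioSource.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<AudioSource> makeMicSource() {
        return new AudioSource(AUDIO_SOURCE_MIC, String16("com.example.recorder"),
                               48000 /* sampleRate */, 2 /* channels */);
    }
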
 
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index 1bf27a6..51213b6 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -26,6 +26,7 @@
 namespace android {
 
 struct ABuffer;
+struct PersistentSurface;
 
 struct CodecBase : public AHandler {
     enum {
@@ -39,6 +40,7 @@
         kWhatComponentAllocated  = 'cAll',
         kWhatComponentConfigured = 'cCon',
         kWhatInputSurfaceCreated = 'isfc',
+        kWhatInputSurfaceAccepted = 'isfa',
         kWhatSignaledInputEOS    = 'seos',
         kWhatBuffersAllocated    = 'allc',
     };
@@ -48,12 +50,16 @@
     virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
     virtual void initiateConfigureComponent(const sp<AMessage> &msg) = 0;
     virtual void initiateCreateInputSurface() = 0;
+    virtual void initiateUsePersistentInputSurface(
+            const sp<PersistentSurface> &surface) = 0;
     virtual void initiateStart() = 0;
     virtual void initiateShutdown(bool keepComponentAllocated = false) = 0;
 
     // require an explicit message handler
     virtual void onMessageReceived(const sp<AMessage> &msg) = 0;
 
+    virtual status_t setSurface(const sp<Surface> &surface) { return INVALID_OPERATION; }
+
     virtual void signalFlush() = 0;
     virtual void signalResume() = 0;
 
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index f2b21c9..56d2523 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -37,6 +37,7 @@
 struct MemoryDealer;
 class IResourceManagerClient;
 class IResourceManagerService;
+struct PersistentSurface;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -67,6 +68,8 @@
     static sp<MediaCodec> CreateByComponentName(
             const sp<ALooper> &looper, const char *name, status_t *err = NULL);
 
+    static sp<PersistentSurface> CreatePersistentInputSurface();
+
     status_t configure(
             const sp<AMessage> &format,
             const sp<Surface> &nativeWindow,
@@ -77,6 +80,8 @@
 
     status_t createInputSurface(sp<IGraphicBufferProducer>* bufferProducer);
 
+    status_t usePersistentInputSurface(const sp<PersistentSurface> &surface);
+
     status_t start();
 
     // Returns to a state in which the component remains allocated but
@@ -141,6 +146,8 @@
     status_t getOutputFormat(size_t index, sp<AMessage> *format);
     status_t getInputBuffer(size_t index, sp<ABuffer> *buffer);
 
+    status_t setSurface(const sp<Surface> &nativeWindow);
+
     status_t requestIDRFrame();
 
     // Notification will be posted once there "is something to do", i.e.
@@ -179,7 +186,9 @@
     enum {
         kWhatInit                           = 'init',
         kWhatConfigure                      = 'conf',
+        kWhatSetSurface                     = 'sSur',
         kWhatCreateInputSurface             = 'cisf',
+        kWhatUsePersistentInputSurface      = 'pisf',
         kWhatStart                          = 'strt',
         kWhatStop                           = 'stop',
         kWhatRelease                        = 'rele',
@@ -215,6 +224,7 @@
         kFlagGatherCodecSpecificData    = 512,
         kFlagIsAsync                    = 1024,
         kFlagIsComponentAllocated       = 2048,
+        kFlagPushBlankBuffersOnShutdown = 4096,
     };
 
     struct BufferInfo {
@@ -259,7 +269,7 @@
     sp<AReplyToken> mReplyID;
     uint32_t mFlags;
     status_t mStickyError;
-    sp<Surface> mNativeWindow;
+    sp<Surface> mSurface;
     SoftwareRenderer *mSoftRenderer;
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
@@ -334,8 +344,9 @@
     void extractCSD(const sp<AMessage> &format);
     status_t queueCSDInputBuffer(size_t bufferIndex);
 
-    status_t setNativeWindow(
-            const sp<Surface> &surface);
+    status_t handleSetSurface(const sp<Surface> &surface);
+    status_t connectToSurface(const sp<Surface> &surface);
+    status_t disconnectFromSurface();
 
     void postActivityNotificationIfPossible();
 
@@ -351,7 +362,7 @@
     bool isExecuting() const;
 
     uint64_t getGraphicBufferSize();
-    void addResource(const char *type, uint64_t value);
+    void addResource(const String8 &type, const String8 &subtype, uint64_t value);
 
     /* called to get the last codec error when the sticky flag is set.
      * if no such codec error is found, returns UNKNOWN_ERROR.
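
A rough sketch of the two client-visible additions to MediaCodec: a persistent input surface that
can be attached to successive encoder instances, and an output surface that can be swapped at
runtime via setSurface(). It assumes the codec has already been configure()d appropriately (the
configure() signature is unchanged and only partially visible in this hunk):

    #include <gui/Surface.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/PersistentSurface.h>

    using namespace android;

    // Encoder side: reuse one input surface across codec instances.
    static void attachPersistentInput(const sp<MediaCodec>& encoder,
                                      const sp<PersistentSurface>& persistent) {
        // Typically called after configure() and before start().
        encoder->usePersistentInputSurface(persistent);
    }

    // Decoder side: replace the output surface without tearing the codec down.
    static status_t switchOutputSurface(const sp<MediaCodec>& decoder,
                                        const sp<Surface>& newSurface) {
        return decoder->setSurface(newSurface);
    }

The persistent surface itself is obtained once with MediaCodec::CreatePersistentInputSurface().
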
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 9d1f222..a991b02 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -27,6 +27,7 @@
 class AMessage;
 struct AReplyToken;
 class IGraphicBufferProducer;
+class IGraphicBufferConsumer;
 class MediaCodec;
 class MetaData;
 
@@ -41,6 +42,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &format,
             const sp<MediaSource> &source,
+            const sp<IGraphicBufferConsumer> &consumer = NULL,
             uint32_t flags = 0);
 
     bool isVideo() const { return mIsVideo; }
@@ -79,6 +81,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &outputFormat,
             const sp<MediaSource> &source,
+            const sp<IGraphicBufferConsumer> &consumer,
             uint32_t flags = 0);
 
     status_t onStart(MetaData *params);
@@ -107,6 +110,7 @@
     bool mDoMoreWorkPending;
     sp<AMessage> mEncoderActivityNotify;
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
+    sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
     List<MediaBuffer *> mInputBufferQueue;
     List<size_t> mAvailEncoderInputIndices;
     List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
diff --git a/include/media/stagefright/MediaFilter.h b/include/media/stagefright/MediaFilter.h
index 7b3f700..fdd2a34 100644
--- a/include/media/stagefright/MediaFilter.h
+++ b/include/media/stagefright/MediaFilter.h
@@ -34,6 +34,9 @@
     virtual void initiateAllocateComponent(const sp<AMessage> &msg);
     virtual void initiateConfigureComponent(const sp<AMessage> &msg);
     virtual void initiateCreateInputSurface();
+    virtual void initiateUsePersistentInputSurface(
+            const sp<PersistentSurface> &surface);
+
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
 
diff --git a/include/media/stagefright/MediaSync.h b/include/media/stagefright/MediaSync.h
index 8ad74a4..d1d634d 100644
--- a/include/media/stagefright/MediaSync.h
+++ b/include/media/stagefright/MediaSync.h
@@ -20,9 +20,12 @@
 #include <gui/IConsumerListener.h>
 #include <gui/IProducerListener.h>
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
 #include <media/stagefright/foundation/AHandler.h>
 
 #include <utils/Condition.h>
+#include <utils/KeyedVector.h>
 #include <utils/Mutex.h>
 
 namespace android {
@@ -72,14 +75,11 @@
 
     // Called when MediaSync is used to render video. It should be called
     // before createInputSurface().
-    status_t configureSurface(const sp<IGraphicBufferProducer> &output);
+    status_t setSurface(const sp<IGraphicBufferProducer> &output);
 
     // Called when audio track is used as media clock source. It should be
     // called before updateQueuedAudioData().
-    // |nativeSampleRateInHz| is the sample rate of audio data fed into audio
-    // track. It's the same number used to create AudioTrack.
-    status_t configureAudioTrack(
-            const sp<AudioTrack> &audioTrack, uint32_t nativeSampleRateInHz);
+    status_t setAudioTrack(const sp<AudioTrack> &audioTrack);
 
     // Create a surface for client to render video frames. This is the surface
     // on which the client should render video frames. Those video frames will
@@ -98,21 +98,31 @@
     // Set the consumer name of the input queue.
     void setName(const AString &name);
 
-    // Set the playback in a desired speed.
-    // This method can be called any time.
-    // |rate| is the ratio between desired speed and the normal one, and should
-    // be non-negative. The meaning of rate values:
-    // 1.0 -- normal playback
-    // 0.0 -- stop or pause
-    // larger than 1.0 -- faster than normal speed
-    // between 0.0 and 1.0 -- slower than normal speed
-    status_t setPlaybackRate(float rate);
-
     // Get the media clock used by the MediaSync so that the client can obtain
     // corresponding media time or real time via
     // MediaClock::getMediaTime() and MediaClock::getRealTimeFor().
     sp<const MediaClock> getMediaClock();
 
+    // Set the video frame rate hint - this is used by the video FrameScheduler
+    status_t setVideoFrameRateHint(float rate);
+
+    // Get the video frame rate measurement from the FrameScheduler.
+    // Returns -1 if there is no measurement.
+    float getVideoFrameRate();
+
+    // Set the sync settings parameters.
+    status_t setSyncSettings(const AVSyncSettings &syncSettings);
+
+    // Gets the sync settings parameters.
+    void getSyncSettings(AVSyncSettings *syncSettings /* nonnull */);
+
+    // Sets the playback rate using playback settings.
+    // This method can be called any time.
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+
+    // Gets the playback rate (playback settings parameters).
+    void getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+
     // Get the play time for pending audio frames in audio sink.
     status_t getPlayTimeForPendingAudioFrames(int64_t *outTimeUs);
 
@@ -190,9 +200,19 @@
 
     int64_t mNextBufferItemMediaUs;
     List<BufferItem> mBufferItems;
+
+    // Keep track of buffers received from |mInput|. This is needed because
+    // it's possible the consumer of |mOutput| could return a different
+    // GraphicBuffer::handle (e.g., due to passing buffers through IPC),
+    // and that could cause problems if the producer of |mInput| only
+    // supports pre-registered buffers.
+    KeyedVector<uint64_t, sp<GraphicBuffer> > mBuffersFromInput;
     sp<ALooper> mLooper;
     float mPlaybackRate;
 
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+
     sp<MediaClock> mMediaClock;
 
     MediaSync();
@@ -231,6 +251,22 @@
     // up. This must be called with mMutex locked.
     void onAbandoned_l(bool isInput);
 
+    // Set the playback to a desired speed.
+    // This method can be called any time.
+    // |rate| is the ratio between desired speed and the normal one, and should
+    // be non-negative. The meaning of rate values:
+    // 1.0 -- normal playback
+    // 0.0 -- stop or pause
+    // larger than 1.0 -- faster than normal speed
+    // between 0.0 and 1.0 -- slower than normal speed
+    void updatePlaybackRate_l(float rate);
+
+    // apply new sync settings
+    void resync_l();
+
+    // apply playback settings only - without resyncing or updating playback rate
+    status_t setPlaybackSettings_l(const AudioPlaybackRate &rate);
+
     // helper.
     bool isPlaying() { return mPlaybackRate != 0.0; }
 
diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h
deleted file mode 100644
index cfeec22..0000000
--- a/include/media/stagefright/NativeWindowWrapper.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NATIVE_WINDOW_WRAPPER_H_
-
-#define NATIVE_WINDOW_WRAPPER_H_
-
-#include <gui/Surface.h>
-
-namespace android {
-
-// Surface derives from ANativeWindow which derives from multiple
-// base classes, in order to carry it in AMessages, we'll temporarily wrap it
-// into a NativeWindowWrapper.
-
-struct NativeWindowWrapper : RefBase {
-    NativeWindowWrapper(
-            const sp<Surface> &surfaceTextureClient) :
-        mSurfaceTextureClient(surfaceTextureClient) { }
-
-    sp<ANativeWindow> getNativeWindow() const {
-        return mSurfaceTextureClient;
-    }
-
-    sp<Surface> getSurfaceTextureClient() const {
-        return mSurfaceTextureClient;
-    }
-
-private:
-    const sp<Surface> mSurfaceTextureClient;
-
-    DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper);
-};
-
-}  // namespace android
-
-#endif  // NATIVE_WINDOW_WRAPPER_H_
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 84b1b1a..7fabcb3 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -298,7 +298,6 @@
     status_t queueBufferToNativeWindow(BufferInfo *info);
     status_t cancelBufferToNativeWindow(BufferInfo *info);
     BufferInfo* dequeueBufferFromNativeWindow();
-    status_t pushBlankBuffersToNativeWindow();
 
     status_t freeBuffersOnPort(
             OMX_U32 portIndex, bool onlyThoseWeOwn = false);
@@ -347,7 +346,6 @@
 
     status_t configureCodec(const sp<MetaData> &meta);
 
-    status_t applyRotation();
     status_t waitForBufferFilled_l();
 
     int64_t getDecodingTimeUs();
diff --git a/include/media/stagefright/PersistentSurface.h b/include/media/stagefright/PersistentSurface.h
new file mode 100644
index 0000000..a35b9f1
--- /dev/null
+++ b/include/media/stagefright/PersistentSurface.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PERSISTENT_SURFACE_H_
+
+#define PERSISTENT_SURFACE_H_
+
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct PersistentSurface : public RefBase {
+    PersistentSurface(
+            const sp<IGraphicBufferProducer>& bufferProducer,
+            const sp<IGraphicBufferConsumer>& bufferConsumer) :
+        mBufferProducer(bufferProducer),
+        mBufferConsumer(bufferConsumer) { }
+
+    sp<IGraphicBufferProducer> getBufferProducer() const {
+        return mBufferProducer;
+    }
+
+    sp<IGraphicBufferConsumer> getBufferConsumer() const {
+        return mBufferConsumer;
+    }
+
+private:
+    const sp<IGraphicBufferProducer> mBufferProducer;
+    const sp<IGraphicBufferConsumer> mBufferConsumer;
+
+    DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
+};
+
+}  // namespace android
+
+#endif  // PERSISTENT_SURFACE_H_
diff --git a/include/media/stagefright/SurfaceUtils.h b/include/media/stagefright/SurfaceUtils.h
new file mode 100644
index 0000000..c1a9c0a
--- /dev/null
+++ b/include/media/stagefright/SurfaceUtils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SURFACE_UTILS_H_
+
+#define SURFACE_UTILS_H_
+
+#include <utils/Errors.h>
+
+struct ANativeWindow;
+
+namespace android {
+
+status_t setNativeWindowSizeFormatAndUsage(
+        ANativeWindow *nativeWindow /* nonnull */,
+        int width, int height, int format, int rotation, int usage);
+status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
+
+} // namespace android
+
+#endif  // SURFACE_UTILS_H_
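
The two helpers above consolidate logic that previously lived in ACodec/OMXCodec
(pushBlankBuffersToNativeWindow is removed from both elsewhere in this patch). A minimal usage
sketch, with GRALLOC_USAGE_HW_TEXTURE standing in for whatever usage bits the caller actually
needs:

    #include <hardware/gralloc.h>
    #include <system/window.h>
    #include <media/stagefright/SurfaceUtils.h>

    using namespace android;

    // Size/format setup, as a codec might do before allocating output buffers.
    static status_t prepareOutputWindow(ANativeWindow *window, int w, int h, int format) {
        return setNativeWindowSizeFormatAndUsage(
                window, w, h, format, 0 /* rotation */,
                GRALLOC_USAGE_HW_TEXTURE /* usage */);
    }

    // On shutdown, overwrite the last queued frame so stale content is not shown.
    static status_t clearOutputWindow(ANativeWindow *window) {
        return pushBlankBuffersToNativeWindow(window);
    }
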
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 0ce1603..5e9d7d4 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -76,6 +76,15 @@
 
 bool operator <(const HLSTime &t0, const HLSTime &t1);
 
+// read and write various objects to/from AMessage
+
+void writeToAMessage(sp<AMessage> msg, const AudioPlaybackRate &rate);
+void readFromAMessage(const sp<AMessage> &msg, AudioPlaybackRate *rate /* nonnull */);
+
+void writeToAMessage(sp<AMessage> msg, const AVSyncSettings &sync, float videoFpsHint);
+void readFromAMessage(
+        const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
 }  // namespace android
 
 #endif  // UTILS_H_
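
A small round-trip sketch of the new helpers, e.g. to carry a requested rate from a public
setter to a handler thread:

    #include <media/AudioResamplerPublic.h>
    #include <media/stagefright/Utils.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static AudioPlaybackRate relayRate(const AudioPlaybackRate &in) {
        sp<AMessage> msg = new AMessage;
        writeToAMessage(msg, in);      // serialize into the message

        AudioPlaybackRate out;
        readFromAMessage(msg, &out);   // recover it on the receiving side
        return out;
    }
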
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 7d8222f..bbeb854 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -35,13 +35,14 @@
 
 // ---------------------------------------------------------------------------
 
-AudioEffect::AudioEffect()
-    : mStatus(NO_INIT)
+AudioEffect::AudioEffect(const String16& opPackageName)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
 }
 
 
 AudioEffect::AudioEffect(const effect_uuid_t *type,
+                const String16& opPackageName,
                 const effect_uuid_t *uuid,
                 int32_t priority,
                 effect_callback_t cbf,
@@ -49,12 +50,13 @@
                 int sessionId,
                 audio_io_handle_t io
                 )
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
     mStatus = set(type, uuid, priority, cbf, user, sessionId, io);
 }
 
 AudioEffect::AudioEffect(const char *typeStr,
+                const String16& opPackageName,
                 const char *uuidStr,
                 int32_t priority,
                 effect_callback_t cbf,
@@ -62,7 +64,7 @@
                 int sessionId,
                 audio_io_handle_t io
                 )
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
     effect_uuid_t type;
     effect_uuid_t *pType = NULL;
@@ -128,7 +130,7 @@
     mIEffectClient = new EffectClient(this);
 
     iEffect = audioFlinger->createEffect((effect_descriptor_t *)&mDescriptor,
-            mIEffectClient, priority, io, mSessionId, &mStatus, &mId, &enabled);
+            mIEffectClient, priority, io, mSessionId, mOpPackageName, &mStatus, &mId, &enabled);
 
     if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
         ALOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index 786eb63..9d07011 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -68,7 +68,7 @@
     mFormat.format = (audio_format_t)parcel->readInt32();
     mRouteFlags = parcel->readInt32();
     mRegistrationId = parcel->readString8();
-    mFlags = (uint32_t)parcel->readInt32();
+    mCbFlags = (uint32_t)parcel->readInt32();
     size_t size = (size_t)parcel->readInt32();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
@@ -90,7 +90,7 @@
     parcel->writeInt32(mFormat.format);
     parcel->writeInt32(mRouteFlags);
     parcel->writeString8(mRegistrationId);
-    parcel->writeInt32(mFlags);
+    parcel->writeInt32(mCbFlags);
     size_t size = mCriteria.size();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 5bbe786..3868f13 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -65,9 +65,10 @@
 
 // ---------------------------------------------------------------------------
 
-AudioRecord::AudioRecord()
-    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
+AudioRecord::AudioRecord(const String16 &opPackageName)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
 }
 
@@ -76,6 +77,7 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
+        const String16& opPackageName,
         size_t frameCount,
         callback_t cbf,
         void* user,
@@ -83,15 +85,20 @@
         int sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
+        int uid,
+        pid_t pid,
         const audio_attributes_t* pAttributes)
-    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
+    : mStatus(NO_INIT),
+      mOpPackageName(opPackageName),
+      mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mProxy(NULL),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
-            pAttributes);
+            uid, pid, pAttributes);
 }
 
 AudioRecord::~AudioRecord()
@@ -107,6 +114,10 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
+        // No lock here: worst case we remove a NULL callback which will be a nop
+        if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+        }
         IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
         mAudioRecord.clear();
         mCblkMemory.clear();
@@ -131,12 +142,15 @@
         int sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
+        int uid,
+        pid_t pid,
         const audio_attributes_t* pAttributes)
 {
     ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
-          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
+          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
+          "uid %d, pid %d",
           inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
-          sessionId, transferType, flags);
+          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -223,6 +237,19 @@
     }
     ALOGV("set(): mSessionId %d", mSessionId);
 
+    int callingpid = IPCThreadState::self()->getCallingPid();
+    int mypid = getpid();
+    if (uid == -1 || (callingpid != mypid)) {
+        mClientUid = IPCThreadState::self()->getCallingUid();
+    } else {
+        mClientUid = uid;
+    }
+    if (pid == -1 || (callingpid != mypid)) {
+        mClientPid = callingpid;
+    } else {
+        mClientPid = pid;
+    }
+
     mFlags = flags;
     mCbf = cbf;
 
@@ -233,7 +260,7 @@
     }
 
     // create the IAudioRecord
-    status_t status = openRecord_l(0 /*epoch*/);
+    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
 
     if (status != NO_ERROR) {
         if (mAudioRecordThread != 0) {
@@ -281,6 +308,8 @@
     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
     int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
 
+    mActive = true;
+
     status_t status = NO_ERROR;
     if (!(flags & CBLK_INVALID)) {
         status = mAudioRecord->start(event, triggerSession);
@@ -293,9 +322,9 @@
     }
 
     if (status != NO_ERROR) {
+        mActive = false;
         ALOGE("start() status %d", status);
     } else {
-        mActive = true;
         sp<AudioRecordThread> t = mAudioRecordThread;
         if (t != 0) {
             t->resume();
@@ -415,10 +444,38 @@
     return AudioSystem::getInputFramesLost(getInputPrivate());
 }
 
+// ---- Explicit Routing ---------------------------------------------------
+status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
+    AutoMutex lock(mLock);
+    if (mSelectedDeviceId != deviceId) {
+        mSelectedDeviceId = deviceId;
+        // stop capture so that audio policy manager does not reject the new instance start request
+        // as only one capture can be active at a time.
+        if (mAudioRecord != 0 && mActive) {
+            mAudioRecord->stop();
+        }
+        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+    }
+    return NO_ERROR;
+}
+
+audio_port_handle_t AudioRecord::getInputDevice() {
+    AutoMutex lock(mLock);
+    return mSelectedDeviceId;
+}
+
+audio_port_handle_t AudioRecord::getRoutedDeviceId() {
+    AutoMutex lock(mLock);
+    if (mInput == AUDIO_IO_HANDLE_NONE) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return AudioSystem::getDeviceIdForIo(mInput);
+}
+
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioRecord::openRecord_l(size_t epoch)
+status_t AudioRecord::openRecord_l(size_t epoch, const String16& opPackageName)
 {
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
@@ -458,10 +515,16 @@
         }
     }
 
+    if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+
     audio_io_handle_t input;
     status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
                                         (audio_session_t)mSessionId,
-                                        mSampleRate, mFormat, mChannelMask, mFlags);
+                                        IPCThreadState::self()->getCallingUid(),
+                                        mSampleRate, mFormat, mChannelMask,
+                                        mFlags, mSelectedDeviceId);
 
     if (status != NO_ERROR) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
@@ -484,11 +547,14 @@
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
     sp<IAudioRecord> record = audioFlinger->openRecord(input,
-                                                       mSampleRate, mFormat,
+                                                       mSampleRate,
+                                                       mFormat,
                                                        mChannelMask,
+                                                       opPackageName,
                                                        &temp,
                                                        &trackFlags,
                                                        tid,
+                                                       mClientUid,
                                                        &mSessionId,
                                                        &notificationFrames,
                                                        iMem,
@@ -585,6 +651,10 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
 
+    if (mDeviceCallback != 0) {
+        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+
     return NO_ERROR;
     }
 
@@ -1014,7 +1084,7 @@
     // It will also delete the strong references on previous IAudioRecord and IMemory
     size_t position = mProxy->getPosition();
     mNewPosition = position + mUpdatePeriod;
-    status_t result = openRecord_l(position);
+    status_t result = openRecord_l(position, mOpPackageName);
     if (result == NO_ERROR) {
         if (mActive) {
             // callback thread or sync event hasn't changed
@@ -1030,6 +1100,48 @@
     return result;
 }
 
+status_t AudioRecord::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s adding NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback == callback) {
+        ALOGW("%s adding same callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    status_t status = NO_ERROR;
+    if (mInput != AUDIO_IO_HANDLE_NONE) {
+        if (mDeviceCallback != 0) {
+            ALOGW("%s callback already present!", __FUNCTION__);
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+        }
+        status = AudioSystem::addAudioDeviceCallback(callback, mInput);
+    }
+    mDeviceCallback = callback;
+    return status;
+}
+
+status_t AudioRecord::removeAudioDeviceCallback(
+        const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s removing NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback != callback) {
+        ALOGW("%s removing different callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (mInput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+    mDeviceCallback = 0;
+    return NO_ERROR;
+}
+
 // =========================================================================
 
 void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
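
Taken together, the AudioRecord changes above add explicit input routing and per-io device
callbacks. A hedged client sketch (the usual set()/start() sequence is elided, and
onAudioDeviceUpdate() is assumed to be the notification hook declared on
AudioSystem::AudioDeviceCallback in AudioSystem.h):

    #define LOG_TAG "RecordRoutingExample"
    #include <utils/Log.h>
    #include <media/AudioRecord.h>
    #include <media/AudioSystem.h>

    using namespace android;

    struct InputRoutingListener : public AudioSystem::AudioDeviceCallback {
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("input io %d now captures from device %d", audioIo, deviceId);
        }
    };

    static void captureFromDevice(const sp<AudioRecord>& record,
                                  audio_port_handle_t deviceId) {
        record->addAudioDeviceCallback(new InputRoutingListener());
        // Stops capture and invalidates the control block so the record is
        // re-opened on the selected device (see setInputDevice() above).
        record->setInputDevice(deviceId);
        ALOGI("currently routed input device: %d", record->getRoutedDeviceId());
    }
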
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 2ed50e8..4c2e77b 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -32,20 +32,12 @@
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
-Mutex AudioSystem::gLockCache;
 Mutex AudioSystem::gLockAPS;
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
+dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
 
-// Cached values for output handles
-DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(NULL);
-
-// Cached values for recording queries, all protected by gLock
-uint32_t AudioSystem::gPrevInSamplingRate;
-audio_format_t AudioSystem::gPrevInFormat;
-audio_channel_mask_t AudioSystem::gPrevInChannelMask;
-size_t AudioSystem::gInBuffSize = 0;    // zero indicates cache is invalid
 
 // establish binder interface to AudioFlinger service
 const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -84,6 +76,25 @@
     return af;
 }
 
+const sp<AudioSystem::AudioFlingerClient> AudioSystem::getAudioFlingerClient()
+{
+    // calling get_audio_flinger() will initialize gAudioFlingerClient if needed
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return 0;
+    Mutex::Autolock _l(gLock);
+    return gAudioFlingerClient;
+}
+
+sp<AudioIoDescriptor> AudioSystem::getIoDescriptor(audio_io_handle_t ioHandle)
+{
+    sp<AudioIoDescriptor> desc;
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc != 0) {
+        desc = afc->getIoDescriptor(ioHandle);
+    }
+    return desc;
+}
+
 /* static */ status_t AudioSystem::checkAudioFlinger()
 {
     if (defaultServiceManager()->checkService(String16("media.audio_flinger")) != 0) {
@@ -257,18 +268,13 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
-        gLockCache.unlock();
         *samplingRate = af->sampleRate(output);
-        gLockCache.lock();
     } else {
         ALOGV("getOutputSamplingRate() reading from output desc");
-        *samplingRate = outputDesc->samplingRate;
+        *samplingRate = outputDesc->mSamplingRate;
     }
     if (*samplingRate == 0) {
         ALOGE("AudioSystem::getSamplingRate failed for output %d", output);
@@ -301,16 +307,11 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
-        gLockCache.unlock();
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         *frameCount = af->frameCount(output);
-        gLockCache.lock();
     } else {
-        *frameCount = outputDesc->frameCount;
+        *frameCount = outputDesc->mFrameCount;
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCount failed for output %d", output);
@@ -343,16 +344,11 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
-        gLockCache.unlock();
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         *latency = af->latency(output);
-        gLockCache.lock();
     } else {
-        *latency = outputDesc->latency;
+        *latency = outputDesc->mLatency;
     }
 
     ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -363,34 +359,11 @@
 status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize)
 {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
     }
-    Mutex::Autolock _l(gLockCache);
-    // Do we have a stale gInBufferSize or are we requesting the input buffer size for new values
-    size_t inBuffSize = gInBuffSize;
-    if ((inBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
-        || (channelMask != gPrevInChannelMask)) {
-        gLockCache.unlock();
-        inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
-        gLockCache.lock();
-        if (inBuffSize == 0) {
-            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
-                    sampleRate, format, channelMask);
-            return BAD_VALUE;
-        }
-        // A benign race is possible here: we could overwrite a fresher cache entry
-        // save the request params
-        gPrevInSamplingRate = sampleRate;
-        gPrevInFormat = format;
-        gPrevInChannelMask = channelMask;
-
-        gInBuffSize = inBuffSize;
-    }
-    *buffSize = inBuffSize;
-
-    return NO_ERROR;
+    return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
 }
 
 status_t AudioSystem::setVoiceVolume(float value)
@@ -452,6 +425,17 @@
 
 // ---------------------------------------------------------------------------
 
+
+void AudioSystem::AudioFlingerClient::clearIoCache()
+{
+    Mutex::Autolock _l(mLock);
+    mIoDescriptors.clear();
+    mInBuffSize = 0;
+    mInSamplingRate = 0;
+    mInFormat = AUDIO_FORMAT_DEFAULT;
+    mInChannelMask = AUDIO_CHANNEL_NONE;
+}
+
 void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
 {
     audio_error_callback cb = NULL;
@@ -461,11 +445,8 @@
         cb = gAudioErrorCallback;
     }
 
-    {
-        // clear output handles and stream to output map caches
-        Mutex::Autolock _l(gLockCache);
-        AudioSystem::gOutputs.clear();
-    }
+    // clear output handles and stream to output map caches
+    clearIoCache();
 
     if (cb) {
         cb(DEAD_OBJECT);
@@ -473,75 +454,189 @@
     ALOGW("AudioFlinger server died!");
 }
 
-void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle_t ioHandle,
-        const void *param2) {
+void AudioSystem::AudioFlingerClient::ioConfigChanged(audio_io_config_event event,
+                                                      const sp<AudioIoDescriptor>& ioDesc) {
     ALOGV("ioConfigChanged() event %d", event);
-    const OutputDescriptor *desc;
 
-    if (ioHandle == AUDIO_IO_HANDLE_NONE) return;
+    if (ioDesc == 0 || ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return;
 
-    Mutex::Autolock _l(AudioSystem::gLockCache);
+    audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
+    Vector < sp<AudioDeviceCallback> > callbacks;
 
-    switch (event) {
-    case STREAM_CONFIG_CHANGED:
-        break;
-    case OUTPUT_OPENED: {
-        if (gOutputs.indexOfKey(ioHandle) >= 0) {
-            ALOGV("ioConfigChanged() opening already existing output! %d", ioHandle);
-            break;
-        }
-        if (param2 == NULL) break;
-        desc = (const OutputDescriptor *)param2;
+    {
+        Mutex::Autolock _l(mLock);
 
-        OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
-        gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x "
-                "frameCount %zu latency %d",
-                outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
-                outputDesc->frameCount, outputDesc->latency);
+        switch (event) {
+        case AUDIO_OUTPUT_OPENED:
+        case AUDIO_INPUT_OPENED: {
+            if (getIoDescriptor(ioDesc->mIoHandle) != 0) {
+                ALOGV("ioConfigChanged() opening already existing output! %d", ioDesc->mIoHandle);
+                break;
+            }
+            mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+
+            if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
+                deviceId = ioDesc->getDeviceId();
+                ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+                if (ioIndex >= 0) {
+                    callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+                }
+            }
+            ALOGV("ioConfigChanged() new %s opened %d samplingRate %u, format %#x channel mask %#x "
+                    "frameCount %zu deviceId %d", event == AUDIO_OUTPUT_OPENED ? "output" : "input",
+                    ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat, ioDesc->mChannelMask,
+                    ioDesc->mFrameCount, ioDesc->getDeviceId());
+            } break;
+        case AUDIO_OUTPUT_CLOSED:
+        case AUDIO_INPUT_CLOSED: {
+            if (getIoDescriptor(ioDesc->mIoHandle) == 0) {
+                ALOGW("ioConfigChanged() closing unknown %s %d",
+                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                break;
+            }
+            ALOGV("ioConfigChanged() %s %d closed",
+                  event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+
+            mIoDescriptors.removeItem(ioDesc->mIoHandle);
+            mAudioDeviceCallbacks.removeItem(ioDesc->mIoHandle);
+            } break;
+
+        case AUDIO_OUTPUT_CONFIG_CHANGED:
+        case AUDIO_INPUT_CONFIG_CHANGED: {
+            sp<AudioIoDescriptor> oldDesc = getIoDescriptor(ioDesc->mIoHandle);
+            if (oldDesc == 0) {
+                ALOGW("ioConfigChanged() modifying unknown output! %d", ioDesc->mIoHandle);
+                break;
+            }
+
+            deviceId = oldDesc->getDeviceId();
+            mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+
+            if (deviceId != ioDesc->getDeviceId()) {
+                deviceId = ioDesc->getDeviceId();
+                ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+                if (ioIndex >= 0) {
+                    callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+                }
+            }
+            ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
+                    "channel mask %#x frameCount %zu deviceId %d",
+                    event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
+                    ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
+                    ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->getDeviceId());
+
         } break;
-    case OUTPUT_CLOSED: {
-        if (gOutputs.indexOfKey(ioHandle) < 0) {
-            ALOGW("ioConfigChanged() closing unknown output! %d", ioHandle);
-            break;
         }
-        ALOGV("ioConfigChanged() output %d closed", ioHandle);
-
-        gOutputs.removeItem(ioHandle);
-        } break;
-
-    case OUTPUT_CONFIG_CHANGED: {
-        int index = gOutputs.indexOfKey(ioHandle);
-        if (index < 0) {
-            ALOGW("ioConfigChanged() modifying unknown output! %d", ioHandle);
-            break;
-        }
-        if (param2 == NULL) break;
-        desc = (const OutputDescriptor *)param2;
-
-        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x "
-                "channel mask %#x frameCount %zu latency %d",
-                ioHandle, desc->samplingRate, desc->format,
-                desc->channelMask, desc->frameCount, desc->latency);
-        OutputDescriptor *outputDesc = gOutputs.valueAt(index);
-        delete outputDesc;
-        outputDesc =  new OutputDescriptor(*desc);
-        gOutputs.replaceValueFor(ioHandle, outputDesc);
-    } break;
-    case INPUT_OPENED:
-    case INPUT_CLOSED:
-    case INPUT_CONFIG_CHANGED:
-        break;
-
+    }
+    // callbacks.size() != 0 =>  ioDesc->mIoHandle and deviceId are valid
+    for (size_t i = 0; i < callbacks.size(); i++) {
+        callbacks[i]->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
     }
 }
 
-void AudioSystem::setErrorCallback(audio_error_callback cb)
+status_t AudioSystem::AudioFlingerClient::getInputBufferSize(
+                                                uint32_t sampleRate, audio_format_t format,
+                                                audio_channel_mask_t channelMask, size_t* buffSize)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+    Mutex::Autolock _l(mLock);
+    // Do we have a stale mInBuffSize or are we requesting the input buffer size for new values
+    if ((mInBuffSize == 0) || (sampleRate != mInSamplingRate) || (format != mInFormat)
+        || (channelMask != mInChannelMask)) {
+        size_t inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
+        if (inBuffSize == 0) {
+            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
+                    sampleRate, format, channelMask);
+            return BAD_VALUE;
+        }
+        // A benign race is possible here: we could overwrite a fresher cache entry
+        // save the request params
+        mInSamplingRate = sampleRate;
+        mInFormat = format;
+        mInChannelMask = channelMask;
+
+        mInBuffSize = inBuffSize;
+    }
+
+    *buffSize = mInBuffSize;
+
+    return NO_ERROR;
+}
+
+sp<AudioIoDescriptor> AudioSystem::AudioFlingerClient::getIoDescriptor(audio_io_handle_t ioHandle)
+{
+    sp<AudioIoDescriptor> desc;
+    ssize_t index = mIoDescriptors.indexOfKey(ioHandle);
+    if (index >= 0) {
+        desc = mIoDescriptors.valueAt(index);
+    }
+    return desc;
+}
+
+status_t AudioSystem::AudioFlingerClient::addAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    Mutex::Autolock _l(mLock);
+    Vector < sp<AudioDeviceCallback> > callbacks;
+    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    if (ioIndex >= 0) {
+        callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+    }
+
+    for (size_t cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
+        if (callbacks[cbIndex] == callback) {
+            return INVALID_OPERATION;
+        }
+    }
+    callbacks.add(callback);
+
+    mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    return NO_ERROR;
+}
+
+status_t AudioSystem::AudioFlingerClient::removeAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    Mutex::Autolock _l(mLock);
+    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    if (ioIndex < 0) {
+        return INVALID_OPERATION;
+    }
+    Vector < sp<AudioDeviceCallback> > callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+
+    size_t cbIndex;
+    for (cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
+        if (callbacks[cbIndex] == callback) {
+            break;
+        }
+    }
+    if (cbIndex == callbacks.size()) {
+        return INVALID_OPERATION;
+    }
+    callbacks.removeAt(cbIndex);
+    if (callbacks.size() != 0) {
+        mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    } else {
+        mAudioDeviceCallbacks.removeItem(audioIo);
+    }
+    return NO_ERROR;
+}
+
+/* static */ void AudioSystem::setErrorCallback(audio_error_callback cb)
 {
     Mutex::Autolock _l(gLock);
     gAudioErrorCallback = cb;
 }
 
+/*static*/ void AudioSystem::setDynPolicyCallback(dynamic_policy_callback cb)
+{
+    Mutex::Autolock _l(gLock);
+    gDynPolicyCallback = cb;
+}
+
 // client singleton for AudioPolicyService binder interface
 // protected by gLockAPS
 sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
@@ -653,6 +748,7 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
@@ -662,7 +758,7 @@
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
-    return aps->getOutputForAttr(attr, output, session, stream,
+    return aps->getOutputForAttr(attr, output, session, stream, uid,
                                  samplingRate, format, channelMask,
                                  flags, selectedDeviceId, offloadInfo);
 }
@@ -697,14 +793,17 @@
 status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
                                 audio_io_handle_t *input,
                                 audio_session_t session,
+                                uid_t uid,
                                 uint32_t samplingRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                audio_input_flags_t flags)
+                                audio_input_flags_t flags,
+                                audio_port_handle_t selectedDeviceId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
-    return aps->getInputForAttr(attr, input, session, samplingRate, format, channelMask, flags);
+    return aps->getInputForAttr(
+            attr, input, session, uid, samplingRate, format, channelMask, flags, selectedDeviceId);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input,
@@ -859,11 +958,10 @@
     // called by restoreTrack_l(), which needs new IAudioFlinger and IAudioPolicyService instances
     ALOGV("clearAudioConfigCache()");
     {
-        Mutex::Autolock _l(gLockCache);
-        gOutputs.clear();
-    }
-    {
         Mutex::Autolock _l(gLock);
+        if (gAudioFlingerClient != 0) {
+            gAudioFlingerClient->clearIoCache();
+        }
         gAudioFlinger.clear();
     }
     {
@@ -929,7 +1027,7 @@
     return aps->setAudioPortConfig(config);
 }
 
-status_t AudioSystem::addAudioPortCallback(const sp<AudioPortCallback>& callBack)
+status_t AudioSystem::addAudioPortCallback(const sp<AudioPortCallback>& callback)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
@@ -938,10 +1036,11 @@
     if (gAudioPolicyServiceClient == 0) {
         return NO_INIT;
     }
-    return gAudioPolicyServiceClient->addAudioPortCallback(callBack);
+    return gAudioPolicyServiceClient->addAudioPortCallback(callback);
 }
 
-status_t AudioSystem::removeAudioPortCallback(const sp<AudioPortCallback>& callBack)
+/*static*/
+status_t AudioSystem::removeAudioPortCallback(const sp<AudioPortCallback>& callback)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
@@ -950,9 +1049,39 @@
     if (gAudioPolicyServiceClient == 0) {
         return NO_INIT;
     }
-    return gAudioPolicyServiceClient->removeAudioPortCallback(callBack);
+    return gAudioPolicyServiceClient->removeAudioPortCallback(callback);
 }
 
+status_t AudioSystem::addAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
+    }
+    return afc->addAudioDeviceCallback(callback, audioIo);
+}
+
+status_t AudioSystem::removeAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
+    }
+    return afc->removeAudioDeviceCallback(callback, audioIo);
+}
+
+audio_port_handle_t AudioSystem::getDeviceIdForIo(audio_io_handle_t audioIo)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    const sp<AudioIoDescriptor> desc = getIoDescriptor(audioIo);
+    if (desc == 0) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return desc->getDeviceId();
+}
 
 status_t AudioSystem::acquireSoundTriggerSession(audio_session_t *session,
                                        audio_io_handle_t *ioHandle,
@@ -1003,25 +1132,25 @@
 // ---------------------------------------------------------------------------
 
 status_t AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
-        const sp<AudioPortCallback>& callBack)
+        const sp<AudioPortCallback>& callback)
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
-        if (mAudioPortCallbacks[i] == callBack) {
+        if (mAudioPortCallbacks[i] == callback) {
             return INVALID_OPERATION;
         }
     }
-    mAudioPortCallbacks.add(callBack);
+    mAudioPortCallbacks.add(callback);
     return NO_ERROR;
 }
 
 status_t AudioSystem::AudioPolicyServiceClient::removeAudioPortCallback(
-        const sp<AudioPortCallback>& callBack)
+        const sp<AudioPortCallback>& callback)
 {
     Mutex::Autolock _l(mLock);
     size_t i;
     for (i = 0; i < mAudioPortCallbacks.size(); i++) {
-        if (mAudioPortCallbacks[i] == callBack) {
+        if (mAudioPortCallbacks[i] == callback) {
             break;
         }
     }
@@ -1032,6 +1161,7 @@
     return NO_ERROR;
 }
 
+
 void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
 {
     Mutex::Autolock _l(mLock);
@@ -1051,7 +1181,16 @@
 void AudioSystem::AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(
         String8 regId, int32_t state)
 {
-    ALOGV("TODO propagate onDynamicPolicyMixStateUpdate(%s, %d)", regId.string(), state);
+    ALOGV("AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(%s, %d)", regId.string(), state);
+    dynamic_policy_callback cb = NULL;
+    {
+        Mutex::Autolock _l(AudioSystem::gLock);
+        cb = gDynPolicyCallback;
+    }
+
+    if (cb != NULL) {
+        cb(DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE, regId, state);
+    }
 }
 
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
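The AudioSystem.cpp hunks retire the file-scope output cache in favour of state owned by
AudioFlingerClient: AudioIoDescriptor objects keyed by io handle, plus a per-handle list of
AudioDeviceCallback observers that ioConfigChanged() notifies when the routed device changes.
A minimal observer sketch, assuming AudioDeviceCallback is the RefBase-based interface whose
onAudioDeviceUpdate() is invoked in the callbacks loop above; the class name and log tag are
illustrative:

    #define LOG_TAG "RoutingLogger"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>

    namespace android {

    // Logs every routing update delivered through the new per-io-handle
    // callback list kept by AudioFlingerClient.
    struct RoutingLogger : public AudioSystem::AudioDeviceCallback {
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("io %d now routed to device %d", audioIo, deviceId);
        }
    };

    } // namespace android

Registration goes through the new AudioSystem entry point, e.g.
AudioSystem::addAudioDeviceCallback(new RoutingLogger(), output), balanced by
removeAudioDeviceCallback() on teardown.
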
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 8555983..db316b0 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -234,6 +234,10 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // No lock here: worst case we remove a NULL callback which will be a nop
+        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+        }
         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
         mAudioTrack.clear();
         mCblkMemory.clear();
@@ -393,6 +397,7 @@
         return BAD_VALUE;
     }
     mSampleRate = sampleRate;
+    mOriginalSampleRate = sampleRate;
     mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
 
     // Make copy of input parameter offloadInfo so that in the future:
@@ -470,6 +475,7 @@
     mSequence = 1;
     mObservedSequence = mSequence;
     mInUnderrun = false;
+    mPreviousTimestampValid = false;
 
     return NO_ERROR;
 }
@@ -496,6 +502,8 @@
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
         mPosition = 0;
+        mPreviousTimestampValid = false;
+
         // For offloaded tracks, we don't know if the hardware counters are really zero here,
         // since the flush is asynchronous and stop may not fully drain.
         // We save the time when the track is started to later verify whether
@@ -756,6 +764,15 @@
     return mSampleRate;
 }
 
+uint32_t AudioTrack::getOriginalSampleRate() const
+{
+    if (mIsTimed) {
+        return 0;
+    }
+
+    return mOriginalSampleRate;
+}
+
 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
 {
     AutoMutex lock(mLock);
@@ -995,6 +1012,7 @@
     mNewPosition = mUpdatePeriod;
     (void) updateAndGetPosition_l();
     mPosition = 0;
+    mPreviousTimestampValid = false;
 #if 0
     // The documentation is not clear on the behavior of reload() and the restoration
     // of loop count. Historically we have not restored loop count, start, end,
@@ -1028,6 +1046,14 @@
     return mSelectedDeviceId;
 }
 
+audio_port_handle_t AudioTrack::getRoutedDeviceId() {
+    AutoMutex lock(mLock);
+    if (mOutput == AUDIO_IO_HANDLE_NONE) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return AudioSystem::getDeviceIdForIo(mOutput);
+}
+
 status_t AudioTrack::attachAuxEffect(int effectId)
 {
     AutoMutex lock(mLock);
@@ -1057,13 +1083,16 @@
         return NO_INIT;
     }
 
+    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
     audio_io_handle_t output;
     audio_stream_type_t streamType = mStreamType;
     audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
 
     status_t status;
     status = AudioSystem::getOutputForAttr(attr, &output,
-                                           (audio_session_t)mSessionId, &streamType,
+                                           (audio_session_t)mSessionId, &streamType, mClientUid,
                                            mSampleRate, mFormat, mChannelMask,
                                            mFlags, mSelectedDeviceId, mOffloadInfo);
 
@@ -1102,6 +1131,7 @@
     }
     if (mSampleRate == 0) {
         mSampleRate = afSampleRate;
+        mOriginalSampleRate = afSampleRate;
     }
     // Client decides whether the track is TIMED (see below), but can only express a preference
     // for FAST.  Server will perform additional tests.
@@ -1360,6 +1390,10 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
 
+    if (mDeviceCallback != 0) {
+        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
+
     return NO_ERROR;
     }
 
@@ -2089,6 +2123,11 @@
 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
 {
     AutoMutex lock(mLock);
+
+    bool previousTimestampValid = mPreviousTimestampValid;
+    // Set false here to cover all the error return cases.
+    mPreviousTimestampValid = false;
+
     // FIXME not implemented for fast tracks; should use proxy and SSQ
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         return INVALID_OPERATION;
@@ -2187,6 +2226,46 @@
         // IAudioTrack.  And timestamp.mPosition is initially in server's
         // point of view, so we need to apply the same fudge factor to it.
     }
+
+    // Prevent retrograde motion in timestamp.
+    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
+    if (status == NO_ERROR) {
+        if (previousTimestampValid) {
+#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
+            const uint64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
+            const uint64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
+#undef TIME_TO_NANOS
+            if (currentTimeNanos < previousTimeNanos) {
+                ALOGW("retrograde timestamp time");
+                // FIXME Consider blocking this from propagating upwards.
+            }
+
+            // Looking at signed delta will work even when the timestamps
+            // are wrapping around.
+            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
+                    - mPreviousTimestamp.mPosition);
+            // position can bobble slightly as an artifact; this hides the bobble
+            static const int32_t MINIMUM_POSITION_DELTA = 8;
+            if (deltaPosition < 0) {
+                // Only report once per position instead of spamming the log.
+                if (!mRetrogradeMotionReported) {
+                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
+                            deltaPosition,
+                            timestamp.mPosition,
+                            mPreviousTimestamp.mPosition);
+                    mRetrogradeMotionReported = true;
+                }
+            } else {
+                mRetrogradeMotionReported = false;
+            }
+            if (deltaPosition < MINIMUM_POSITION_DELTA) {
+                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
+            }
+        }
+        mPreviousTimestamp = timestamp;
+        mPreviousTimestampValid = true;
+    }
+
     return status;
 }
 
@@ -2248,6 +2327,48 @@
     return mProxy->getUnderrunFrames();
 }
 
+status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s adding NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback == callback) {
+        ALOGW("%s adding same callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    status_t status = NO_ERROR;
+    if (mOutput != AUDIO_IO_HANDLE_NONE) {
+        if (mDeviceCallback != 0) {
+            ALOGW("%s callback already present!", __FUNCTION__);
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+        }
+        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
+    }
+    mDeviceCallback = callback;
+    return status;
+}
+
+status_t AudioTrack::removeAudioDeviceCallback(
+        const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s removing NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback != callback) {
+        ALOGW("%s removing different callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (mOutput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
+    mDeviceCallback = 0;
+    return NO_ERROR;
+}
+
 // =========================================================================
 
 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
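AudioTrack now owns a single mDeviceCallback, re-registering it with AudioSystem whenever its
output handle is torn down or re-created, and getTimestamp() filters retrograde positions against
the previous timestamp. A small caller sketch, assuming an already-initialized AudioTrack and any
implementation of AudioSystem::AudioDeviceCallback (such as the one sketched after the
AudioSystem.cpp hunks above):

    #include <media/AudioTrack.h>

    using namespace android;

    // Attach a routing observer to a track; the track re-registers it
    // internally when its output changes, so the caller only has to
    // balance this with removeAudioDeviceCallback() at teardown.
    status_t watchTrackRouting(const sp<AudioTrack>& track,
                               const sp<AudioSystem::AudioDeviceCallback>& cb) {
        // Returns INVALID_OPERATION if the same callback is added twice,
        // mirroring the check in AudioTrack::addAudioDeviceCallback().
        return track->addAudioDeviceCallback(cb);
    }
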
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 38055f9..d722fe9 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -174,9 +174,11 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& opPackageName,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
@@ -190,11 +192,13 @@
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        data.writeString16(opPackageName);
         size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
         data.writeInt32((int32_t) tid);
+        data.writeInt32((int32_t) clientUid);
         int lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
@@ -702,6 +706,7 @@
                                     int32_t priority,
                                     audio_io_handle_t output,
                                     int sessionId,
+                                    const String16& opPackageName,
                                     status_t *status,
                                     int *id,
                                     int *enabled)
@@ -722,6 +727,7 @@
         data.writeInt32(priority);
         data.writeInt32((int32_t) output);
         data.writeInt32(sessionId);
+        data.writeString16(opPackageName);
 
         status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply);
         if (lStatus != NO_ERROR) {
@@ -950,18 +956,19 @@
             uint32_t sampleRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
+            const String16& opPackageName = data.readString16();
             size_t frameCount = data.readInt64();
             track_flags_t flags = (track_flags_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
+            int clientUid = data.readInt32();
             int sessionId = data.readInt32();
             size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status;
             sp<IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, &frameCount, &flags, tid, &sessionId,
-                    &notificationFrames,
-                    cblk, buffers, &status);
+                    sampleRate, format, channelMask, opPackageName, &frameCount, &flags, tid,
+                    clientUid, &sessionId, &notificationFrames, cblk, buffers, &status);
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
             reply->writeInt64(frameCount);
             reply->writeInt32(flags);
@@ -1247,12 +1254,13 @@
             int32_t priority = data.readInt32();
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             int sessionId = data.readInt32();
+            const String16 opPackageName = data.readString16();
             status_t status;
             int id;
             int enabled;
 
             sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
-                    &status, &id, &enabled);
+                    opPackageName, &status, &id, &enabled);
             reply->writeInt32(status);
             reply->writeInt32(id);
             reply->writeInt32(enabled);
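The openRecord() and createEffect() changes extend existing Binder transactions with
opPackageName and clientUid, so the Bp write order and the Bn read order have to stay in
lockstep. A standalone round-trip with made-up values, showing only the ordering rule these hunks
rely on (not the actual audio transaction layout):

    #include <binder/Parcel.h>
    #include <utils/String16.h>

    using namespace android;

    int main() {
        Parcel p;
        // Writer side: new fields appended at a fixed position.
        p.writeString16(String16("com.example.app"));  // opPackageName
        p.writeInt32(10042);                           // clientUid
        // Reader side must consume them at the same position.
        p.setDataPosition(0);
        String16 pkg = p.readString16();
        int32_t uid = p.readInt32();
        return (pkg == String16("com.example.app") && uid == 10042) ? 0 : 1;
    }
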
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 641e6c1..3429d36 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -39,25 +39,18 @@
     {
     }
 
-    void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2)
+    void ioConfigChanged(audio_io_config_event event, const sp<AudioIoDescriptor>& ioDesc)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor());
         data.writeInt32(event);
-        data.writeInt32((int32_t) ioHandle);
-        if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
-            uint32_t stream = *(const uint32_t *)param2;
-            ALOGV("ioConfigChanged stream %d", stream);
-            data.writeInt32(stream);
-        } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
-            const AudioSystem::OutputDescriptor *desc =
-                    (const AudioSystem::OutputDescriptor *)param2;
-            data.writeInt32(desc->samplingRate);
-            data.writeInt32(desc->format);
-            data.writeInt32(desc->channelMask);
-            data.writeInt64(desc->frameCount);
-            data.writeInt32(desc->latency);
-        }
+        data.writeInt32((int32_t)ioDesc->mIoHandle);
+        data.write(&ioDesc->mPatch, sizeof(struct audio_patch));
+        data.writeInt32(ioDesc->mSamplingRate);
+        data.writeInt32(ioDesc->mFormat);
+        data.writeInt32(ioDesc->mChannelMask);
+        data.writeInt64(ioDesc->mFrameCount);
+        data.writeInt32(ioDesc->mLatency);
         remote()->transact(IO_CONFIG_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
@@ -72,24 +65,16 @@
     switch (code) {
     case IO_CONFIG_CHANGED: {
             CHECK_INTERFACE(IAudioFlingerClient, data, reply);
-            int event = data.readInt32();
-            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            const void *param2 = NULL;
-            AudioSystem::OutputDescriptor desc;
-            uint32_t stream;
-            if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
-                stream = data.readInt32();
-                param2 = &stream;
-                ALOGV("STREAM_CONFIG_CHANGED stream %d", stream);
-            } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
-                desc.samplingRate = data.readInt32();
-                desc.format = (audio_format_t) data.readInt32();
-                desc.channelMask = (audio_channel_mask_t) data.readInt32();
-                desc.frameCount = data.readInt64();
-                desc.latency = data.readInt32();
-                param2 = &desc;
-            }
-            ioConfigChanged(event, ioHandle, param2);
+            audio_io_config_event event = (audio_io_config_event)data.readInt32();
+            sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+            ioDesc->mIoHandle = (audio_io_handle_t) data.readInt32();
+            data.read(&ioDesc->mPatch, sizeof(struct audio_patch));
+            ioDesc->mSamplingRate = data.readInt32();
+            ioDesc->mFormat = (audio_format_t) data.readInt32();
+            ioDesc->mChannelMask = (audio_channel_mask_t) data.readInt32();
+            ioDesc->mFrameCount = data.readInt64();
+            ioDesc->mLatency = data.readInt32();
+            ioConfigChanged(event, ioDesc);
             return NO_ERROR;
         } break;
         default:
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index afae7f5..fd18f17 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -171,6 +171,7 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
@@ -207,6 +208,7 @@
                 data.writeInt32(1);
                 data.writeInt32(*stream);
             }
+            data.writeInt32(uid);
             data.writeInt32(samplingRate);
             data.writeInt32(static_cast <uint32_t>(format));
             data.writeInt32(channelMask);
@@ -275,10 +277,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
-                                     audio_input_flags_t flags)
+                                     audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -292,10 +296,12 @@
         }
         data.write(attr, sizeof(audio_attributes_t));
         data.writeInt32(session);
+        data.writeInt32(uid);
         data.writeInt32(samplingRate);
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(flags);
+        data.writeInt32(selectedDeviceId);
         status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -850,6 +856,7 @@
             if (hasStream) {
                 stream = (audio_stream_type_t)data.readInt32();
             }
+            uid_t uid = (uid_t)data.readInt32();
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
@@ -863,7 +870,7 @@
             }
             audio_io_handle_t output;
             status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
-                    &output, session, &stream,
+                    &output, session, &stream, uid,
                     samplingRate, format, channelMask,
                     flags, selectedDeviceId, hasOffloadInfo ? &offloadInfo : NULL);
             reply->writeInt32(status);
@@ -910,14 +917,16 @@
             audio_attributes_t attr;
             data.read(&attr, sizeof(audio_attributes_t));
             audio_session_t session = (audio_session_t)data.readInt32();
+            uid_t uid = (uid_t)data.readInt32();
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
+            audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
             audio_io_handle_t input;
-            status_t status = getInputForAttr(&attr, &input, session,
+            status_t status = getInputForAttr(&attr, &input, session, uid,
                                               samplingRate, format, channelMask,
-                                              flags);
+                                              flags, selectedDeviceId);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->writeInt32(input);
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 9246a7c..2f440fe 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -142,7 +142,7 @@
 
         ssize_t result = reply.readInt32();
 
-        if (result >= ERROR_DRM_VENDOR_MIN && result <= ERROR_DRM_VENDOR_MAX) {
+        if (isCryptoError(result)) {
             errorDetailMsg->setTo(reply.readCString());
         }
 
@@ -319,8 +319,7 @@
 
             reply->writeInt32(result);
 
-            if (result >= ERROR_DRM_VENDOR_MIN
-                && result <= ERROR_DRM_VENDOR_MAX) {
+            if (isCryptoError(result)) {
                 reply->writeCString(errorDetailMsg.c_str());
             }
 
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
index 9122f75..f3a8902 100644
--- a/media/libmedia/IHDCP.cpp
+++ b/media/libmedia/IHDCP.cpp
@@ -241,8 +241,19 @@
         case HDCP_ENCRYPT:
         {
             size_t size = data.readInt32();
+            size_t bufSize = 2 * size;
 
-            void *inData = malloc(2 * size);
+            // watch out for overflow
+            void *inData = NULL;
+            if (bufSize > size) {
+                inData = malloc(bufSize);
+            }
+
+            if (inData == NULL) {
+                reply->writeInt32(ERROR_OUT_OF_RANGE);
+                return OK;
+            }
+
             void *outData = (uint8_t *)inData + size;
 
             data.read(inData, size);
@@ -273,11 +284,17 @@
             size_t offset = data.readInt32();
             size_t size = data.readInt32();
             uint32_t streamCTR = data.readInt32();
-            void *outData = malloc(size);
+            void *outData = NULL;
             uint64_t inputCTR;
 
-            status_t err = encryptNative(graphicBuffer, offset, size,
-                                         streamCTR, &inputCTR, outData);
+            status_t err = ERROR_OUT_OF_RANGE;
+
+            outData = malloc(size);
+
+            if (outData != NULL) {
+                err = encryptNative(graphicBuffer, offset, size,
+                                             streamCTR, &inputCTR, outData);
+            }
 
             reply->writeInt32(err);
 
@@ -295,8 +312,19 @@
         case HDCP_DECRYPT:
         {
             size_t size = data.readInt32();
+            size_t bufSize = 2 * size;
 
-            void *inData = malloc(2 * size);
+            // watch out for overflow
+            void *inData = NULL;
+            if (bufSize > size) {
+                inData = malloc(bufSize);
+            }
+
+            if (inData == NULL) {
+                reply->writeInt32(ERROR_OUT_OF_RANGE);
+                return OK;
+            }
+
             void *outData = (uint8_t *)inData + size;
 
             data.read(inData, size);
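Both HDCP_ENCRYPT and HDCP_DECRYPT now refuse to allocate when 2 * size wraps around: on
overflow the doubled value is no larger than size, so a too-small buffer is never handed to the
encrypt/decrypt path. The same idiom in isolation:

    #include <cstdlib>

    // Returns nullptr when doubling `size` overflows size_t (or when
    // size == 0), matching the `bufSize > size` guard in the hunks above.
    static void* allocDoubled(size_t size) {
        size_t bufSize = 2 * size;       // may wrap
        if (bufSize <= size) {
            return nullptr;              // wrapped, or size == 0
        }
        return std::malloc(bufSize);
    }
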
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 2ff7658..09137ef 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -24,6 +24,7 @@
 #include <binder/Parcel.h>
 #include <utils/String8.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
 
 namespace android {
 
@@ -106,11 +107,18 @@
             return UNKNOWN_ERROR;
         }
 
-        int32_t len = reply.readInt32();
+        size_t len = reply.readInt32();
 
-        if (len > 0) {
-            memcpy(buffer, mMemory->pointer(), len);
+        if (len > size) {
+            ALOGE("requested %zu, got %zu", size, len);
+            return ERROR_OUT_OF_RANGE;
         }
+        if (len > mMemory->size()) {
+            ALOGE("got %zu, but memory has %zu", len, mMemory->size());
+            return ERROR_OUT_OF_RANGE;
+        }
+
+        memcpy(buffer, mMemory->pointer(), len);
 
         return len;
     }
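readAt() now treats the length echoed back over Binder as untrusted: it must fit both the
caller's buffer and the shared memory window before anything is copied. A standalone sketch of
that validation shape; the negative errno here is a stand-in for the ERROR_OUT_OF_RANGE used
above:

    #include <cerrno>
    #include <cstring>
    #include <sys/types.h>

    // Copy `len` bytes from a shared region into the caller's buffer only
    // after checking the length against both sizes.
    static ssize_t copyChecked(void* dst, size_t dstSize,
                               const void* shared, size_t sharedSize,
                               size_t len) {
        if (len > dstSize || len > sharedSize) {
            return -ERANGE;
        }
        memcpy(dst, shared, len);
        return static_cast<ssize_t>(len);
    }
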
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index f30d0f3..0c16a2b 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -44,6 +44,7 @@
         status_t err = reply.readInt32();
 
         if (err != OK) {
+            ALOGE("Unable to make HTTP connection (err = %d)", err);
             return NULL;
         }
 
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 0091078..bde35f2 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -21,6 +21,9 @@
 
 #include <binder/Parcel.h>
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
 #include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayer.h>
@@ -41,7 +44,10 @@
     START,
     STOP,
     IS_PLAYING,
-    SET_PLAYBACK_RATE,
+    SET_PLAYBACK_SETTINGS,
+    GET_PLAYBACK_SETTINGS,
+    SET_SYNC_SETTINGS,
+    GET_SYNC_SETTINGS,
     PAUSE,
     SEEK_TO,
     GET_CURRENT_POSITION,
@@ -175,15 +181,63 @@
         return reply.readInt32();
     }
 
-    status_t setPlaybackRate(float rate)
+    status_t setPlaybackSettings(const AudioPlaybackRate& rate)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-        data.writeFloat(rate);
-        remote()->transact(SET_PLAYBACK_RATE, data, &reply);
+        data.writeFloat(rate.mSpeed);
+        data.writeFloat(rate.mPitch);
+        data.writeInt32((int32_t)rate.mFallbackMode);
+        data.writeInt32((int32_t)rate.mStretchMode);
+        remote()->transact(SET_PLAYBACK_SETTINGS, data, &reply);
         return reply.readInt32();
     }
 
+    status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        remote()->transact(GET_PLAYBACK_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            rate->mSpeed = reply.readFloat();
+            rate->mPitch = reply.readFloat();
+            rate->mFallbackMode = (AudioTimestretchFallbackMode)reply.readInt32();
+            rate->mStretchMode = (AudioTimestretchStretchMode)reply.readInt32();
+        }
+        return err;
+    }
+
+    status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        data.writeInt32((int32_t)sync.mSource);
+        data.writeInt32((int32_t)sync.mAudioAdjustMode);
+        data.writeFloat(sync.mTolerance);
+        data.writeFloat(videoFpsHint);
+        remote()->transact(SET_SYNC_SETTINGS, data, &reply);
+        return reply.readInt32();
+    }
+
+    status_t getSyncSettings(AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        remote()->transact(GET_SYNC_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            AVSyncSettings settings;
+            settings.mSource = (AVSyncSource)reply.readInt32();
+            settings.mAudioAdjustMode = (AVSyncAudioAdjustMode)reply.readInt32();
+            settings.mTolerance = reply.readFloat();
+            *sync = settings;
+            *videoFps = reply.readFloat();
+        }
+        return err;
+    }
+
     status_t pause()
     {
         Parcel data, reply;
@@ -453,9 +507,51 @@
             reply->writeInt32(ret);
             return NO_ERROR;
         } break;
-        case SET_PLAYBACK_RATE: {
+        case SET_PLAYBACK_SETTINGS: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
-            reply->writeInt32(setPlaybackRate(data.readFloat()));
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            rate.mSpeed = data.readFloat();
+            rate.mPitch = data.readFloat();
+            rate.mFallbackMode = (AudioTimestretchFallbackMode)data.readInt32();
+            rate.mStretchMode = (AudioTimestretchStretchMode)data.readInt32();
+            reply->writeInt32(setPlaybackSettings(rate));
+            return NO_ERROR;
+        } break;
+        case GET_PLAYBACK_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            status_t err = getPlaybackSettings(&rate);
+            reply->writeInt32(err);
+            if (err == OK) {
+                reply->writeFloat(rate.mSpeed);
+                reply->writeFloat(rate.mPitch);
+                reply->writeInt32((int32_t)rate.mFallbackMode);
+                reply->writeInt32((int32_t)rate.mStretchMode);
+            }
+            return NO_ERROR;
+        } break;
+        case SET_SYNC_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AVSyncSettings sync;
+            sync.mSource = (AVSyncSource)data.readInt32();
+            sync.mAudioAdjustMode = (AVSyncAudioAdjustMode)data.readInt32();
+            sync.mTolerance = data.readFloat();
+            float videoFpsHint = data.readFloat();
+            reply->writeInt32(setSyncSettings(sync, videoFpsHint));
+            return NO_ERROR;
+        } break;
+        case GET_SYNC_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AVSyncSettings sync;
+            float videoFps;
+            status_t err = getSyncSettings(&sync, &videoFps);
+            reply->writeInt32(err);
+            if (err == OK) {
+                reply->writeInt32((int32_t)sync.mSource);
+                reply->writeInt32((int32_t)sync.mAudioAdjustMode);
+                reply->writeFloat(sync.mTolerance);
+                reply->writeFloat(videoFps);
+            }
             return NO_ERROR;
         } break;
         case PAUSE: {
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index aa7b2e1..05f8670 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -78,10 +78,11 @@
         return interface_cast<IMediaPlayer>(reply.readStrongBinder());
     }
 
-    virtual sp<IMediaRecorder> createMediaRecorder()
+    virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
         return interface_cast<IMediaRecorder>(reply.readStrongBinder());
     }
@@ -128,11 +129,12 @@
         return remote()->transact(PULL_BATTERY_DATA, data, reply);
     }
 
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface)
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         data.writeStrongBinder(IInterface::asBinder(client));
         data.writeString8(iface);
         remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply);
@@ -166,7 +168,8 @@
         } break;
         case CREATE_MEDIA_RECORDER: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
-            sp<IMediaRecorder> recorder = createMediaRecorder();
+            const String16 opPackageName = data.readString16();
+            sp<IMediaRecorder> recorder = createMediaRecorder(opPackageName);
             reply->writeStrongBinder(IInterface::asBinder(recorder));
             return NO_ERROR;
         } break;
@@ -214,10 +217,11 @@
         } break;
         case LISTEN_FOR_REMOTE_DISPLAY: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
+            const String16 opPackageName = data.readString16();
             sp<IRemoteDisplayClient> client(
                     interface_cast<IRemoteDisplayClient>(data.readStrongBinder()));
             String8 iface(data.readString8());
-            sp<IRemoteDisplay> display(listenForRemoteDisplay(client, iface));
+            sp<IRemoteDisplay> display(listenForRemoteDisplay(opPackageName, client, iface));
             reply->writeStrongBinder(IInterface::asBinder(display));
             return NO_ERROR;
         } break;
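createMediaRecorder() and listenForRemoteDisplay() now require the caller's app-ops package name
up front. A trivial caller sketch; the proxy and package string are illustrative:

    #include <media/IMediaPlayerService.h>
    #include <utils/String16.h>

    using namespace android;

    sp<IMediaRecorder> makeRecorder(const sp<IMediaPlayerService>& service) {
        return service->createMediaRecorder(String16("com.example.recorder"));
    }
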
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 8ca256c..c7a1394 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -35,6 +35,7 @@
     RELEASE = IBinder::FIRST_CALL_TRANSACTION,
     INIT,
     CLOSE,
+    USE_PERSISTENT_SURFACE,
     QUERY_SURFACE_MEDIASOURCE,
     RESET,
     STOP,
@@ -75,6 +76,16 @@
         return reply.readInt32();
     }
 
+    status_t usePersistentSurface(const sp<IGraphicBufferConsumer>& surface)
+    {
+        ALOGV("usePersistentSurface(%p)", surface.get());
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(surface));
+        remote()->transact(USE_PERSISTENT_SURFACE, data, &reply);
+        return reply.readInt32();
+    }
+
     sp<IGraphicBufferProducer> querySurfaceMediaSource()
     {
         ALOGV("Query SurfaceMediaSource");
@@ -442,6 +453,14 @@
             reply->writeInt32(setCamera(camera, proxy));
             return NO_ERROR;
         } break;
+        case USE_PERSISTENT_SURFACE: {
+            ALOGV("USE_PERSISTENT_SURFACE");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            sp<IGraphicBufferConsumer> surface = interface_cast<IGraphicBufferConsumer>(
+                    data.readStrongBinder());
+            reply->writeInt32(usePersistentSurface(surface));
+            return NO_ERROR;
+        } break;
         case QUERY_SURFACE_MEDIASOURCE: {
             ALOGV("QUERY_SURFACE_MEDIASOURCE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
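usePersistentSurface() forwards the consumer end of a persistent input surface over the new
USE_PERSISTENT_SURFACE transaction; it pairs with the IOMX createPersistentInputSurface() flow
sketched after the IOMX.cpp hunks below. A trivial caller sketch, assuming an initialized
IMediaRecorder proxy:

    #include <gui/IGraphicBufferConsumer.h>
    #include <media/IMediaRecorder.h>

    using namespace android;

    status_t bindPersistentSurface(const sp<IMediaRecorder>& recorder,
                                   const sp<IGraphicBufferConsumer>& consumer) {
        return recorder->usePersistentSurface(consumer);
    }
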
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index e208df9..39b135b 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -41,6 +41,8 @@
     USE_BUFFER,
     USE_GRAPHIC_BUFFER,
     CREATE_INPUT_SURFACE,
+    CREATE_PERSISTENT_INPUT_SURFACE,
+    USE_PERSISTENT_INPUT_SURFACE,
     SIGNAL_END_OF_INPUT_STREAM,
     STORE_META_DATA_IN_BUFFERS,
     PREPARE_FOR_ADAPTIVE_PLAYBACK,
@@ -326,6 +328,51 @@
         return err;
     }
 
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer) {
+        Parcel data, reply;
+        status_t err;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        err = remote()->transact(CREATE_PERSISTENT_INPUT_SURFACE, data, &reply);
+        if (err != OK) {
+            ALOGW("binder transaction failed: %d", err);
+            return err;
+        }
+
+        err = reply.readInt32();
+        if (err != OK) {
+            return err;
+        }
+
+        *bufferProducer = IGraphicBufferProducer::asInterface(
+                reply.readStrongBinder());
+        *bufferConsumer = IGraphicBufferConsumer::asInterface(
+                reply.readStrongBinder());
+
+        return err;
+    }
+
+    virtual status_t usePersistentInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        status_t err;
+        data.writeInt32((int32_t)node);
+        data.writeInt32(port_index);
+        data.writeStrongBinder(IInterface::asBinder(bufferConsumer));
+
+        err = remote()->transact(USE_PERSISTENT_INPUT_SURFACE, data, &reply);
+
+        if (err != OK) {
+            ALOGW("binder transaction failed: %d", err);
+            return err;
+        }
+        return reply.readInt32();
+    }
+
+
     virtual status_t signalEndOfInputStream(node_id node) {
         Parcel data, reply;
         status_t err;
@@ -781,6 +828,42 @@
             return NO_ERROR;
         }
 
+        case CREATE_PERSISTENT_INPUT_SURFACE:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            sp<IGraphicBufferProducer> bufferProducer;
+            sp<IGraphicBufferConsumer> bufferConsumer;
+            status_t err = createPersistentInputSurface(
+                    &bufferProducer, &bufferConsumer);
+
+            reply->writeInt32(err);
+
+            if (err == OK) {
+                reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
+                reply->writeStrongBinder(IInterface::asBinder(bufferConsumer));
+            }
+
+            return NO_ERROR;
+        }
+
+        case USE_PERSISTENT_INPUT_SURFACE:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            node_id node = (node_id)data.readInt32();
+            OMX_U32 port_index = data.readInt32();
+
+            sp<IGraphicBufferConsumer> bufferConsumer =
+                    interface_cast<IGraphicBufferConsumer>(data.readStrongBinder());
+
+            status_t err = usePersistentInputSurface(
+                    node, port_index, bufferConsumer);
+
+            reply->writeInt32(err);
+            return NO_ERROR;
+        }
+
         case SIGNAL_END_OF_INPUT_STREAM:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
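The two new IOMX calls split persistent-surface handling into a create step (producing both ends
of the buffer queue) and a use step (binding the consumer end to a component's input port). A
sketch of that flow, assuming a live IOMX binder and a valid node/port; error handling is trimmed
to the essentials:

    #include <gui/IGraphicBufferConsumer.h>
    #include <gui/IGraphicBufferProducer.h>
    #include <media/IOMX.h>

    using namespace android;

    status_t attachPersistentSurface(const sp<IOMX>& omx,
                                     IOMX::node_id node, OMX_U32 portIndex) {
        sp<IGraphicBufferProducer> producer;
        sp<IGraphicBufferConsumer> consumer;
        status_t err = omx->createPersistentInputSurface(&producer, &consumer);
        if (err != OK) {
            return err;
        }
        // The producer end would be handed to the app (e.g. wrapped in a
        // Surface); the consumer end is bound to the component's input port.
        return omx->usePersistentInputSurface(node, portIndex, consumer);
    }
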
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index eea2c43..40ec0cb 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -23,6 +23,8 @@
 
 const char kResourceSecureCodec[] = "secure-codec";
 const char kResourceNonSecureCodec[] = "non-secure-codec";
+const char kResourceAudioCodec[] = "audio-codec";
+const char kResourceVideoCodec[] = "video-codec";
 const char kResourceGraphicMemory[] = "graphic-memory";
 
 MediaResource::MediaResource() : mValue(0) {}
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 9d69b6a..dc46038 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -34,11 +34,12 @@
 
 // ---------------------------------------------------------------------------
 
-Visualizer::Visualizer (int32_t priority,
+Visualizer::Visualizer (const String16& opPackageName,
+         int32_t priority,
          effect_callback_t cbf,
          void* user,
          int sessionId)
-    :   AudioEffect(SL_IID_VISUALIZATION, NULL, priority, cbf, user, sessionId),
+    :   AudioEffect(SL_IID_VISUALIZATION, opPackageName, NULL, priority, cbf, user, sessionId),
         mCaptureRate(CAPTURE_RATE_DEF),
         mCaptureSize(CAPTURE_SIZE_DEF),
         mSampleRate(44100000),
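Editor's note: with the added opPackageName argument, constructing a Visualizer now requires the calling package up front; the remaining arguments keep their previous meaning. A minimal sketch (the package name and session id are placeholders):

    // Attribute the capture to the calling app's package for appops checks.
    sp<Visualizer> vis = new Visualizer(
            String16("com.example.app"),   // opPackageName (placeholder)
            0 /* priority */,
            NULL /* cbf */, NULL /* user */,
            audioSessionId);               // audio session to visualize (assumed to exist)
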
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 9a276ae..81a5e8c 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -32,7 +32,9 @@
 #include <gui/Surface.h>
 
 #include <media/mediaplayer.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
+#include <media/AVSyncSettings.h>
 #include <media/IDataSource.h>
 
 #include <binder/MemoryBase.h>
@@ -60,7 +62,6 @@
     mLoop = false;
     mLeftVolume = mRightVolume = 1.0;
     mVideoWidth = mVideoHeight = 0;
-    mPlaybackRate = 1.0;
     mLockThreadId = 0;
     mAudioSessionId = AudioSystem::newAudioUniqueId();
     AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
@@ -389,6 +390,9 @@
         if ((mCurrentState & MEDIA_PLAYER_STARTED) && ! temp) {
             ALOGE("internal/external state mismatch corrected");
             mCurrentState = MEDIA_PLAYER_PAUSED;
+        } else if ((mCurrentState & MEDIA_PLAYER_PAUSED) && temp) {
+            ALOGE("internal/external state mismatch corrected");
+            mCurrentState = MEDIA_PLAYER_STARTED;
         }
         return temp;
     }
@@ -396,22 +400,50 @@
     return false;
 }
 
-status_t MediaPlayer::setPlaybackRate(float rate)
+status_t MediaPlayer::setPlaybackSettings(const AudioPlaybackRate& rate)
 {
-    ALOGV("setPlaybackRate: %f", rate);
-    if (rate <= 0.0) {
+    ALOGV("setPlaybackSettings: %f %f %d %d",
+            rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+    // Negative speed and pitch do not make sense. Further validation will
+    // be done by the respective media players.
+    if (rate.mSpeed < 0.f || rate.mPitch < 0.f) {
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    if (mPlayer != 0) {
-        if (mPlaybackRate == rate) {
-            return NO_ERROR;
+    if (mPlayer == 0) return INVALID_OPERATION;
+    status_t err = mPlayer->setPlaybackSettings(rate);
+    if (err == OK) {
+        if (rate.mSpeed == 0.f && mCurrentState == MEDIA_PLAYER_STARTED) {
+            mCurrentState = MEDIA_PLAYER_PAUSED;
+        } else if (rate.mSpeed != 0.f && mCurrentState == MEDIA_PLAYER_PAUSED) {
+            mCurrentState = MEDIA_PLAYER_STARTED;
         }
-        mPlaybackRate = rate;
-        return mPlayer->setPlaybackRate(rate);
     }
-    ALOGV("setPlaybackRate: no active player");
-    return INVALID_OPERATION;
+    return err;
+}
+
+status_t MediaPlayer::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->getPlaybackSettings(rate);
+}
+
+status_t MediaPlayer::setSyncSettings(const AVSyncSettings& sync, float videoFpsHint)
+{
+    ALOGV("setSyncSettings: %u %u %f %f",
+            sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t MediaPlayer::getSyncSettings(
+        AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->getSyncSettings(sync, videoFps);
 }
 
 status_t MediaPlayer::getVideoWidth(int *w)
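Editor's note: setPlaybackRate(float) is replaced by setPlaybackSettings(const AudioPlaybackRate&), and a speed of 0 now doubles as pause — the state bookkeeping above flips between STARTED and PAUSED accordingly. A minimal caller sketch, assuming an already-prepared sp<MediaPlayer> named player:

    AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;  // speed/pitch 1.0, default modes
    rate.mSpeed = 1.5f;   // play 50% faster
    rate.mPitch = 1.0f;   // keep the original pitch
    status_t err = player->setPlaybackSettings(rate);

    // Setting the speed to 0.f pauses; restoring a non-zero speed resumes.
    rate.mSpeed = 0.f;
    err = player->setPlaybackSettings(rate);
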
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index a2d6e53..1f8b1d3 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -27,6 +27,7 @@
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaRecorder.h>
 #include <media/mediaplayer.h>  // for MEDIA_ERROR_SERVER_DIED
+#include <media/stagefright/PersistentSurface.h>
 #include <gui/IGraphicBufferProducer.h>
 
 namespace android {
@@ -344,6 +345,24 @@
 
 
 
+status_t MediaRecorder::usePersistentSurface(const sp<PersistentSurface>& surface)
+{
+    ALOGV("usePersistentSurface");
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+    bool isInvalidState = (mCurrentState &
+                           (MEDIA_RECORDER_PREPARED |
+                            MEDIA_RECORDER_RECORDING));
+    if (isInvalidState) {
+        ALOGE("usePersistentSurface is called in an invalid state: %d", mCurrentState);
+        return INVALID_OPERATION;
+    }
+
+    return mMediaRecorder->usePersistentSurface(surface->getBufferConsumer());
+}
+
 status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
 {
     ALOGV("setVideoFrameRate(%d)", frames_per_second);
@@ -594,13 +613,13 @@
     return INVALID_OPERATION;
 }
 
-MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL)
+MediaRecorder::MediaRecorder(const String16& opPackageName) : mSurfaceMediaSource(NULL)
 {
     ALOGV("constructor");
 
     const sp<IMediaPlayerService>& service(getMediaPlayerService());
     if (service != NULL) {
-        mMediaRecorder = service->createMediaRecorder();
+        mMediaRecorder = service->createMediaRecorder(opPackageName);
     }
     if (mMediaRecorder != NULL) {
         mCurrentState = MEDIA_RECORDER_IDLE;
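Editor's note: usePersistentSurface() has to be called before the recorder reaches the PREPARED or RECORDING state (the check above rejects both), and the constructor now takes the op package name. A rough sketch, assuming a PersistentSurface obtained from the persistent-input-surface creation path added elsewhere in this change set (the package name and 'persistentSurface' are placeholders):

    sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.app"));
    // 'persistentSurface' is assumed to come from the codec-side
    // createPersistentInputSurface() path introduced in this change.
    status_t err = recorder->usePersistentSurface(persistentSurface);
    // Configure sources/output and prepare() only after the surface is set.
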
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 3bc763f..891a9e9 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -307,10 +307,10 @@
     ALOGV("MediaPlayerService destroyed");
 }
 
-sp<IMediaRecorder> MediaPlayerService::createMediaRecorder()
+sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(const String16 &opPackageName)
 {
     pid_t pid = IPCThreadState::self()->getCallingPid();
-    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid);
+    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid, opPackageName);
     wp<MediaRecorderClient> w = recorder;
     Mutex::Autolock lock(mLock);
     mMediaRecorderClients.add(w);
@@ -381,12 +381,13 @@
 }
 
 sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
+        const String16 &opPackageName,
         const sp<IRemoteDisplayClient>& client, const String8& iface) {
     if (!checkPermission("android.permission.CONTROL_WIFI_DISPLAY")) {
         return NULL;
     }
 
-    return new RemoteDisplay(client, iface.string());
+    return new RemoteDisplay(opPackageName, client, iface.string());
 }
 
 status_t MediaPlayerService::AudioOutput::dump(int fd, const Vector<String16>& args) const
@@ -980,12 +981,52 @@
     return NO_ERROR;
 }
 
-status_t MediaPlayerService::Client::setPlaybackRate(float rate)
+status_t MediaPlayerService::Client::setPlaybackSettings(const AudioPlaybackRate& rate)
 {
-    ALOGV("[%d] setPlaybackRate(%f)", mConnId, rate);
+    ALOGV("[%d] setPlaybackSettings(%f, %f, %d, %d)",
+            mConnId, rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
     sp<MediaPlayerBase> p = getPlayer();
     if (p == 0) return UNKNOWN_ERROR;
-    return p->setPlaybackRate(rate);
+    return p->setPlaybackSettings(rate);
+}
+
+status_t MediaPlayerService::Client::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+{
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    status_t ret = p->getPlaybackSettings(rate);
+    if (ret == NO_ERROR) {
+        ALOGV("[%d] getPlaybackSettings(%f, %f, %d, %d)",
+                mConnId, rate->mSpeed, rate->mPitch, rate->mFallbackMode, rate->mStretchMode);
+    } else {
+        ALOGV("[%d] getPlaybackSettings returned %d", mConnId, ret);
+    }
+    return ret;
+}
+
+status_t MediaPlayerService::Client::setSyncSettings(
+        const AVSyncSettings& sync, float videoFpsHint)
+{
+    ALOGV("[%d] setSyncSettings(%u, %u, %f, %f)",
+            mConnId, sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    return p->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t MediaPlayerService::Client::getSyncSettings(
+        AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+{
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    status_t ret = p->getSyncSettings(sync, videoFps);
+    if (ret == NO_ERROR) {
+        ALOGV("[%d] getSyncSettings(%u, %u, %f, %f)",
+                mConnId, sync->mSource, sync->mAudioAdjustMode, sync->mTolerance, *videoFps);
+    } else {
+        ALOGV("[%d] getSyncSettings returned %d", mConnId, ret);
+    }
+    return ret;
 }
 
 status_t MediaPlayerService::Client::getCurrentPosition(int *msec)
@@ -1310,7 +1351,7 @@
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
-    mPlaybackRatePermille = 1000;
+    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     mSampleRateHz = 0;
     mMsecsPerFrame = 0;
     mAuxEffectId = 0;
@@ -1633,7 +1674,7 @@
 
     mSampleRateHz = sampleRate;
     mFlags = flags;
-    mMsecsPerFrame = mPlaybackRatePermille / (float) sampleRate;
+    mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
     uint32_t pos;
     if (t->getPosition(&pos) == OK) {
         mBytesWritten = uint64_t(pos) * t->frameSize();
@@ -1642,7 +1683,7 @@
 
     status_t res = NO_ERROR;
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
-        res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+        res = t->setPlaybackRate(mPlaybackRate);
         if (res == NO_ERROR) {
             t->setAuxEffectSendLevel(mSendLevel);
             res = t->attachAuxEffect(mAuxEffectId);
@@ -1738,22 +1779,38 @@
     }
 }
 
-status_t MediaPlayerService::AudioOutput::setPlaybackRatePermille(int32_t ratePermille)
+status_t MediaPlayerService::AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate)
 {
-    ALOGV("setPlaybackRatePermille(%d)", ratePermille);
-    status_t res = NO_ERROR;
-    if (mTrack != 0) {
-        res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000);
-    } else {
-        res = NO_INIT;
+    ALOGV("setPlaybackRate(%f %f %d %d)",
+                rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+    if (mTrack == 0) {
+        // remember rate so that we can set it when the track is opened
+        mPlaybackRate = rate;
+        return OK;
     }
-    mPlaybackRatePermille = ratePermille;
+    status_t res = mTrack->setPlaybackRate(rate);
+    if (res != NO_ERROR) {
+        return res;
+    }
+    // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
+    CHECK_GT(rate.mSpeed, 0.f);
+    mPlaybackRate = rate;
     if (mSampleRateHz != 0) {
-        mMsecsPerFrame = mPlaybackRatePermille / (float) mSampleRateHz;
+        mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
     }
     return res;
 }
 
+status_t MediaPlayerService::AudioOutput::getPlaybackRate(AudioPlaybackRate *rate)
+{
+    ALOGV("setPlaybackRate");
+    if (mTrack == 0) {
+        return NO_INIT;
+    }
+    *rate = mTrack->getPlaybackRate();
+    return NO_ERROR;
+}
+
 status_t MediaPlayerService::AudioOutput::setAuxEffectSendLevel(float level)
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 6ddfe14..5103841 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -34,7 +34,9 @@
 
 namespace android {
 
+struct AudioPlaybackRate;
 class AudioTrack;
+struct AVSyncSettings;
 class IDataSource;
 class IMediaRecorder;
 class IMediaMetadataRetriever;
@@ -109,7 +111,9 @@
                 void            setAudioAttributes(const audio_attributes_t * attributes);
 
                 void            setVolume(float left, float right);
-        virtual status_t        setPlaybackRatePermille(int32_t ratePermille);
+        virtual status_t        setPlaybackRate(const AudioPlaybackRate& rate);
+        virtual status_t        getPlaybackRate(AudioPlaybackRate* rate /* nonnull */);
+
                 status_t        setAuxEffectSendLevel(float level);
                 status_t        attachAuxEffect(int effectId);
         virtual status_t        dump(int fd, const Vector<String16>& args) const;
@@ -139,7 +143,7 @@
         const audio_attributes_t *mAttributes;
         float                   mLeftVolume;
         float                   mRightVolume;
-        int32_t                 mPlaybackRatePermille;
+        AudioPlaybackRate       mPlaybackRate;
         uint32_t                mSampleRateHz; // sample rate of the content, as set in open()
         float                   mMsecsPerFrame;
         int                     mSessionId;
@@ -188,7 +192,7 @@
     static  void                instantiate();
 
     // IMediaPlayerService interface
-    virtual sp<IMediaRecorder>  createMediaRecorder();
+    virtual sp<IMediaRecorder>  createMediaRecorder(const String16 &opPackageName);
     void    removeMediaRecorderClient(wp<MediaRecorderClient> client);
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
 
@@ -200,8 +204,8 @@
     virtual sp<IDrm>            makeDrm();
     virtual sp<IHDCP>           makeHDCP(bool createEncryptionModule);
 
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface);
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface);
     virtual status_t            dump(int fd, const Vector<String16>& args);
 
             void                removeClient(wp<Client> client);
@@ -262,7 +266,11 @@
         virtual status_t        stop();
         virtual status_t        pause();
         virtual status_t        isPlaying(bool* state);
-        virtual status_t        setPlaybackRate(float rate);
+        virtual status_t        setPlaybackSettings(const AudioPlaybackRate& rate);
+        virtual status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+        virtual status_t        setSyncSettings(const AVSyncSettings& rate, float videoFpsHint);
+        virtual status_t        getSyncSettings(AVSyncSettings* rate /* nonnull */,
+                                                float* videoFps /* nonnull */);
         virtual status_t        seekTo(int msec);
         virtual status_t        getCurrentPosition(int* msec);
         virtual status_t        getDuration(int* msec);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 319ebb0..ed442e3 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -55,6 +55,16 @@
     return ok;
 }
 
+status_t MediaRecorderClient::usePersistentSurface(const sp<IGraphicBufferConsumer>& surface)
+{
+    ALOGV("usePersistentSurface");
+    Mutex::Autolock lock(mLock);
+    if (mRecorder == NULL) {
+        ALOGE("recorder is not initialized");
+        return NO_INIT;
+    }
+    return mRecorder->usePersistentSurface(surface);
+}
 
 sp<IGraphicBufferProducer> MediaRecorderClient::querySurfaceMediaSource()
 {
@@ -290,11 +300,12 @@
     return NO_ERROR;
 }
 
-MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid)
+MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid,
+        const String16& opPackageName)
 {
     ALOGV("Client constructor");
     mPid = pid;
-    mRecorder = new StagefrightRecorder;
+    mRecorder = new StagefrightRecorder(opPackageName);
     mMediaPlayerService = service;
 }
 
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index b45344b..7ac88cb 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -55,6 +55,7 @@
     virtual     status_t   close();
     virtual     status_t   release();
     virtual     status_t   dump(int fd, const Vector<String16>& args);
+    virtual     status_t   usePersistentSurface(const sp<IGraphicBufferConsumer>& surface);
     virtual     sp<IGraphicBufferProducer> querySurfaceMediaSource();
 
 private:
@@ -62,7 +63,8 @@
 
                            MediaRecorderClient(
                                    const sp<MediaPlayerService>& service,
-                                                               pid_t pid);
+                                                               pid_t pid,
+                                                               const String16& opPackageName);
     virtual                ~MediaRecorderClient();
 
     pid_t                  mPid;
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index eb959b4..0eb4b5d 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -26,13 +26,14 @@
 namespace android {
 
 RemoteDisplay::RemoteDisplay(
+        const String16 &opPackageName,
         const sp<IRemoteDisplayClient> &client,
         const char *iface)
     : mLooper(new ALooper),
       mNetSession(new ANetworkSession) {
     mLooper->setName("wfd_looper");
 
-    mSource = new WifiDisplaySource(mNetSession, client);
+    mSource = new WifiDisplaySource(opPackageName, mNetSession, client);
     mLooper->registerHandler(mSource);
 
     mNetSession->start();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index 1a48981..d4573e9 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -33,6 +33,7 @@
 
 struct RemoteDisplay : public BnRemoteDisplay {
     RemoteDisplay(
+            const String16 &opPackageName,
             const sp<IRemoteDisplayClient> &client,
             const char *iface);
 
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
index b37aee3..3fedd9b 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ b/media/libmediaplayerservice/StagefrightPlayer.cpp
@@ -188,6 +188,14 @@
     return mPlayer->getParameter(key, reply);
 }
 
+status_t StagefrightPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    return mPlayer->setPlaybackSettings(rate);
+}
+
+status_t StagefrightPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    return mPlayer->getPlaybackSettings(rate);
+}
+
 status_t StagefrightPlayer::getMetadata(
         const media::Metadata::Filter& /* ids */, Parcel *records) {
     using media::Metadata;
diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h
index e6c30ff..96013df 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.h
+++ b/media/libmediaplayerservice/StagefrightPlayer.h
@@ -60,6 +60,8 @@
     virtual void setAudioSink(const sp<AudioSink> &audioSink);
     virtual status_t setParameter(int key, const Parcel &request);
     virtual status_t getParameter(int key, Parcel *reply);
+    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
 
     virtual status_t getMetadata(
             const media::Metadata::Filter& ids, Parcel *records);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 8a0b060..509a592 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -69,8 +69,9 @@
 }
 
 
-StagefrightRecorder::StagefrightRecorder()
-    : mWriter(NULL),
+StagefrightRecorder::StagefrightRecorder(const String16 &opPackageName)
+    : MediaRecorderBase(opPackageName),
+      mWriter(NULL),
       mOutputFd(-1),
       mAudioSource(AUDIO_SOURCE_CNT),
       mVideoSource(VIDEO_SOURCE_LIST_END),
@@ -242,6 +243,13 @@
     return OK;
 }
 
+status_t StagefrightRecorder::usePersistentSurface(
+        const sp<IGraphicBufferConsumer>& surface) {
+    mPersistentSurface = surface;
+
+    return OK;
+}
+
 status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
     ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length);
     // These don't make any sense, do they?
@@ -905,6 +913,7 @@
     sp<AudioSource> audioSource =
         new AudioSource(
                 mAudioSource,
+                mOpPackageName,
                 mSampleRate,
                 mAudioChannels);
 
@@ -1558,8 +1567,8 @@
         flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
     }
 
-    sp<MediaCodecSource> encoder =
-            MediaCodecSource::Create(mLooper, format, cameraSource, flags);
+    sp<MediaCodecSource> encoder = MediaCodecSource::Create(
+            mLooper, format, cameraSource, mPersistentSurface, flags);
     if (encoder == NULL) {
         ALOGE("Failed to create video encoder");
         // When the encoder fails to be created, we need
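Editor's note: MediaCodecSource::Create() gains a persistent-surface argument, so the recorder simply stores the IGraphicBufferConsumer it was handed and forwards it when the video encoder is created. The call shape used above, annotated (mPersistentSurface stays NULL when no persistent surface was supplied):

    sp<MediaCodecSource> encoder = MediaCodecSource::Create(
            mLooper,              // looper the codec source runs on
            format,               // encoder format (mime, bitrate, ...)
            cameraSource,         // may be NULL for pure surface input
            mPersistentSurface,   // NULL unless usePersistentSurface() was called
            flags);               // e.g. MediaCodecSource::FLAG_USE_SURFACE_INPUT
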
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 8fa5bfa..1a7b720 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -35,12 +35,13 @@
 class MetaData;
 struct AudioSource;
 class MediaProfiles;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 class SurfaceMediaSource;
 struct ALooper;
 
 struct StagefrightRecorder : public MediaRecorderBase {
-    StagefrightRecorder();
+    StagefrightRecorder(const String16 &opPackageName);
     virtual ~StagefrightRecorder();
 
     virtual status_t init();
@@ -53,6 +54,7 @@
     virtual status_t setVideoFrameRate(int frames_per_second);
     virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
+    virtual status_t usePersistentSurface(const sp<IGraphicBufferConsumer>& surface);
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
     virtual status_t setParameters(const String8& params);
     virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
@@ -72,6 +74,7 @@
     sp<ICamera> mCamera;
     sp<ICameraRecordingProxy> mCameraProxy;
     sp<IGraphicBufferProducer> mPreviewSurface;
+    sp<IGraphicBufferConsumer> mPersistentSurface;
     sp<IMediaRecorderClient> mListener;
     String16 mClientName;
     uid_t mClientUid;
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 7eaa0e0..5e7b644 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1065,6 +1065,7 @@
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
+    format->setString("mime", mime);
 
     int32_t trackType;
     if (!strncasecmp(mime, "video/", 6)) {
@@ -1085,8 +1086,6 @@
     format->setString("language", lang);
 
     if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
-        format->setString("mime", mime);
-
         int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
         meta->findInt32(kKeyTrackIsAutoselect, &isAutoselect);
         meta->findInt32(kKeyTrackIsDefault, &isDefault);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a028b01..1fb4365 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -37,6 +37,9 @@
 
 #include <cutils/properties.h>
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -45,7 +48,9 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+
 #include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
 
 #include "avc_utils.h"
 
@@ -96,16 +101,16 @@
 };
 
 struct NuPlayer::SetSurfaceAction : public Action {
-    SetSurfaceAction(const sp<NativeWindowWrapper> &wrapper)
-        : mWrapper(wrapper) {
+    SetSurfaceAction(const sp<Surface> &surface)
+        : mSurface(surface) {
     }
 
     virtual void execute(NuPlayer *player) {
-        player->performSetSurface(mWrapper);
+        player->performSetSurface(mSurface);
     }
 
 private:
-    sp<NativeWindowWrapper> mWrapper;
+    sp<Surface> mSurface;
 
     DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
 };
@@ -180,10 +185,12 @@
       mFlushingVideo(NONE),
       mResumePending(false),
       mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
-      mPlaybackRate(1.0),
+      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
+      mVideoFpsHint(-1.f),
       mStarted(false),
       mPaused(false),
-      mPausedByClient(false) {
+      mPausedByClient(false),
+      mPausedForBuffering(false) {
     clearFlushComplete();
 }
 
@@ -307,15 +314,12 @@
 
 void NuPlayer::setVideoSurfaceTextureAsync(
         const sp<IGraphicBufferProducer> &bufferProducer) {
-    sp<AMessage> msg = new AMessage(kWhatSetVideoNativeWindow, this);
+    sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
 
     if (bufferProducer == NULL) {
-        msg->setObject("native-window", NULL);
+        msg->setObject("surface", NULL);
     } else {
-        msg->setObject(
-                "native-window",
-                new NativeWindowWrapper(
-                    new Surface(bufferProducer, true /* controlledByApp */)));
+        msg->setObject("surface", new Surface(bufferProducer, true /* controlledByApp */));
     }
 
     msg->post();
@@ -331,10 +335,61 @@
     (new AMessage(kWhatStart, this))->post();
 }
 
-void NuPlayer::setPlaybackRate(float rate) {
-    sp<AMessage> msg = new AMessage(kWhatSetRate, this);
-    msg->setFloat("rate", rate);
-    msg->post();
+status_t NuPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    // do some cursory validation of the settings here. audio modes are
+    // only validated when set on the audiosink.
+    if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
+            || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
+            || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
+            || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
+        return BAD_VALUE;
+    }
+    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+    writeToAMessage(msg, rate);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, rate);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+    writeToAMessage(msg, sync, videoFpsHint);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::getSyncSettings(
+        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, sync, videoFps);
+        }
+    }
+    return err;
 }
 
 void NuPlayer::pause() {
@@ -368,23 +423,35 @@
     int32_t trackType;
     CHECK(format->findInt32("type", &trackType));
 
+    AString mime;
+    if (!format->findString("mime", &mime)) {
+        // Java MediaPlayer only uses the mimetype for subtitle and timedtext tracks.
+        // If the mimetype is missing here, the Java side will not need it anyway.
+        // We still write a placeholder mime to keep the (de)serialization logic
+        // simple.
+        if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+            mime = "audio/";
+        } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+            mime = "video/";
+        } else {
+            TRESPASS();
+        }
+    }
+
     AString lang;
     CHECK(format->findString("language", &lang));
 
     reply->writeInt32(2); // write something non-zero
     reply->writeInt32(trackType);
+    reply->writeString16(String16(mime.c_str()));
     reply->writeString16(String16(lang.c_str()));
 
     if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
-        AString mime;
-        CHECK(format->findString("mime", &mime));
-
         int32_t isAuto, isDefault, isForced;
         CHECK(format->findInt32("auto", &isAuto));
         CHECK(format->findInt32("default", &isDefault));
         CHECK(format->findInt32("forced", &isForced));
 
-        reply->writeString16(String16(mime.c_str()));
         reply->writeInt32(isAuto);
         reply->writeInt32(isDefault);
         reply->writeInt32(isForced);
@@ -555,15 +622,15 @@
             break;
         }
 
-        case kWhatSetVideoNativeWindow:
+        case kWhatSetVideoSurface:
         {
-            ALOGV("kWhatSetVideoNativeWindow");
+            ALOGV("kWhatSetVideoSurface");
 
             sp<RefBase> obj;
-            CHECK(msg->findObject("native-window", &obj));
-
+            CHECK(msg->findObject("surface", &obj));
+            sp<Surface> surface = static_cast<Surface *>(obj.get());
             if (mSource == NULL || mSource->getFormat(false /* audio */) == NULL) {
-                performSetSurface(static_cast<NativeWindowWrapper *>(obj.get()));
+                performSetSurface(surface);
                 break;
             }
 
@@ -571,9 +638,7 @@
                     new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
                                            FLUSH_CMD_SHUTDOWN /* video */));
 
-            mDeferredActions.push_back(
-                    new SetSurfaceAction(
-                        static_cast<NativeWindowWrapper *>(obj.get())));
+            mDeferredActions.push_back(new SetSurfaceAction(surface));
 
             if (obj != NULL) {
                 if (mStarted) {
@@ -619,7 +684,10 @@
         {
             ALOGV("kWhatStart");
             if (mStarted) {
-                onResume();
+                // do not resume yet if the source is still buffering
+                if (!mPausedForBuffering) {
+                    onResume();
+                }
             } else {
                 onStart();
             }
@@ -627,13 +695,114 @@
             break;
         }
 
-        case kWhatSetRate:
+        case kWhatConfigPlayback:
         {
-            ALOGV("kWhatSetRate");
-            CHECK(msg->findFloat("rate", &mPlaybackRate));
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate /* sanitized */;
+            readFromAMessage(msg, &rate);
+            status_t err = OK;
             if (mRenderer != NULL) {
-                mRenderer->setPlaybackRate(mPlaybackRate);
+                err = mRenderer->setPlaybackSettings(rate);
             }
+            if (err == OK) {
+                if (rate.mSpeed == 0.f) {
+                    onPause();
+                    // save all other settings (using non-paused speed)
+                    // so we can restore them on start
+                    AudioPlaybackRate newRate = rate;
+                    newRate.mSpeed = mPlaybackSettings.mSpeed;
+                    mPlaybackSettings = newRate;
+                } else { /* rate.mSpeed != 0.f */
+                    onResume();
+                    mPlaybackSettings = rate;
+                }
+            }
+
+            if (mVideoDecoder != NULL) {
+                float rate = getFrameRate();
+                if (rate > 0) {
+                    sp<AMessage> params = new AMessage();
+                    params->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
+                    mVideoDecoder->setParameters(params);
+                }
+            }
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetPlaybackSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate = mPlaybackSettings;
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->getPlaybackSettings(&rate);
+            }
+            if (err == OK) {
+                // get the playback settings actually used by the renderer, as they may
+                // be slightly off because the audiosink does not apply very small changes.
+                mPlaybackSettings = rate;
+                if (mPaused) {
+                    rate.mSpeed = 0.f;
+                }
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, rate);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatConfigSync:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatConfigSync");
+            AVSyncSettings sync;
+            float videoFpsHint;
+            readFromAMessage(msg, &sync, &videoFpsHint);
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->setSyncSettings(sync, videoFpsHint);
+            }
+            if (err == OK) {
+                mSyncSettings = sync;
+                mVideoFpsHint = videoFpsHint;
+            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetSyncSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AVSyncSettings sync = mSyncSettings;
+            float videoFps = mVideoFpsHint;
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->getSyncSettings(&sync, &videoFps);
+                if (err == OK) {
+                    mSyncSettings = sync;
+                    mVideoFpsHint = videoFps;
+                }
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, sync, videoFps);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
             break;
         }
 
@@ -656,7 +825,7 @@
 
             // initialize video before audio because successful initialization of
             // video may change deep buffer mode of audio.
-            if (mNativeWindow != NULL) {
+            if (mSurface != NULL) {
                 instantiateDecoder(false, &mVideoDecoder);
             }
 
@@ -704,7 +873,7 @@
             }
 
             if ((mAudioDecoder == NULL && mAudioSink != NULL)
-                    || (mVideoDecoder == NULL && mNativeWindow != NULL)) {
+                    || (mVideoDecoder == NULL && mSurface != NULL)) {
                 msg->post(100000ll);
                 mScanSourcesPending = true;
             }
@@ -1050,7 +1219,7 @@
 
     // TRICKY: We rely on mRenderer being null, so that decoder does not start requesting
     // data on instantiation.
-    if (mNativeWindow != NULL) {
+    if (mSurface != NULL) {
         err = instantiateDecoder(false, &mVideoDecoder);
         if (err != OK) {
             return err;
@@ -1103,14 +1272,16 @@
     mRendererLooper->setName("NuPlayerRenderer");
     mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
     mRendererLooper->registerHandler(mRenderer);
-    if (mPlaybackRate != 1.0) {
-        mRenderer->setPlaybackRate(mPlaybackRate);
+
+    status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
+    if (err != OK) {
+        mSource->stop();
+        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+        return;
     }
 
-    sp<MetaData> meta = getFileMeta();
-    int32_t rate;
-    if (meta != NULL
-            && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+    float rate = getFrameRate();
+    if (rate > 0) {
         mRenderer->setVideoFrameRate(rate);
     }
 
@@ -1249,6 +1420,8 @@
         return -EWOULDBLOCK;
     }
 
+    format->setInt32("priority", 0 /* realtime */);
+
     if (!audio) {
         AString mime;
         CHECK(format->findString("mime", &mime));
@@ -1265,6 +1438,11 @@
         if (mSourceFlags & Source::FLAG_PROTECTED) {
             format->setInt32("protected", true);
         }
+
+        float rate = getFrameRate();
+        if (rate > 0) {
+            format->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
+        }
     }
 
     if (audio) {
@@ -1285,10 +1463,10 @@
         notify->setInt32("generation", mVideoDecoderGeneration);
 
         *decoder = new Decoder(
-                notify, mSource, mRenderer, mNativeWindow, mCCDecoder);
+                notify, mSource, mRenderer, mSurface, mCCDecoder);
 
         // enable FRC if high-quality AV sync is requested, even if not
-        // queuing to native window, as this will even improve textureview
+        // directly queuing to display, as this will even improve textureview
         // playback.
         {
             char value[PROPERTY_VALUE_MAX];
@@ -1460,9 +1638,8 @@
 
 status_t NuPlayer::setVideoScalingMode(int32_t mode) {
     mVideoScalingMode = mode;
-    if (mNativeWindow != NULL) {
-        status_t ret = native_window_set_scaling_mode(
-                mNativeWindow->getNativeWindow().get(), mVideoScalingMode);
+    if (mSurface != NULL) {
+        status_t ret = native_window_set_scaling_mode(mSurface.get(), mVideoScalingMode);
         if (ret != OK) {
             ALOGE("Failed to set scaling mode (%d): %s",
                 -ret, strerror(-ret));
@@ -1537,6 +1714,28 @@
     return mSource->getFileFormatMeta();
 }
 
+float NuPlayer::getFrameRate() {
+    sp<MetaData> meta = mSource->getFormatMeta(false /* audio */);
+    if (meta == NULL) {
+        return 0;
+    }
+    int32_t rate;
+    if (!meta->findInt32(kKeyFrameRate, &rate)) {
+        // fall back to try file meta
+        sp<MetaData> fileMeta = getFileMeta();
+        if (fileMeta == NULL) {
+            ALOGW("source has video meta but not file meta");
+            return -1;
+        }
+        int32_t fileMetaRate;
+        if (!fileMeta->findInt32(kKeyFrameRate, &fileMetaRate)) {
+            return -1;
+        }
+        return fileMetaRate;
+    }
+    return rate;
+}
+
 void NuPlayer::schedulePollDuration() {
     sp<AMessage> msg = new AMessage(kWhatPollDuration, this);
     msg->setInt32("generation", mPollDurationGeneration);
@@ -1656,10 +1855,10 @@
     }
 }
 
-void NuPlayer::performSetSurface(const sp<NativeWindowWrapper> &wrapper) {
+void NuPlayer::performSetSurface(const sp<Surface> &surface) {
     ALOGV("performSetSurface");
 
-    mNativeWindow = wrapper;
+    mSurface = surface;
 
     // XXX - ignore error from setVideoScalingMode for now
     setVideoScalingMode(mVideoScalingMode);
@@ -1812,9 +2011,10 @@
         case Source::kWhatPauseOnBufferingStart:
         {
             // ignore if not playing
-            if (mStarted && !mPausedByClient) {
+            if (mStarted) {
                 ALOGI("buffer low, pausing...");
 
+                mPausedForBuffering = true;
                 onPause();
             }
             // fall-thru
@@ -1829,10 +2029,15 @@
         case Source::kWhatResumeOnBufferingEnd:
         {
             // ignore if not playing
-            if (mStarted && !mPausedByClient) {
+            if (mStarted) {
                 ALOGI("buffer ready, resuming...");
 
-                onResume();
+                mPausedForBuffering = false;
+
+                // do not resume yet if client didn't unpause
+                if (!mPausedByClient) {
+                    onResume();
+                }
             }
             // fall-thru
         }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 14bdb01..df9debc 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -18,14 +18,16 @@
 
 #define NU_PLAYER_H_
 
+#include <media/AudioResamplerPublic.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 
 namespace android {
 
 struct ABuffer;
 struct AMessage;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
 class IDataSource;
 class MetaData;
 struct NuPlayerDriver;
@@ -54,7 +56,11 @@
             const sp<IGraphicBufferProducer> &bufferProducer);
 
     void setAudioSink(const sp<MediaPlayerBase::AudioSink> &sink);
-    void setPlaybackRate(float rate);
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
     void start();
 
     void pause();
@@ -74,6 +80,7 @@
     void getStats(int64_t *mNumFramesTotal, int64_t *mNumFramesDropped);
 
     sp<MetaData> getFileMeta();
+    float getFrameRate();
 
 protected:
     virtual ~NuPlayer();
@@ -105,10 +112,13 @@
     enum {
         kWhatSetDataSource              = '=DaS',
         kWhatPrepare                    = 'prep',
-        kWhatSetVideoNativeWindow       = '=NaW',
+        kWhatSetVideoSurface            = '=VSu',
         kWhatSetAudioSink               = '=AuS',
         kWhatMoreDataQueued             = 'more',
-        kWhatSetRate                    = 'setR',
+        kWhatConfigPlayback             = 'cfPB',
+        kWhatConfigSync                 = 'cfSy',
+        kWhatGetPlaybackSettings        = 'gPbS',
+        kWhatGetSyncSettings            = 'gSyS',
         kWhatStart                      = 'strt',
         kWhatScanSources                = 'scan',
         kWhatVideoNotify                = 'vidN',
@@ -131,7 +141,7 @@
     uid_t mUID;
     sp<Source> mSource;
     uint32_t mSourceFlags;
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
     sp<MediaPlayerBase::AudioSink> mAudioSink;
     sp<DecoderBase> mVideoDecoder;
     bool mOffloadAudio;
@@ -180,7 +190,9 @@
 
     int32_t mVideoScalingMode;
 
-    float mPlaybackRate;
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+    float mVideoFpsHint;
     bool mStarted;
 
     // Actual pause state, either as requested by client or due to buffering.
@@ -191,6 +203,9 @@
     // still become true, when we pause internally due to buffering.
     bool mPausedByClient;
 
+    // Pause state as requested by source (internally) due to buffering
+    bool mPausedForBuffering;
+
     inline const sp<DecoderBase> &getDecoder(bool audio) {
         return audio ? mAudioDecoder : mVideoDecoder;
     }
@@ -239,7 +254,7 @@
     void performDecoderFlush(FlushCommand audio, FlushCommand video);
     void performReset();
     void performScanSources();
-    void performSetSurface(const sp<NativeWindowWrapper> &wrapper);
+    void performSetSurface(const sp<Surface> &surface);
     void performResumeDecoders(bool needNotify);
 
     void onSourceNotify(const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index acc9ef5..376c93a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -33,6 +33,8 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 
+#include <gui/Surface.h>
+
 #include "avc_utils.h"
 #include "ATSParser.h"
 
@@ -42,10 +44,10 @@
         const sp<AMessage> &notify,
         const sp<Source> &source,
         const sp<Renderer> &renderer,
-        const sp<NativeWindowWrapper> &nativeWindow,
+        const sp<Surface> &surface,
         const sp<CCDecoder> &ccDecoder)
     : DecoderBase(notify),
-      mNativeWindow(nativeWindow),
+      mSurface(surface),
       mSource(source),
       mRenderer(renderer),
       mCCDecoder(ccDecoder),
@@ -178,14 +180,9 @@
     mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
     mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());
 
-    sp<Surface> surface = NULL;
-    if (mNativeWindow != NULL) {
-        surface = mNativeWindow->getSurfaceTextureClient();
-    }
-
     mComponentName = mime;
     mComponentName.append(" decoder");
-    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
+    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
 
     mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
     int32_t secure = 0;
@@ -210,17 +207,17 @@
     mCodec->getName(&mComponentName);
 
     status_t err;
-    if (mNativeWindow != NULL) {
+    if (mSurface != NULL) {
         // disconnect from surface as MediaCodec will reconnect
         err = native_window_api_disconnect(
-                surface.get(), NATIVE_WINDOW_API_MEDIA);
+                mSurface.get(), NATIVE_WINDOW_API_MEDIA);
         // We treat this as a warning, as this is a preparatory step.
         // Codec will try to connect to the surface, which is where
         // any error signaling will occur.
         ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
     }
     err = mCodec->configure(
-            format, surface, NULL /* crypto */, 0 /* flags */);
+            format, mSurface, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
         ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
         mCodec->release();
@@ -252,6 +249,14 @@
     mResumePending = false;
 }
 
+void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
+    if (mCodec == NULL) {
+        ALOGW("onSetParameters called before codec is created.");
+        return;
+    }
+    mCodec->setParameters(params);
+}
+
 void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
     bool hadNoRenderer = (mRenderer == NULL);
     mRenderer = renderer;
@@ -329,12 +334,10 @@
         mCodec = NULL;
         ++mBufferGeneration;
 
-        if (mNativeWindow != NULL) {
+        if (mSurface != NULL) {
             // reconnect to surface as MediaCodec disconnected from it
             status_t error =
-                    native_window_api_connect(
-                            mNativeWindow->getNativeWindow().get(),
-                            NATIVE_WINDOW_API_MEDIA);
+                    native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
             ALOGW_IF(error != NO_ERROR,
                     "[%s] failed to connect to native window, error=%d",
                     mComponentName.c_str(), error);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 9f0ef1b5..070d51a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -27,7 +27,7 @@
     Decoder(const sp<AMessage> &notify,
             const sp<Source> &source,
             const sp<Renderer> &renderer = NULL,
-            const sp<NativeWindowWrapper> &nativeWindow = NULL,
+            const sp<Surface> &surface = NULL,
             const sp<CCDecoder> &ccDecoder = NULL);
 
     virtual void getStats(
@@ -40,6 +40,7 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format);
+    virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
@@ -53,7 +54,7 @@
         kWhatRenderBuffer        = 'rndr',
     };
 
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
 
     sp<Source> mSource;
     sp<Renderer> mRenderer;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 36b41ec..9d509bf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -70,6 +70,12 @@
     mDecoderLooper->registerHandler(this);
 }
 
+void NuPlayer::DecoderBase::setParameters(const sp<AMessage> &params) {
+    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
+    msg->setMessage("params", params);
+    msg->post();
+}
+
 void NuPlayer::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
     sp<AMessage> msg = new AMessage(kWhatSetRenderer, this);
     msg->setObject("renderer", renderer);
@@ -123,6 +129,14 @@
             break;
         }
 
+        case kWhatSetParameters:
+        {
+            sp<AMessage> params;
+            CHECK(msg->findMessage("params", &params));
+            onSetParameters(params);
+            break;
+        }
+
         case kWhatSetRenderer:
         {
             sp<RefBase> obj;
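Editor's note: DecoderBase::setParameters() just forwards an AMessage to the decoder's looper thread, where onSetParameters() applies it to the MediaCodec instance. The one parameter this change actually sends is the operating rate, as posted from NuPlayer above (the frame rate and speed values here are placeholders):

    // Hint the codec to run faster or slower than real time when the playback
    // speed changes; delivered asynchronously as kWhatSetParameters.
    sp<AMessage> params = new AMessage();
    params->setFloat("operating-rate", frameRate * playbackSpeed);
    videoDecoder->setParameters(params);
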
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 262f5d5..b52e7f7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -33,6 +33,7 @@
 
     void configure(const sp<AMessage> &format);
     void init();
+    void setParameters(const sp<AMessage> &params);
 
     void setRenderer(const sp<Renderer> &renderer);
 
@@ -62,6 +63,7 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format) = 0;
+    virtual void onSetParameters(const sp<AMessage> &params) = 0;
     virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers) = 0;
     virtual void onResume(bool notifyComplete) = 0;
@@ -78,6 +80,7 @@
 private:
     enum {
         kWhatConfigure           = 'conf',
+        kWhatSetParameters       = 'setP',
         kWhatSetRenderer         = 'setR',
         kWhatGetInputBuffers     = 'gInB',
         kWhatRequestInputBuffers = 'reqB',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index fdb9039..d7b070e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -88,6 +88,10 @@
     }
 }
 
+void NuPlayer::DecoderPassThrough::onSetParameters(const sp<AMessage> &/*params*/) {
+    ALOGW("onSetParameters() called unexpectedly");
+}
+
 void NuPlayer::DecoderPassThrough::onSetRenderer(
         const sp<Renderer> &renderer) {
     // renderer can't be changed during offloading
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index b7dcb8d..2f6df2c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -40,6 +40,7 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format);
+    virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 04a324c..231f2e1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -360,9 +360,32 @@
     return mState == STATE_RUNNING && !mAtEOS;
 }
 
-status_t NuPlayerDriver::setPlaybackRate(float rate) {
-    mPlayer->setPlaybackRate(rate);
-    return OK;
+status_t NuPlayerDriver::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock autoLock(mLock);
+    status_t err = mPlayer->setPlaybackSettings(rate);
+    if (err == OK) {
+        if (rate.mSpeed == 0.f && mState == STATE_RUNNING) {
+            mState = STATE_PAUSED;
+            // try to update position
+            (void)mPlayer->getCurrentPosition(&mPositionUs);
+            notifyListener_l(MEDIA_PAUSED);
+        } else if (rate.mSpeed != 0.f && mState == STATE_PAUSED) {
+            mState = STATE_RUNNING;
+        }
+    }
+    return err;
+}
+
+status_t NuPlayerDriver::getPlaybackSettings(AudioPlaybackRate *rate) {
+    return mPlayer->getPlaybackSettings(rate);
+}
+
+status_t NuPlayerDriver::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t NuPlayerDriver::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+    return mPlayer->getSyncSettings(sync, videoFps);
 }
 
 status_t NuPlayerDriver::seekTo(int msec) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 65f170e..9da7fc1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -49,7 +49,10 @@
     virtual status_t stop();
     virtual status_t pause();
     virtual bool isPlaying();
-    virtual status_t setPlaybackRate(float rate);
+    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
+    virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
     virtual status_t seekTo(int msec);
     virtual status_t getCurrentPosition(int *msec);
     virtual status_t getDuration(int *msec);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index f229452..6b8f99c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -66,7 +66,7 @@
       mVideoQueueGeneration(0),
       mAudioDrainGeneration(0),
       mVideoDrainGeneration(0),
-      mPlaybackRate(1.0),
+      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
       mAnchorNumFramesWritten(-1),
@@ -89,6 +89,8 @@
       mLastAudioBufferDrained(0),
       mWakeLock(new AWakeLock()) {
     mMediaClock = new MediaClock;
+    mPlaybackRate = mPlaybackSettings.mSpeed;
+    mMediaClock->setPlaybackRate(mPlaybackRate);
 }
 
 NuPlayer::Renderer::~Renderer() {
@@ -121,10 +123,111 @@
     msg->post();
 }
 
-void NuPlayer::Renderer::setPlaybackRate(float rate) {
-    sp<AMessage> msg = new AMessage(kWhatSetRate, this);
-    msg->setFloat("rate", rate);
-    msg->post();
+status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+    writeToAMessage(msg, rate);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
+    if (rate.mSpeed == 0.f) {
+        onPause();
+        // don't call the audiosink's setPlaybackRate if pausing, as the pitch does
+        // not have to correspond to any non-zero speed (e.g. the old speed). Keep
+        // the settings nonetheless, using the old speed, in case the audiosink changes.
+        AudioPlaybackRate newRate = rate;
+        newRate.mSpeed = mPlaybackSettings.mSpeed;
+        mPlaybackSettings = newRate;
+        return OK;
+    }
+
+    if (mAudioSink != NULL) {
+        status_t err = mAudioSink->setPlaybackRate(rate);
+        if (err != OK) {
+            return err;
+        }
+    }
+    mPlaybackSettings = rate;
+    mPlaybackRate = rate.mSpeed;
+    mMediaClock->setPlaybackRate(mPlaybackRate);
+    return OK;
+}
+
+status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, rate);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioSink != NULL) {
+        status_t err = mAudioSink->getPlaybackRate(rate);
+        if (err == OK) {
+            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
+                ALOGW("correcting mismatch in internal/external playback rate");
+            }
+            // adopt the playback settings actually used by the audiosink, as they
+            // may differ slightly when the audiosink ignores small changes.
+            mPlaybackSettings = *rate;
+            if (mPaused) {
+                rate->mSpeed = 0.f;
+            }
+        }
+        return err;
+    }
+    *rate = mPlaybackSettings;
+    return OK;
+}
+
+status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+    writeToAMessage(msg, sync, videoFpsHint);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
+    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+        return BAD_VALUE;
+    }
+    // TODO: support sync sources
+    return INVALID_OPERATION;
+}
+
+status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, sync, videoFps);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onGetSyncSettings(
+        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+    *sync = mSyncSettings;
+    *videoFps = -1.f;
+    return OK;
 }
 
 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
@@ -365,13 +468,63 @@
             break;
         }
 
-        case kWhatSetRate:
+        case kWhatConfigPlayback:
         {
-            CHECK(msg->findFloat("rate", &mPlaybackRate));
-            int32_t ratePermille = (int32_t)(0.5f + 1000 * mPlaybackRate);
-            mPlaybackRate = ratePermille / 1000.0f;
-            mMediaClock->setPlaybackRate(mPlaybackRate);
-            mAudioSink->setPlaybackRatePermille(ratePermille);
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate;
+            readFromAMessage(msg, &rate);
+            status_t err = onConfigPlayback(rate);
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetPlaybackSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            status_t err = onGetPlaybackSettings(&rate);
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, rate);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatConfigSync:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AVSyncSettings sync;
+            float videoFpsHint;
+            readFromAMessage(msg, &sync, &videoFpsHint);
+            status_t err = onConfigSync(sync, videoFpsHint);
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetSyncSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatGetSyncSettings");
+            AVSyncSettings sync;
+            float videoFps = -1.f;
+            status_t err = onGetSyncSettings(&sync, &videoFps);
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, sync, videoFps);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
             break;
         }
 
@@ -1087,6 +1240,16 @@
             mAudioSink->pause();
             mAudioSink->flush();
             mAudioSink->start();
+        } else {
+            mAudioSink->pause();
+            mAudioSink->flush();
+            // Call stop() to signal to the AudioSink to completely fill the
+            // internal buffer before resuming playback.
+            mAudioSink->stop();
+            if (!mPaused) {
+                mAudioSink->start();
+            }
+            mNumFramesWritten = 0;
         }
     } else {
         flushQueue(&mVideoQueue);
@@ -1176,7 +1339,6 @@
 
 void NuPlayer::Renderer::onPause() {
     if (mPaused) {
-        ALOGW("Renderer::onPause() called while already paused!");
         return;
     }
 
@@ -1214,6 +1376,12 @@
     {
         Mutex::Autolock autoLock(mLock);
         mPaused = false;
+
+        // reapply the playback settings to the audiosink, as we skipped doing so while paused
+        if (mAudioSink != NULL) {
+            mAudioSink->setPlaybackRate(mPlaybackSettings);
+        }
+
         mMediaClock->setPlaybackRate(mPlaybackRate);
 
         if (!mAudioQueue.empty()) {
@@ -1433,10 +1601,10 @@
                     &offloadInfo);
 
             if (err == OK) {
-                if (mPlaybackRate != 1.0) {
-                    mAudioSink->setPlaybackRatePermille(
-                            (int32_t)(mPlaybackRate * 1000 + 0.5f));
-                }
+                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
+            }
+
+            if (err == OK) {
                 // If the playback is offloaded to h/w, we pass
                 // the HAL some metadata information.
                 // We don't want to do this for PCM because it
@@ -1486,16 +1654,15 @@
                     NULL,
                     NULL,
                     (audio_output_flags_t)pcmFlags);
+        if (err == OK) {
+            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
+        }
         if (err != OK) {
             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
             return err;
         }
         mCurrentPcmInfo = info;
-        if (mPlaybackRate != 1.0) {
-            mAudioSink->setPlaybackRatePermille(
-                    (int32_t)(mPlaybackRate * 1000 + 0.5f));
-        }
         mAudioSink->start();
     }
     if (audioSinkChanged) {
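
Unlike the old fire-and-forget setPlaybackRate(), the renderer's new settings calls are synchronous: each posts a kWhatConfigPlayback / kWhatGetPlaybackSettings / kWhatConfigSync / kWhatGetSyncSettings message to the renderer's looper, blocks in postAndAwaitResponse(), and reads the status (and, for the getters, the settings) back out of the reply. A rough standalone analogue of that round trip, using std::promise/std::future in place of AMessage and AReplyToken (which are not reproduced here):

    #include <cstdio>
    #include <future>
    #include <thread>

    // Hypothetical stand-in for the renderer's blocking request/reply: the
    // caller blocks on a future while the "looper thread" applies the change
    // and posts the resulting status back.
    int configPlaybackOnLooper(float speed) {
        std::promise<int> reply;                  // plays the role of the AReplyToken
        std::future<int> response = reply.get_future();

        std::thread looper([&reply, speed] {      // plays the role of onMessageReceived()
            // the real handler calls onConfigPlayback(), which talks to the
            // audiosink; here we only reject negative speeds for illustration
            int err = (speed < 0.f) ? -22 /* BAD_VALUE */ : 0 /* OK */;
            reply.set_value(err);                 // response->setInt32("err", err); postReply()
        });

        int err = response.get();                 // msg->postAndAwaitResponse(&response)
        looper.join();
        return err;
    }

    int main() {
        std::printf("err=%d\n", configPlaybackOnLooper(1.25f));
    }

Returning a status this way is what lets the driver-level code above propagate BAD_VALUE or INVALID_OPERATION back to the caller instead of silently dropping the request.
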
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 38843d5..928b71b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -18,6 +18,9 @@
 
 #define NUPLAYER_RENDERER_H_
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
 #include "NuPlayer.h"
 
 namespace android {
@@ -48,7 +51,10 @@
 
     void queueEOS(bool audio, status_t finalResult);
 
-    void setPlaybackRate(float rate);
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate /* sanitized */);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
 
     void flush(bool audio, bool notifyComplete);
 
@@ -102,7 +108,10 @@
         kWhatPostDrainVideoQueue = 'pDVQ',
         kWhatQueueBuffer         = 'queB',
         kWhatQueueEOS            = 'qEOS',
-        kWhatSetRate             = 'setR',
+        kWhatConfigPlayback      = 'cfPB',
+        kWhatConfigSync          = 'cfSy',
+        kWhatGetPlaybackSettings = 'gPbS',
+        kWhatGetSyncSettings     = 'gSyS',
         kWhatFlush               = 'flus',
         kWhatPause               = 'paus',
         kWhatResume              = 'resm',
@@ -141,7 +150,12 @@
     int32_t mVideoDrainGeneration;
 
     sp<MediaClock> mMediaClock;
-    float mPlaybackRate;
+    float mPlaybackRate; // audio track rate
+
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+    float mVideoFpsHint;
+
     int64_t mAudioFirstAnchorTimeMediaUs;
     int64_t mAnchorTimeMediaUs;
     int64_t mAnchorNumFramesWritten;
@@ -217,6 +231,11 @@
     void onAudioSinkChanged();
     void onDisableOffloadAudio();
     void onEnableOffloadAudio();
+    status_t onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */);
+    status_t onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t onConfigSync(const AVSyncSettings &sync, float videoFpsHint);
+    status_t onGetSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
     void onPause();
     void onResume();
     void onSetVideoFrameRate(float fps);
@@ -252,6 +271,6 @@
     DISALLOW_EVIL_CONSTRUCTORS(Renderer);
 };
 
-}  // namespace android
+} // namespace android
 
 #endif  // NUPLAYER_RENDERER_H_
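
The added kWhat codes follow the file's existing convention of four-character literals ('cfPB', 'cfSy', 'gPbS', 'gSyS'), which pack four ASCII bytes into a single int32 so every message type is unique and easy to spot in a trace. The numeric value of a multi-character literal is implementation-defined; a tiny sketch of what GCC/Clang produce:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // 'c'=0x63 'f'=0x66 'P'=0x50 'B'=0x42 -> 0x63665042 on GCC/Clang
        uint32_t what = 'cfPB';
        std::printf("kWhatConfigPlayback = %#x\n", what);
    }
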
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index da22f11..5475a4a 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -24,6 +24,8 @@
 #include <inttypes.h>
 #include <utils/Trace.h>
 
+#include <gui/Surface.h>
+
 #include <media/stagefright/ACodec.h>
 
 #include <binder/MemoryDealer.h>
@@ -37,10 +39,10 @@
 #include <media/stagefright/BufferProducerWrapper.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
-
+#include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <media/hardware/HardwareAPI.h>
 
 #include <OMX_AudioExt.h>
@@ -259,9 +261,12 @@
 
     bool onConfigureComponent(const sp<AMessage> &msg);
     void onCreateInputSurface(const sp<AMessage> &msg);
+    void onUsePersistentInputSurface(const sp<AMessage> &msg);
     void onStart();
     void onShutdown(bool keepComponentAllocated);
 
+    status_t setupInputSurface();
+
     DISALLOW_EVIL_CONSTRUCTORS(LoadedState);
 };
 
@@ -405,6 +410,7 @@
     : mQuirks(0),
       mNode(0),
       mSentFormat(false),
+      mIsVideo(false),
       mIsEncoder(false),
       mUseMetadataOnEncoderOutput(false),
       mShutdownInProgress(false),
@@ -474,10 +480,30 @@
     msg->post();
 }
 
+status_t ACodec::setSurface(const sp<Surface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
+    msg->setObject("surface", surface);
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    if (err == OK) {
+        (void)response->findInt32("err", &err);
+    }
+    return err;
+}
+
 void ACodec::initiateCreateInputSurface() {
     (new AMessage(kWhatCreateInputSurface, this))->post();
 }
 
+void ACodec::initiateUsePersistentInputSurface(
+        const sp<PersistentSurface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatUsePersistentInputSurface, this);
+    msg->setObject("input-surface", surface);
+    msg->post();
+}
+
 void ACodec::signalEndOfInputStream() {
     (new AMessage(kWhatSignalEndOfInputStream, this))->post();
 }
@@ -521,6 +547,119 @@
     }
 }
 
+status_t ACodec::handleSetSurface(const sp<Surface> &surface) {
+    // allow keeping unset surface
+    if (surface == NULL) {
+        if (mNativeWindow != NULL) {
+            ALOGW("cannot unset a surface");
+            return INVALID_OPERATION;
+        }
+        return OK;
+    }
+
+    // cannot set a surface on a component that was configured without one
+    if (mNativeWindow == NULL) {
+        ALOGW("component was not configured with a surface");
+        return INVALID_OPERATION;
+    }
+
+    ANativeWindow *nativeWindow = surface.get();
+    // if we have not yet started the codec, we can simply set the native window
+    if (mBuffers[kPortIndexInput].size() == 0) {
+        mNativeWindow = surface;
+        return OK;
+    }
+
+    // we do not support changing a tunneled surface after start
+    if (mTunneled) {
+        ALOGW("cannot change tunneled surface");
+        return INVALID_OPERATION;
+    }
+
+    status_t err = setupNativeWindowSizeFormatAndUsage(nativeWindow);
+    if (err != OK) {
+        return err;
+    }
+
+    // get min undequeued count. We cannot switch to a surface that has a higher
+    // undequeued count than we allocated.
+    int minUndequeuedBuffers = 0;
+    err = nativeWindow->query(
+            nativeWindow, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+            &minUndequeuedBuffers);
+    if (err != 0) {
+        ALOGE("NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
+                strerror(-err), -err);
+        return err;
+    }
+    if (minUndequeuedBuffers > (int)mNumUndequeuedBuffers) {
+        ALOGE("new surface holds onto more buffers (%d) than planned for (%zu)",
+                minUndequeuedBuffers, mNumUndequeuedBuffers);
+        return BAD_VALUE;
+    }
+
+    // we cannot change the number of output buffers while OMX is running,
+    // so set up the new surface with the same buffer count
+    Vector<BufferInfo> &buffers = mBuffers[kPortIndexOutput];
+    ALOGV("setting up surface for %zu buffers", buffers.size());
+
+    err = native_window_set_buffer_count(nativeWindow, buffers.size());
+    if (err != 0) {
+        ALOGE("native_window_set_buffer_count failed: %s (%d)", strerror(-err),
+                -err);
+        return err;
+    }
+
+    // for meta data mode, we move dequeued buffers to the new surface.
+    // for non-meta mode, we must move all registered buffers
+    for (size_t i = 0; i < buffers.size(); ++i) {
+        const BufferInfo &info = buffers[i];
+        // skip undequeued buffers for meta data mode
+        if (mStoreMetaDataInOutputBuffers
+                && info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+            ALOGV("skipping buffer %p", info.mGraphicBuffer->getNativeBuffer());
+            continue;
+        }
+        ALOGV("attaching buffer %p", info.mGraphicBuffer->getNativeBuffer());
+
+        err = surface->attachBuffer(info.mGraphicBuffer->getNativeBuffer());
+        if (err != OK) {
+            ALOGE("failed to attach buffer %p to the new surface: %s (%d)",
+                    info.mGraphicBuffer->getNativeBuffer(),
+                    strerror(-err), -err);
+            return err;
+        }
+    }
+
+    // cancel undequeued buffers to new surface
+    if (!mStoreMetaDataInOutputBuffers) {
+        for (size_t i = 0; i < buffers.size(); ++i) {
+            const BufferInfo &info = buffers[i];
+            if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+                ALOGV("canceling buffer %p", info.mGraphicBuffer->getNativeBuffer());
+                err = nativeWindow->cancelBuffer(
+                        nativeWindow, info.mGraphicBuffer->getNativeBuffer(), -1);
+                if (err != OK) {
+                    ALOGE("failed to cancel buffer %p to the new surface: %s (%d)",
+                            info.mGraphicBuffer->getNativeBuffer(),
+                            strerror(-err), -err);
+                    return err;
+                }
+            }
+        }
+        // disallow further allocation
+        (void)surface->getIGraphicBufferProducer()->allowAllocation(false);
+    }
+
+    // push blank buffers to previous window if requested
+    if (mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown) {
+        pushBlankBuffersToNativeWindow(mNativeWindow.get());
+    }
+
+    mNativeWindow = nativeWindow;
+    return OK;
+}
+
 status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
@@ -615,9 +754,7 @@
     return OK;
 }
 
-status_t ACodec::configureOutputBuffersFromNativeWindow(
-        OMX_U32 *bufferCount, OMX_U32 *bufferSize,
-        OMX_U32 *minUndequeuedBuffers) {
+status_t ACodec::setupNativeWindowSizeFormatAndUsage(ANativeWindow *nativeWindow /* nonnull */) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
@@ -629,49 +766,6 @@
         return err;
     }
 
-    err = native_window_set_buffers_dimensions(
-            mNativeWindow.get(),
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight);
-
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_dimensions failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_format(
-            mNativeWindow.get(),
-            def.format.video.eColorFormat);
-
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_format failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    if (mRotationDegrees != 0) {
-        uint32_t transform = 0;
-        switch (mRotationDegrees) {
-            case 0: transform = 0; break;
-            case 90: transform = HAL_TRANSFORM_ROT_90; break;
-            case 180: transform = HAL_TRANSFORM_ROT_180; break;
-            case 270: transform = HAL_TRANSFORM_ROT_270; break;
-            default: transform = 0; break;
-        }
-
-        if (transform > 0) {
-            err = native_window_set_buffers_transform(
-                    mNativeWindow.get(), transform);
-            if (err != 0) {
-                ALOGE("native_window_set_buffers_transform failed: %s (%d)",
-                        strerror(-err), -err);
-                return err;
-            }
-        }
-    }
-
-    // Set up the native window.
     OMX_U32 usage = 0;
     err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
     if (err != 0) {
@@ -685,43 +779,32 @@
         usage |= GRALLOC_USAGE_PROTECTED;
     }
 
-    // Make sure to check whether either Stagefright or the video decoder
-    // requested protected buffers.
-    if (usage & GRALLOC_USAGE_PROTECTED) {
-        // Verify that the ANativeWindow sends images directly to
-        // SurfaceFlinger.
-        int queuesToNativeWindow = 0;
-        err = mNativeWindow->query(
-                mNativeWindow.get(), NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER,
-                &queuesToNativeWindow);
-        if (err != 0) {
-            ALOGE("error authenticating native window: %d", err);
-            return err;
-        }
-        if (queuesToNativeWindow != 1) {
-            ALOGE("native window could not be authenticated");
-            return PERMISSION_DENIED;
-        }
+    usage |= GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP;
+
+    ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec)", omxUsage, usage);
+    return setNativeWindowSizeFormatAndUsage(
+            nativeWindow,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.format.video.eColorFormat,
+            mRotationDegrees,
+            usage);
+}
+
+status_t ACodec::configureOutputBuffersFromNativeWindow(
+        OMX_U32 *bufferCount, OMX_U32 *bufferSize,
+        OMX_U32 *minUndequeuedBuffers) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err == OK) {
+        err = setupNativeWindowSizeFormatAndUsage(mNativeWindow.get());
     }
-
-    int consumerUsage = 0;
-    err = mNativeWindow->query(
-            mNativeWindow.get(), NATIVE_WINDOW_CONSUMER_USAGE_BITS,
-            &consumerUsage);
-    if (err != 0) {
-        ALOGW("failed to get consumer usage bits. ignoring");
-        err = 0;
-    }
-
-    ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec) + %#x(Consumer) = %#x",
-            omxUsage, usage, consumerUsage, usage | consumerUsage);
-    usage |= consumerUsage;
-    err = native_window_set_usage(
-            mNativeWindow.get(),
-            usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
-
-    if (err != 0) {
-        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
+    if (err != OK) {
         return err;
     }
 
@@ -805,6 +888,11 @@
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
 
+    if (!mStoreMetaDataInOutputBuffers) {
+        static_cast<Surface*>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(true);
+    }
+
     ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
          "output port",
          mComponentName.c_str(), bufferCount, bufferSize);
@@ -863,6 +951,11 @@
         }
     }
 
+    if (!mStoreMetaDataInOutputBuffers) {
+        static_cast<Surface*>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(false);
+    }
+
     return err;
 }
 
@@ -1186,6 +1279,7 @@
 
     mIsEncoder = encoder;
 
+
     status_t err = setComponentRole(encoder /* isEncoder */, mime);
 
     if (err != OK) {
@@ -1244,6 +1338,7 @@
     // sps/pps to idr frames, since in metadata mode the bitstream is in an
     // opaque handle, to which we don't have access.
     int32_t video = !strncasecmp(mime, "video/", 6);
+    mIsVideo = video;
     if (encoder && video) {
         OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
             && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
@@ -1303,9 +1398,8 @@
         }
     }
     if (haveNativeWindow) {
-        sp<NativeWindowWrapper> windowWrapper(
-                static_cast<NativeWindowWrapper *>(obj.get()));
-        sp<ANativeWindow> nativeWindow = windowWrapper->getNativeWindow();
+        sp<ANativeWindow> nativeWindow =
+            static_cast<ANativeWindow *>(static_cast<Surface *>(obj.get()));
 
         // START of temporary support for automatic FRC - THIS WILL BE REMOVED
         int32_t autoFrc;
@@ -1464,13 +1558,8 @@
         }
 
         if (haveNativeWindow) {
-            sp<NativeWindowWrapper> nativeWindow(
-                    static_cast<NativeWindowWrapper *>(obj.get()));
-            CHECK(nativeWindow != NULL);
-            mNativeWindow = nativeWindow->getNativeWindow();
-
-            native_window_set_scaling_mode(
-                    mNativeWindow.get(), NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+            mNativeWindow = static_cast<Surface *>(obj.get());
+            CHECK(mNativeWindow != NULL);
         }
 
         // initialize native window now to get actual output format
@@ -3966,156 +4055,6 @@
     notify->post();
 }
 
-status_t ACodec::pushBlankBuffersToNativeWindow() {
-    status_t err = NO_ERROR;
-    ANativeWindowBuffer* anb = NULL;
-    int numBufs = 0;
-    int minUndequeuedBufs = 0;
-
-    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
-    // no frames get dropped by SurfaceFlinger assuming that these are video
-    // frames.
-    err = native_window_api_disconnect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_MEDIA);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_api_connect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_CPU);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_dimensions(mNativeWindow.get(), 1, 1);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_dimensions failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_buffers_format(mNativeWindow.get(), HAL_PIXEL_FORMAT_RGBX_8888);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_format failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_scaling_mode(mNativeWindow.get(),
-                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank_frames: set_scaling_mode failed: %s (%d)",
-              strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_usage(mNativeWindow.get(),
-            GRALLOC_USAGE_SW_WRITE_OFTEN);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_usage failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = mNativeWindow->query(mNativeWindow.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
-                "failed: %s (%d)", strerror(-err), -err);
-        goto error;
-    }
-
-    numBufs = minUndequeuedBufs + 1;
-    err = native_window_set_buffer_count(mNativeWindow.get(), numBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    // We  push numBufs + 1 buffers to ensure that we've drawn into the same
-    // buffer twice.  This should guarantee that the buffer has been displayed
-    // on the screen and then been replaced, so an previous video frames are
-    // guaranteed NOT to be currently displayed.
-    for (int i = 0; i < numBufs + 1; i++) {
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &anb);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
-
-        // Fill the buffer with the a 1x1 checkerboard pattern ;)
-        uint32_t* img = NULL;
-        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: lock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        *img = 0;
-
-        err = buf->unlock();
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: unlock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        err = mNativeWindow->queueBuffer(mNativeWindow.get(),
-                buf->getNativeBuffer(), -1);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        anb = NULL;
-    }
-
-error:
-
-    if (err != NO_ERROR) {
-        // Clean up after an error.
-        if (anb != NULL) {
-            mNativeWindow->cancelBuffer(mNativeWindow.get(), anb, -1);
-        }
-
-        native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-
-        return err;
-    } else {
-        // Clean up after success.
-        err = native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        err = native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        return NO_ERROR;
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::PortDescription::PortDescription() {
@@ -4188,7 +4127,24 @@
             return onOMXMessage(msg);
         }
 
+        case ACodec::kWhatSetSurface:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<RefBase> obj;
+            CHECK(msg->findObject("surface", &obj));
+
+            status_t err = mCodec->handleSetSurface(static_cast<Surface *>(obj.get()));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case ACodec::kWhatCreateInputSurface:
+        case ACodec::kWhatUsePersistentInputSurface:
         case ACodec::kWhatSignalEndOfInputStream:
         {
             // This may result in an app illegal state exception.
@@ -4906,7 +4862,10 @@
     CHECK(mCodec->mNode == 0);
 
     OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
+    if (client.connect() != OK) {
+        mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
+        return false;
+    }
 
     sp<IOMX> omx = client.interface();
 
@@ -5081,6 +5040,13 @@
             break;
         }
 
+        case ACodec::kWhatUsePersistentInputSurface:
+        {
+            onUsePersistentInputSurface(msg);
+            handled = true;
+            break;
+        }
+
         case ACodec::kWhatStart:
         {
             onStart();
@@ -5148,20 +5114,10 @@
     return true;
 }
 
-void ACodec::LoadedState::onCreateInputSurface(
-        const sp<AMessage> & /* msg */) {
-    ALOGV("onCreateInputSurface");
+status_t ACodec::LoadedState::setupInputSurface() {
+    status_t err = OK;
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-
-    sp<IGraphicBufferProducer> bufferProducer;
-    status_t err;
-
-    err = mCodec->mOMX->createInputSurface(mCodec->mNode, kPortIndexInput,
-            &bufferProducer);
-
-    if (err == OK && mCodec->mRepeatFrameDelayUs > 0ll) {
+    if (mCodec->mRepeatFrameDelayUs > 0ll) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -5174,10 +5130,11 @@
                   "frames (err %d)",
                   mCodec->mComponentName.c_str(),
                   err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mMaxPtsGapUs > 0ll) {
+    if (mCodec->mMaxPtsGapUs > 0ll) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -5189,10 +5146,11 @@
             ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
                     mCodec->mComponentName.c_str(),
                     err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mMaxFps > 0) {
+    if (mCodec->mMaxFps > 0) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -5204,10 +5162,11 @@
             ALOGE("[%s] Unable to configure max fps (err %d)",
                     mCodec->mComponentName.c_str(),
                     err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mTimePerCaptureUs > 0ll
+    if (mCodec->mTimePerCaptureUs > 0ll
             && mCodec->mTimePerFrameUs > 0ll) {
         int64_t timeLapse[2];
         timeLapse[0] = mCodec->mTimePerFrameUs;
@@ -5223,10 +5182,11 @@
             ALOGE("[%s] Unable to configure time lapse (err %d)",
                     mCodec->mComponentName.c_str(),
                     err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mCreateInputBuffersSuspended) {
+    if (mCodec->mCreateInputBuffersSuspended) {
         bool suspend = true;
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
@@ -5239,9 +5199,28 @@
             ALOGE("[%s] Unable to configure option to suspend (err %d)",
                   mCodec->mComponentName.c_str(),
                   err);
+            return err;
         }
     }
 
+    return OK;
+}
+
+void ACodec::LoadedState::onCreateInputSurface(
+        const sp<AMessage> & /* msg */) {
+    ALOGV("onCreateInputSurface");
+
+    sp<AMessage> notify = mCodec->mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    status_t err = mCodec->mOMX->createInputSurface(
+            mCodec->mNode, kPortIndexInput, &bufferProducer);
+
+    if (err == OK) {
+        err = setupInputSurface();
+    }
+
     if (err == OK) {
         notify->setObject("input-surface",
                 new BufferProducerWrapper(bufferProducer));
@@ -5256,6 +5235,35 @@
     notify->post();
 }
 
+void ACodec::LoadedState::onUsePersistentInputSurface(
+        const sp<AMessage> &msg) {
+    ALOGV("onUsePersistentInputSurface");
+
+    sp<AMessage> notify = mCodec->mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatInputSurfaceAccepted);
+
+    sp<RefBase> obj;
+    CHECK(msg->findObject("input-surface", &obj));
+    sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
+
+    status_t err = mCodec->mOMX->usePersistentInputSurface(
+            mCodec->mNode, kPortIndexInput, surface->getBufferConsumer());
+
+    if (err == OK) {
+        err = setupInputSurface();
+    }
+
+    if (err != OK) {
+        // Can't use mCodec->signalError() here -- MediaCodec won't forward
+        // the error through because it's in the "configured" state.  We
+        // send a kWhatInputSurfaceAccepted with an error value instead.
+        ALOGE("[%s] onUsePersistentInputSurface returning error %d",
+                mCodec->mComponentName.c_str(), err);
+        notify->setInt32("err", err);
+    }
+    notify->post();
+}
+
 void ACodec::LoadedState::onStart() {
     ALOGV("onStart");
 
@@ -5684,6 +5692,15 @@
         }
     }
 
+    float rate;
+    if (params->findFloat("operating-rate", &rate) && rate > 0) {
+        status_t err = setOperatingRate(rate, mIsVideo);
+        if (err != OK) {
+            ALOGE("Failed to set parameter 'operating-rate' (err %d)", err);
+            return err;
+        }
+    }
+
     return OK;
 }
 
@@ -5932,7 +5949,7 @@
             // them has made it to the display.  This allows the OMX
             // component teardown to zero out any protected buffers
             // without the risk of scanning out one of those buffers.
-            mCodec->pushBlankBuffersToNativeWindow();
+            pushBlankBuffersToNativeWindow(mCodec->mNativeWindow.get());
         }
 
         mCodec->changeState(mCodec->mIdleToLoadedState);
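
Most of the native-window plumbing ACodec used to do inline (buffer dimensions, format, rotation transform, usage bits) now goes through the shared setNativeWindowSizeFormatAndUsage() helper added in SurfaceUtils.cpp, so the configure path and the new handleSetSurface() path set the window up identically. The rotation part is just a degrees-to-transform-flag mapping, as in the code removed above; a standalone sketch (the HAL_TRANSFORM_* values are copied from the Android HAL headers and are an assumption as far as this patch is concerned):

    #include <cstdint>
    #include <cstdio>

    // Values as defined in Android's hardware.h (assumption; not part of this patch).
    enum : uint32_t {
        HAL_TRANSFORM_ROT_90  = 0x04,
        HAL_TRANSFORM_ROT_180 = 0x03,
        HAL_TRANSFORM_ROT_270 = 0x07,
    };

    // Same mapping as the removed switch statement: unknown angles fall back to 0.
    uint32_t transformFromRotation(int rotationDegrees) {
        switch (rotationDegrees) {
            case 90:  return HAL_TRANSFORM_ROT_90;
            case 180: return HAL_TRANSFORM_ROT_180;
            case 270: return HAL_TRANSFORM_ROT_270;
            default:  return 0;
        }
    }

    int main() {
        std::printf("transform for 270 degrees = %u\n", transformFromRotation(270));
    }
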
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 45581f3..fa17b2e 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -57,6 +57,7 @@
         StagefrightMediaScanner.cpp       \
         StagefrightMetadataRetriever.cpp  \
         SurfaceMediaSource.cpp            \
+        SurfaceUtils.cpp                  \
         ThrottledSource.cpp               \
         TimeSource.cpp                    \
         TimedEventQueue.cpp               \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index e24824b..dd9d393 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -408,11 +408,22 @@
     }
 }
 
-status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
+status_t AudioPlayer::setPlaybackRate(const AudioPlaybackRate &rate) {
     if (mAudioSink.get() != NULL) {
-        return mAudioSink->setPlaybackRatePermille(ratePermille);
+        return mAudioSink->setPlaybackRate(rate);
     } else if (mAudioTrack != 0){
-        return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000);
+        return mAudioTrack->setPlaybackRate(rate);
+    } else {
+        return NO_INIT;
+    }
+}
+
+status_t AudioPlayer::getPlaybackRate(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioSink.get() != NULL) {
+        return mAudioSink->getPlaybackRate(rate);
+    } else if (mAudioTrack != 0) {
+        *rate = mAudioTrack->getPlaybackRate();
+        return OK;
     } else {
         return NO_INIT;
     }
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 804f131..e5a6a9b 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -50,7 +50,8 @@
 }
 
 AudioSource::AudioSource(
-        audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
+        audio_source_t inputSource, const String16 &opPackageName, uint32_t sampleRate,
+        uint32_t channelCount)
     : mStarted(false),
       mSampleRate(sampleRate),
       mPrevSampleTimeUs(0),
@@ -78,6 +79,7 @@
         mRecord = new AudioRecord(
                     inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                     audio_channel_in_mask_from_count(channelCount),
+                    opPackageName,
                     (size_t) (bufCount * frameCount),
                     AudioRecordCallbackFunction,
                     this,
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index c14625d..df01e7c 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -241,6 +241,8 @@
 
     mClockEstimator = new WindowedLinearFitEstimator();
 
+    mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
+
     reset();
 }
 
@@ -1009,6 +1011,10 @@
                 return err;
             }
         }
+
+        if (mAudioPlayer != NULL) {
+            mAudioPlayer->setPlaybackRate(mPlaybackSettings);
+        }
     }
 
     if (mTimeSource == NULL && mAudioPlayer == NULL) {
@@ -1131,6 +1137,10 @@
     }
 
     if (err == OK) {
+        err = mAudioPlayer->setPlaybackRate(mPlaybackSettings);
+    }
+
+    if (err == OK) {
         modifyFlags(AUDIO_RUNNING, SET);
 
         mWatchForAudioEOS = true;
@@ -2553,14 +2563,6 @@
         {
             return setCacheStatCollectFreq(request);
         }
-        case KEY_PARAMETER_PLAYBACK_RATE_PERMILLE:
-        {
-            if (mAudioPlayer != NULL) {
-                return mAudioPlayer->setPlaybackRatePermille(request.readInt32());
-            } else {
-                return NO_INIT;
-            }
-        }
         default:
         {
             return ERROR_UNSUPPORTED;
@@ -2597,6 +2599,58 @@
     }
 }
 
+status_t AwesomePlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock autoLock(mLock);
+    // cursory sanity check for non-audio and paused cases
+    if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
+        || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
+        || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
+        || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
+        return BAD_VALUE;
+    }
+
+    status_t err = OK;
+    if (rate.mSpeed == 0.f) {
+        if (mFlags & PLAYING) {
+            modifyFlags(CACHE_UNDERRUN, CLEAR); // same as pause
+            err = pause_l();
+        }
+        if (err == OK) {
+            // save settings (using old speed) in case player is resumed
+            AudioPlaybackRate newRate = rate;
+            newRate.mSpeed = mPlaybackSettings.mSpeed;
+            mPlaybackSettings = newRate;
+        }
+        return err;
+    }
+    if (mAudioPlayer != NULL) {
+        err = mAudioPlayer->setPlaybackRate(rate);
+    }
+    if (err == OK) {
+        mPlaybackSettings = rate;
+        if (!(mFlags & PLAYING)) {
+            play_l();
+        }
+    }
+    return err;
+}
+
+status_t AwesomePlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioPlayer != NULL) {
+        status_t err = mAudioPlayer->getPlaybackRate(rate);
+        if (err == OK) {
+            mPlaybackSettings = *rate;
+            Mutex::Autolock autoLock(mLock);
+            if (!(mFlags & PLAYING)) {
+                rate->mSpeed = 0.f;
+            }
+        }
+        return err;
+    }
+    *rate = mPlaybackSettings;
+    return OK;
+}
+
 status_t AwesomePlayer::getTrackInfo(Parcel *reply) const {
     Mutex::Autolock autoLock(mLock);
     size_t trackCount = mExtractor->countTracks();
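
AwesomePlayer::setPlaybackSettings() above admits 0 as the pause sentinel but otherwise requires speed and pitch to stay inside the timestretch limits before touching the AudioPlayer. A standalone sketch of that predicate (the numeric bounds are placeholders for AUDIO_TIMESTRETCH_SPEED_MIN/MAX from AudioResamplerPublic.h, not values defined in this patch):

    #include <cstdio>

    // Placeholder bounds; the real limits come from AUDIO_TIMESTRETCH_SPEED_MIN/MAX.
    constexpr float kSpeedMin = 0.01f;
    constexpr float kSpeedMax = 20.0f;

    struct Rate { float speed; float pitch; };

    // Mirrors the check in setPlaybackSettings(): speed 0 means "pause" and is
    // accepted; any other speed, and the pitch, must be inside the supported range.
    bool isValidPlaybackRate(const Rate &r) {
        if (r.speed != 0.f && (r.speed < kSpeedMin || r.speed > kSpeedMax)) return false;
        if (r.pitch < kSpeedMin || r.pitch > kSpeedMax) return false;
        return true;
    }

    int main() {
        std::printf("%d %d\n", isValidPlaybackRate({0.f, 1.f}),
                               isValidPlaybackRate({100.f, 1.f}));
    }
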
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 55e3c19..2e54e8c 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -282,6 +282,41 @@
 
     mFirstFramePos = pos;
     mFixedHeader = header;
+    mMeta = new MetaData;
+    sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
+
+    if (seeker == NULL) {
+        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
+    } else {
+        mSeeker = seeker;
+        int encd = seeker->getEncoderDelay();
+        int encp = seeker->getEncoderPadding();
+        if (encd != 0 || encp != 0) {
+            mMeta->setInt32(kKeyEncoderDelay, encd);
+            mMeta->setInt32(kKeyEncoderPadding, encp);
+        }
+    }
+
+    if (mSeeker != NULL) {
+        // While it is safe to send the XING/VBRI frame to the decoder, this will
+        // result in an extra 1152 samples being output. In addition, the bitrate
+        // of the Xing header might not match the rest of the file, which could
+        // lead to problems when seeking. The real first frame to decode is after
+        // the XING/VBRI frame, so skip there.
+        size_t frame_size;
+        int sample_rate;
+        int num_channels;
+        int bitrate;
+        GetMPEGAudioFrameSize(
+                header, &frame_size, &sample_rate, &num_channels, &bitrate);
+        pos += frame_size;
+        if (!Resync(mDataSource, 0, &pos, &post_id3_pos, &header)) {
+            // mInitCheck will remain NO_INIT
+            return;
+        }
+        mFirstFramePos = pos;
+        mFixedHeader = header;
+    }
 
     size_t frame_size;
     int sample_rate;
@@ -292,8 +327,6 @@
 
     unsigned layer = 4 - ((header >> 17) & 3);
 
-    mMeta = new MetaData;
-
     switch (layer) {
         case 1:
             mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I);
@@ -312,27 +345,6 @@
     mMeta->setInt32(kKeyBitRate, bitrate * 1000);
     mMeta->setInt32(kKeyChannelCount, num_channels);
 
-    sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
-
-    if (seeker == NULL) {
-        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
-    } else {
-        mSeeker = seeker;
-        int encd = seeker->getEncoderDelay();
-        int encp = seeker->getEncoderPadding();
-        if (encd != 0 || encp != 0) {
-            mMeta->setInt32(kKeyEncoderDelay, encd);
-            mMeta->setInt32(kKeyEncoderPadding, encp);
-        }
-    }
-
-    if (mSeeker != NULL) {
-        // While it is safe to send the XING/VBRI frame to the decoder, this will
-        // result in an extra 1152 samples being output. The real first frame to
-        // decode is after the XING/VBRI frame, so skip there.
-        mFirstFramePos += frame_size;
-    }
-
     int64_t durationUs;
 
     if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index beb12ec..3bc22f2 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -94,6 +94,7 @@
     void addChunkOffset(off64_t offset);
     int32_t getTrackId() const { return mTrackId; }
     status_t dump(int fd, const Vector<String16>& args) const;
+    static const char *getFourCCForMime(const char *mime);
 
 private:
     enum {
@@ -426,6 +427,33 @@
     return OK;
 }
 
+// static
+const char *MPEG4Writer::Track::getFourCCForMime(const char *mime) {
+    if (mime == NULL) {
+        return NULL;
+    }
+    if (!strncasecmp(mime, "audio/", 6)) {
+        if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime)) {
+            return "samr";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime)) {
+            return "sawb";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime)) {
+            return "mp4a";
+        }
+    } else if (!strncasecmp(mime, "video/", 6)) {
+        if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
+            return "mp4v";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
+            return "s263";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
+            return "avc1";
+        }
+    } else {
+        ALOGE("Track (%s) other than video or audio is not supported", mime);
+    }
+    return NULL;
+}
+
 status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
     Mutex::Autolock l(mLock);
     if (mStarted) {
@@ -441,14 +469,11 @@
 
     CHECK(source.get() != NULL);
 
-    // A track of type other than video or audio is not supported.
     const char *mime;
     source->getFormat()->findCString(kKeyMIMEType, &mime);
     bool isAudio = !strncasecmp(mime, "audio/", 6);
-    bool isVideo = !strncasecmp(mime, "video/", 6);
-    if (!isAudio && !isVideo) {
-        ALOGE("Track (%s) other than video or audio is not supported",
-            mime);
+    if (Track::getFourCCForMime(mime) == NULL) {
+        ALOGE("Unsupported mime '%s'", mime);
         return ERROR_UNSUPPORTED;
     }
 
@@ -2730,17 +2755,13 @@
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
-        mOwner->beginBox("mp4v");
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
-        mOwner->beginBox("s263");
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
-        mOwner->beginBox("avc1");
-    } else {
+    const char *fourcc = getFourCCForMime(mime);
+    if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
         CHECK(!"should not be here, unknown mime type.");
     }
 
+    mOwner->beginBox(fourcc);        // video format
     mOwner->writeInt32(0);           // reserved
     mOwner->writeInt16(0);           // reserved
     mOwner->writeInt16(1);           // data ref index
@@ -2784,14 +2805,8 @@
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    const char *fourcc = NULL;
-    if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime)) {
-        fourcc = "samr";
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime)) {
-        fourcc = "sawb";
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime)) {
-        fourcc = "mp4a";
-    } else {
+    const char *fourcc = getFourCCForMime(mime);
+    if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
         CHECK(!"should not be here, unknown mime type.");
     }
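
Centralizing the mime-to-fourcc table in Track::getFourCCForMime() lets addSource() reject unsupported tracks using the same mapping later passed to beginBox() when the sample-entry box is written. The four characters end up as the big-endian 32-bit box type in the file; a minimal sketch of that packing (simplified; the real writer emits the bytes through beginBox(), not through this helper):

    #include <cstdint>
    #include <cstdio>

    // Pack a 4-character code ("mp4a", "avc1", ...) into the big-endian 32-bit
    // tag that heads the corresponding sample-entry box in the MP4 file.
    uint32_t packFourcc(const char *fourcc) {
        return (uint32_t(uint8_t(fourcc[0])) << 24) |
               (uint32_t(uint8_t(fourcc[1])) << 16) |
               (uint32_t(uint8_t(fourcc[2])) << 8)  |
                uint32_t(uint8_t(fourcc[3]));
    }

    int main() {
        std::printf("avc1 -> %#010x\n", packFourcc("avc1")); // 0x61766331
    }
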
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 7065a6e..44f6542 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -28,6 +28,7 @@
 #include <binder/MemoryDealer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
+#include <media/IOMX.h>
 #include <media/IResourceManagerService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -42,7 +43,10 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaFilter.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/NativeWindowWrapper.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <private/android_filesystem_config.h>
 #include <utils/Log.h>
 #include <utils/Singleton.h>
@@ -310,6 +314,26 @@
     return ret == OK ? codec : NULL; // NULL deallocates codec.
 }
 
+// static
+sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
+    OMXClient client;
+    CHECK_EQ(client.connect(), (status_t)OK);
+    sp<IOMX> omx = client.interface();
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    sp<IGraphicBufferConsumer> bufferConsumer;
+
+    status_t err = omx->createPersistentInputSurface(
+            &bufferProducer, &bufferConsumer);
+
+    if (err != OK) {
+        ALOGE("Failed to create persistent input surface.");
+        return NULL;
+    }
+
+    return new PersistentSurface(bufferProducer, bufferConsumer);
+}
+
 MediaCodec::MediaCodec(const sp<ALooper> &looper)
     : mState(UNINITIALIZED),
       mLooper(looper),
@@ -392,6 +416,10 @@
             tmp.erase(tmp.size() - 7, 7);
         }
         const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
+        if (mcl == NULL) {
+            mCodec = NULL;  // remove the codec.
+            return NO_INIT; // if called from Java should raise IOException
+        }
         ssize_t codecIdx = mcl->findCodecByName(tmp.c_str());
         if (codecIdx >= 0) {
             const sp<MediaCodecInfo> info = mcl->getCodecInfo(codecIdx);
@@ -434,7 +462,8 @@
     status_t err;
     Vector<MediaResource> resources;
     const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
-    resources.push_back(MediaResource(String8(type), 1));
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
@@ -462,7 +491,7 @@
 
 status_t MediaCodec::configure(
         const sp<AMessage> &format,
-        const sp<Surface> &nativeWindow,
+        const sp<Surface> &surface,
         const sp<ICrypto> &crypto,
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, this);
@@ -474,12 +503,7 @@
 
     msg->setMessage("format", format);
     msg->setInt32("flags", flags);
-
-    if (nativeWindow != NULL) {
-        msg->setObject(
-                "native-window",
-                new NativeWindowWrapper(nativeWindow));
-    }
+    msg->setObject("surface", surface);
 
     if (crypto != NULL) {
         msg->setPointer("crypto", crypto.get());
@@ -492,7 +516,8 @@
     Vector<MediaResource> resources;
     const char *type = (mFlags & kFlagIsSecure) ?
             kResourceSecureCodec : kResourceNonSecureCodec;
-    resources.push_back(MediaResource(String8(type), 1));
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
     // Don't know the buffer size at this point, but it's fine to use 1 because
     // the reclaimResource call doesn't consider the requester's buffer size for now.
     resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
@@ -523,6 +548,23 @@
     return err;
 }
 
+status_t MediaCodec::usePersistentInputSurface(
+        const sp<PersistentSurface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatUsePersistentInputSurface, this);
+    msg->setObject("input-surface", surface.get());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::setSurface(const sp<Surface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
+    msg->setObject("surface", surface);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 status_t MediaCodec::createInputSurface(
         sp<IGraphicBufferProducer>* bufferProducer) {
     sp<AMessage> msg = new AMessage(kWhatCreateInputSurface, this);
@@ -557,9 +599,9 @@
     return size;
 }
 
-void MediaCodec::addResource(const char *type, uint64_t value) {
+void MediaCodec::addResource(const String8 &type, const String8 &subtype, uint64_t value) {
     Vector<MediaResource> resources;
-    resources.push_back(MediaResource(String8(type), value));
+    resources.push_back(MediaResource(type, subtype, value));
     mResourceManagerService->addResource(
             getCallingPid(), getId(mResourceManagerClient), mResourceManagerClient, resources);
 }
@@ -571,7 +613,8 @@
     Vector<MediaResource> resources;
     const char *type = (mFlags & kFlagIsSecure) ?
             kResourceSecureCodec : kResourceNonSecureCodec;
-    resources.push_back(MediaResource(String8(type), 1));
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
     // Don't know the buffer size at this point, but it's fine to use 1 because
     // the reclaimResource call doesn't consider the requester's buffer size for now.
     resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
@@ -1183,7 +1226,9 @@
                         mFlags &= ~kFlagIsSecure;
                         resourceType = String8(kResourceNonSecureCodec);
                     }
-                    addResource(resourceType, 1);
+
+                    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+                    addResource(resourceType, String8(subtype), 1);
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -1213,7 +1258,7 @@
                 {
                     // response to initiateCreateInputSurface()
                     status_t err = NO_ERROR;
-                    sp<AMessage> response = new AMessage();
+                    sp<AMessage> response = new AMessage;
                     if (!msg->findInt32("err", &err)) {
                         sp<RefBase> obj;
                         msg->findObject("input-surface", &obj);
@@ -1227,10 +1272,24 @@
                     break;
                 }
 
+                case CodecBase::kWhatInputSurfaceAccepted:
+                {
+                    // response to initiateUsePersistentInputSurface()
+                    status_t err = NO_ERROR;
+                    sp<AMessage> response = new AMessage();
+                    if (!msg->findInt32("err", &err)) {
+                        mHaveInputSurface = true;
+                    } else {
+                        response->setInt32("err", err);
+                    }
+                    response->postReply(mReplyID);
+                    break;
+                }
+
                 case CodecBase::kWhatSignaledInputEOS:
                 {
                     // response to signalEndOfInputStream()
-                    sp<AMessage> response = new AMessage();
+                    sp<AMessage> response = new AMessage;
                     status_t err;
                     if (msg->findInt32("err", &err)) {
                         response->setInt32("err", err);
@@ -1297,7 +1356,11 @@
                             // allocating input buffers, so this is a good
                             // indication that now all buffers are allocated.
                             if (mIsVideo) {
-                                addResource(kResourceGraphicMemory, getGraphicBufferSize());
+                                String8 subtype;
+                                addResource(
+                                        String8(kResourceGraphicMemory),
+                                        subtype,
+                                        getGraphicBufferSize());
                             }
                             setState(STARTED);
                             (new AMessage)->postReply(mReplyID);
@@ -1314,13 +1377,13 @@
                     ALOGV("codec output format changed");
 
                     if (mSoftRenderer == NULL &&
-                            mNativeWindow != NULL &&
+                            mSurface != NULL &&
                             (mFlags & kFlagUsesSoftwareRenderer)) {
                         AString mime;
                         CHECK(msg->findString("mime", &mime));
 
                         if (mime.startsWithIgnoreCase("video/")) {
-                            mSoftRenderer = new SoftwareRenderer(mNativeWindow);
+                            mSoftRenderer = new SoftwareRenderer(mSurface);
                         }
                     }
 
@@ -1592,26 +1655,25 @@
             }
 
             sp<RefBase> obj;
-            if (!msg->findObject("native-window", &obj)) {
-                obj.clear();
-            }
+            CHECK(msg->findObject("surface", &obj));
 
             sp<AMessage> format;
             CHECK(msg->findMessage("format", &format));
 
+            int32_t push;
+            if (msg->findInt32("push-blank-buffers-on-shutdown", &push) && push != 0) {
+                mFlags |= kFlagPushBlankBuffersOnShutdown;
+            }
+
             if (obj != NULL) {
                 format->setObject("native-window", obj);
-
-                status_t err = setNativeWindow(
-                    static_cast<NativeWindowWrapper *>(obj.get())
-                        ->getSurfaceTextureClient());
-
+                status_t err = handleSetSurface(static_cast<Surface *>(obj.get()));
                 if (err != OK) {
                     PostReplyWithError(replyID, err);
                     break;
                 }
             } else {
-                setNativeWindow(NULL);
+                handleSetSurface(NULL);
             }
 
             mReplyID = replyID;
@@ -1638,7 +1700,67 @@
             break;
         }
 
+        case kWhatSetSurface:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            status_t err = OK;
+            sp<Surface> surface;
+
+            switch (mState) {
+                case CONFIGURED:
+                case STARTED:
+                case FLUSHED:
+                {
+                    sp<RefBase> obj;
+                    (void)msg->findObject("surface", &obj);
+                    surface = static_cast<Surface *>(obj.get());
+                    if (mSurface == NULL) {
+                        // do not support setting a surface if one was not set before
+                        err = INVALID_OPERATION;
+                    } else if (obj == NULL) {
+                        // do not support unsetting surface
+                        err = BAD_VALUE;
+                    } else {
+                        err = connectToSurface(surface);
+                        if (err == BAD_VALUE) {
+                            // assuming reconnecting to same surface
+                            // TODO: check if it is the same surface
+                            err = OK;
+                        } else {
+                            if (err == OK) {
+                                if (mFlags & kFlagUsesSoftwareRenderer) {
+                                    if (mSoftRenderer != NULL
+                                            && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
+                                        pushBlankBuffersToNativeWindow(mSurface.get());
+                                    }
+                                    mSoftRenderer = new SoftwareRenderer(surface);
+                                    // TODO: check if this was successful
+                                } else {
+                                    err = mCodec->setSurface(surface);
+                                }
+                            }
+                            if (err == OK) {
+                                (void)disconnectFromSurface();
+                                mSurface = surface;
+                            }
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    err = INVALID_OPERATION;
+                    break;
+            }
+
+            PostReplyWithError(replyID, err);
+            break;
+        }
+
         case kWhatCreateInputSurface:
+        case kWhatUsePersistentInputSurface:
         {
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
@@ -1650,10 +1772,17 @@
             }
 
             mReplyID = replyID;
-            mCodec->initiateCreateInputSurface();
+            if (msg->what() == kWhatCreateInputSurface) {
+                mCodec->initiateCreateInputSurface();
+            } else {
+                sp<RefBase> obj;
+                CHECK(msg->findObject("input-surface", &obj));
+
+                mCodec->initiateUsePersistentInputSurface(
+                        static_cast<PersistentSurface *>(obj.get()));
+            }
             break;
         }
-
         case kWhatStart:
         {
             sp<AReplyToken> replyID;
@@ -1729,6 +1858,10 @@
                     msg->what() == kWhatStop /* keepComponentAllocated */);
 
             returnBuffersToCodec();
+
+            if (mSoftRenderer != NULL && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
+                pushBlankBuffersToNativeWindow(mSurface.get());
+            }
             break;
         }
 
@@ -2094,7 +2227,7 @@
         mSoftRenderer = NULL;
 
         mCrypto.clear();
-        setNativeWindow(NULL);
+        handleSetSurface(NULL);
 
         mInputFormat.clear();
         mOutputFormat.clear();
@@ -2400,37 +2533,44 @@
     return index;
 }
 
-status_t MediaCodec::setNativeWindow(
-        const sp<Surface> &surfaceTextureClient) {
-    status_t err;
-
-    if (mNativeWindow != NULL) {
-        err = native_window_api_disconnect(
-                mNativeWindow.get(), NATIVE_WINDOW_API_MEDIA);
-
-        if (err != OK) {
-            ALOGW("native_window_api_disconnect returned an error: %s (%d)",
-                    strerror(-err), err);
+status_t MediaCodec::connectToSurface(const sp<Surface> &surface) {
+    status_t err = OK;
+    if (surface != NULL) {
+        err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+        if (err == BAD_VALUE) {
+            ALOGI("native window already connected. Assuming no change of surface");
+        } else if (err != OK) {
+            ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
         }
-
-        mNativeWindow.clear();
     }
+    return err;
+}
 
-    if (surfaceTextureClient != NULL) {
-        err = native_window_api_connect(
-                surfaceTextureClient.get(), NATIVE_WINDOW_API_MEDIA);
-
+status_t MediaCodec::disconnectFromSurface() {
+    status_t err = OK;
+    if (mSurface != NULL) {
+        err = native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
         if (err != OK) {
-            ALOGE("native_window_api_connect returned an error: %s (%d)",
-                    strerror(-err), err);
-
-            return err;
+            ALOGW("native_window_api_disconnect returned an error: %s (%d)", strerror(-err), err);
         }
-
-        mNativeWindow = surfaceTextureClient;
+        // assume disconnected even on error
+        mSurface.clear();
     }
+    return err;
+}
 
-    return OK;
+status_t MediaCodec::handleSetSurface(const sp<Surface> &surface) {
+    status_t err = OK;
+    if (mSurface != NULL) {
+        (void)disconnectFromSurface();
+    }
+    if (surface != NULL) {
+        err = connectToSurface(surface);
+        if (err == OK) {
+            mSurface = surface;
+        }
+    }
+    return err;
 }
 
 void MediaCodec::onInputBufferAvailable() {
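
The MediaCodec.cpp hunks above expose two new client-facing calls, setSurface() and usePersistentInputSurface(), both answered through PostAndAwaitResponse and backed by the new connectToSurface()/disconnectFromSurface()/handleSetSurface() helpers. A minimal caller-side sketch of the dynamic output-surface path follows; the codec name, format values and the two Surface arguments are illustrative assumptions, not part of this patch.

// Sketch only, not part of this patch: exercises MediaCodec::setSurface().
// "initialSurface"/"newSurface" are hypothetical application-provided surfaces.
#include <gui/Surface.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>

using namespace android;

status_t decodeWithSwitchableSurface(
        const sp<Surface> &initialSurface, const sp<Surface> &newSurface) {
    sp<ALooper> looper = new ALooper;
    looper->start();

    sp<MediaCodec> codec = MediaCodec::CreateByType(
            looper, "video/avc", false /* encoder */);
    if (codec == NULL) {
        return NO_INIT;
    }

    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1280);
    format->setInt32("height", 720);

    // A surface must be supplied here: setSurface() later returns
    // INVALID_OPERATION if no surface was set, and BAD_VALUE for a NULL one.
    status_t err = codec->configure(
            format, initialSurface, NULL /* crypto */, 0 /* flags */);
    if (err != OK) {
        return err;
    }
    err = codec->start();
    if (err != OK) {
        return err;
    }

    // Accepted in CONFIGURED, STARTED and FLUSHED states.
    return codec->setSurface(newSurface);
}
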
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6a6f99d..e212fb8 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -77,6 +77,10 @@
                         infos.push_back(gCodecList->getCodecInfo(i));
                     }
                 }
+            } else {
+                // failure to initialize may be temporary. retry on next call.
+                delete gCodecList;
+                gCodecList = NULL;
             }
         }
     }
@@ -152,7 +156,7 @@
     OMXClient client;
     mInitCheck = client.connect();
     if (mInitCheck != OK) {
-        return;
+        return;  // this may fail if IMediaPlayerService is not available.
     }
     mOMX = client.interface();
     parseXMLFile(codecs_xml);
@@ -866,14 +870,16 @@
         return -EINVAL;
     }
 
-    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio,
+    // measured-frame-rate, measured-blocks-per-second: range
     // quality: range + default + [scale]
     // complexity: range + default
     bool found;
 
     if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
             || name == "blocks-per-second" || name == "complexity"
-            || name == "frame-rate" || name == "quality" || name == "size") {
+            || name == "frame-rate" || name == "quality" || name == "size"
+            || name == "measured-blocks-per-second" || name == "measured-frame-rate") {
         AString min, max;
         if (msg->findString("min", &min) && msg->findString("max", &max)) {
             min.append("-");
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index b272448..9b57733 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -20,6 +20,7 @@
 
 #include <inttypes.h>
 
+#include <gui/IGraphicBufferConsumer.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
@@ -29,10 +30,11 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
-#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
@@ -258,9 +260,10 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &format,
         const sp<MediaSource> &source,
+        const sp<IGraphicBufferConsumer> &consumer,
         uint32_t flags) {
     sp<MediaCodecSource> mediaSource =
-            new MediaCodecSource(looper, format, source, flags);
+            new MediaCodecSource(looper, format, source, consumer, flags);
 
     if (mediaSource->init() == OK) {
         return mediaSource;
@@ -328,6 +331,7 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &outputFormat,
         const sp<MediaSource> &source,
+        const sp<IGraphicBufferConsumer> &consumer,
         uint32_t flags)
     : mLooper(looper),
       mOutputFormat(outputFormat),
@@ -337,6 +341,7 @@
       mStarted(false),
       mStopping(false),
       mDoMoreWorkPending(false),
+      mGraphicBufferConsumer(consumer),
       mFirstSampleTimeUs(-1ll),
       mEncoderReachedEOS(false),
       mErrorCode(OK) {
@@ -418,7 +423,15 @@
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
         CHECK(mIsVideo);
 
-        err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+        if (mGraphicBufferConsumer != NULL) {
+            // When using persistent surface, we are only interested in the
+            // consumer, but have to use PersistentSurface as a wrapper to
+            // pass consumer over messages (similar to BufferProducerWrapper)
+            err = mEncoder->usePersistentInputSurface(
+                    new PersistentSurface(NULL, mGraphicBufferConsumer));
+        } else {
+            err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+        }
 
         if (err != OK) {
             return err;
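
With MediaCodecSource::Create() now accepting an IGraphicBufferConsumer, an encoder can be fed from a persistent input surface whose consumer end was created elsewhere (for example through the IOMX::createPersistentInputSurface() plumbing added further down). A hedged sketch of such a caller is below; the format keys are illustrative and not exhaustive, and the consumer is assumed to be supplied by the application or recorder.

// Sketch only, not part of this patch: feeds an encoder from a persistent
// input surface's consumer end. "persistentConsumer" is an assumption here.
#include <gui/IGraphicBufferConsumer.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

sp<MediaCodecSource> createSurfaceEncoder(
        const sp<ALooper> &looper,
        const sp<IGraphicBufferConsumer> &persistentConsumer) {
    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1280);
    format->setInt32("height", 720);
    format->setInt32("bitrate", 2000000);
    format->setInt32("frame-rate", 30);
    format->setInt32("i-frame-interval", 1);

    // With a non-NULL consumer, the code above calls
    // usePersistentInputSurface() instead of createInputSurface().
    return MediaCodecSource::Create(
            looper, format, NULL /* source */, persistentConsumer,
            MediaCodecSource::FLAG_USE_SURFACE_INPUT);
}
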
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 8030a36..97264fb 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -56,6 +56,10 @@
         mPlaybackRate(0.0) {
     mMediaClock = new MediaClock;
 
+    // initialize settings
+    mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
+    mPlaybackSettings.mSpeed = mPlaybackRate;
+
     mLooper = new ALooper;
     mLooper->setName("MediaSync");
     mLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
@@ -75,12 +79,17 @@
     }
 }
 
-status_t MediaSync::configureSurface(const sp<IGraphicBufferProducer> &output) {
+status_t MediaSync::setSurface(const sp<IGraphicBufferProducer> &output) {
     Mutex::Autolock lock(mMutex);
 
     // TODO: support surface change.
     if (mOutput != NULL) {
-        ALOGE("configureSurface: output surface has already been configured.");
+        ALOGE("setSurface: output surface has already been configured.");
+        return INVALID_OPERATION;
+    }
+
+    if (output == NULL && mSyncSettings.mSource == AVSYNC_SOURCE_VSYNC) {
+        ALOGE("setSurface: output surface is used as sync source and cannot be removed.");
         return INVALID_OPERATION;
     }
 
@@ -94,7 +103,7 @@
                             true /* producerControlledByApp */,
                             &queueBufferOutput);
         if (status != NO_ERROR) {
-            ALOGE("configureSurface: failed to connect (%d)", status);
+            ALOGE("setSurface: failed to connect (%d)", status);
             return status;
         }
 
@@ -105,25 +114,44 @@
 }
 
 // |audioTrack| is used only for querying information.
-status_t MediaSync::configureAudioTrack(
-        const sp<AudioTrack> &audioTrack, uint32_t nativeSampleRateInHz) {
+status_t MediaSync::setAudioTrack(const sp<AudioTrack> &audioTrack) {
     Mutex::Autolock lock(mMutex);
 
     // TODO: support audio track change.
     if (mAudioTrack != NULL) {
-        ALOGE("configureAudioTrack: audioTrack has already been configured.");
+        ALOGE("setAudioTrack: audioTrack has already been configured.");
         return INVALID_OPERATION;
     }
 
-    if (audioTrack != NULL && nativeSampleRateInHz <= 0) {
-        ALOGE("configureAudioTrack: native sample rate should be positive.");
-        return BAD_VALUE;
+    if (audioTrack == NULL && mSyncSettings.mSource == AVSYNC_SOURCE_AUDIO) {
+        ALOGE("setAudioTrack: audioTrack is used as sync source and cannot be removed.");
+        return INVALID_OPERATION;
     }
 
-    mAudioTrack = audioTrack;
-    mNativeSampleRateInHz = nativeSampleRateInHz;
+    if (audioTrack != NULL) {
+        // check if audio track supports the playback settings
+        if (mPlaybackSettings.mSpeed != 0.f
+                && audioTrack->setPlaybackRate(mPlaybackSettings) != OK) {
+            ALOGE("playback settings are not supported by the audio track");
+            return INVALID_OPERATION;
+        }
+        uint32_t nativeSampleRateInHz = audioTrack->getOriginalSampleRate();
+        if (nativeSampleRateInHz <= 0) {
+            ALOGE("setAudioTrack: native sample rate should be positive.");
+            return BAD_VALUE;
+        }
+        mAudioTrack = audioTrack;
+        mNativeSampleRateInHz = nativeSampleRateInHz;
+        (void)setPlaybackSettings_l(mPlaybackSettings);
+    } else {
+        mAudioTrack = NULL;
+        mNativeSampleRateInHz = 0;
+    }
 
-    return NO_ERROR;
+    // potentially resync to new source
+    resync_l();
+    return OK;
 }
 
 status_t MediaSync::createInputSurface(
@@ -152,27 +180,37 @@
         bufferConsumer->consumerConnect(listener, false /* controlledByApp */);
     if (status == NO_ERROR) {
         bufferConsumer->setConsumerName(String8("MediaSync"));
+        // propagate usage bits from output surface
+        int usage = 0;
+        mOutput->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+        bufferConsumer->setConsumerUsageBits(usage);
         *outBufferProducer = bufferProducer;
         mInput = bufferConsumer;
     }
     return status;
 }
 
-status_t MediaSync::setPlaybackRate(float rate) {
-    if (rate < 0.0) {
-        return BAD_VALUE;
+void MediaSync::resync_l() {
+    AVSyncSource src = mSyncSettings.mSource;
+    if (src == AVSYNC_SOURCE_DEFAULT) {
+        if (mAudioTrack != NULL) {
+            src = AVSYNC_SOURCE_AUDIO;
+        } else {
+            src = AVSYNC_SOURCE_SYSTEM_CLOCK;
+        }
     }
 
-    Mutex::Autolock lock(mMutex);
+    // TODO: resync ourselves to the current clock (e.g. on sync source change)
+    updatePlaybackRate_l(mPlaybackRate);
+}
 
+void MediaSync::updatePlaybackRate_l(float rate) {
     if (rate > mPlaybackRate) {
         mNextBufferItemMediaUs = -1;
     }
     mPlaybackRate = rate;
     mMediaClock->setPlaybackRate(rate);
     onDrainVideo_l();
-
-    return OK;
 }
 
 sp<const MediaClock> MediaSync::getMediaClock() {
@@ -264,6 +302,95 @@
     mInput->setConsumerName(String8(name.c_str()));
 }
 
+status_t MediaSync::setVideoFrameRateHint(float rate) {
+    // ignored until we add the FrameScheduler
+    return rate >= 0.f ? OK : BAD_VALUE;
+}
+
+float MediaSync::getVideoFrameRate() {
+    // we don't know the frame rate
+    return -1.f;
+}
+
+status_t MediaSync::setSyncSettings(const AVSyncSettings &syncSettings) {
+    // validate settings
+    if (syncSettings.mSource >= AVSYNC_SOURCE_MAX
+            || syncSettings.mAudioAdjustMode >= AVSYNC_AUDIO_ADJUST_MODE_MAX
+            || syncSettings.mTolerance < 0.f
+            || syncSettings.mTolerance >= AVSYNC_TOLERANCE_MAX) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    // verify that we have the sync source
+    switch (syncSettings.mSource) {
+        case AVSYNC_SOURCE_AUDIO:
+            if (mAudioTrack == NULL) {
+                ALOGE("setSyncSettings: audio sync source requires an audio track");
+                return BAD_VALUE;
+            }
+            break;
+        case AVSYNC_SOURCE_VSYNC:
+            if (mOutput == NULL) {
+                ALOGE("setSyncSettings: vsync sync source requires an output surface");
+                return BAD_VALUE;
+            }
+            break;
+        default:
+            break;
+    }
+
+    mSyncSettings = syncSettings;
+    resync_l();
+    return OK;
+}
+
+void MediaSync::getSyncSettings(AVSyncSettings *syncSettings) {
+    Mutex::Autolock lock(mMutex);
+    *syncSettings = mSyncSettings;
+}
+
+status_t MediaSync::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock lock(mMutex);
+
+    status_t err = setPlaybackSettings_l(rate);
+    if (err == OK) {
+        // TODO: adjust rate if using VSYNC as source
+        updatePlaybackRate_l(rate.mSpeed);
+    }
+    return err;
+}
+
+status_t MediaSync::setPlaybackSettings_l(const AudioPlaybackRate &rate) {
+    if (rate.mSpeed < 0.f || rate.mPitch < 0.f) {
+        // We don't validate other audio settings.
+        // They will be validated when/if an audio track is set.
+        return BAD_VALUE;
+    }
+
+    if (mAudioTrack != NULL) {
+        if (rate.mSpeed == 0.f) {
+            mAudioTrack->pause();
+        } else {
+            status_t err = mAudioTrack->setPlaybackRate(rate);
+            if (err != OK) {
+                return BAD_VALUE;
+            }
+
+            // ignore errors
+            (void)mAudioTrack->start();
+        }
+    }
+    mPlaybackSettings = rate;
+    return OK;
+}
+
+void MediaSync::getPlaybackSettings(AudioPlaybackRate *rate) {
+    Mutex::Autolock lock(mMutex);
+    *rate = mPlaybackSettings;
+}
+
 int64_t MediaSync::getRealTime(int64_t mediaTimeUs, int64_t nowUs) {
     int64_t realUs;
     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
@@ -430,7 +557,16 @@
         return;
     }
 
+    if (mBuffersFromInput.indexOfKey(bufferItem.mGraphicBuffer->getId()) >= 0) {
+        // Something is wrong since this buffer should be in our hands; bail.
+        mInput->consumerDisconnect();
+        onAbandoned_l(true /* isInput */);
+        return;
+    }
+    mBuffersFromInput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
+
     mBufferItems.push_back(bufferItem);
+
     if (mBufferItems.size() == 1) {
         onDrainVideo_l();
     }
@@ -497,9 +633,19 @@
 
 void MediaSync::returnBufferToInput_l(
         const sp<GraphicBuffer> &buffer, const sp<Fence> &fence) {
+    ssize_t ix = mBuffersFromInput.indexOfKey(buffer->getId());
+    if (ix < 0) {
+        // The buffer is unknown, something is wrong, bail.
+        mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+        onAbandoned_l(false /* isInput */);
+        return;
+    }
+    sp<GraphicBuffer> oldBuffer = mBuffersFromInput.valueAt(ix);
+    mBuffersFromInput.removeItemsAt(ix);
+
     // Attach and release the buffer back to the input.
     int consumerSlot;
-    status_t status = mInput->attachBuffer(&consumerSlot, buffer);
+    status_t status = mInput->attachBuffer(&consumerSlot, oldBuffer);
     ALOGE_IF(status != NO_ERROR, "attaching buffer to input failed (%d)", status);
     if (status == NO_ERROR) {
         status = mInput->releaseBuffer(consumerSlot, 0 /* frameNumber */,
@@ -512,7 +658,7 @@
         return;
     }
 
-    ALOGV("released buffer %#llx to input", (long long)buffer->getId());
+    ALOGV("released buffer %#llx to input", (long long)oldBuffer->getId());
 
     // Notify any waiting onFrameAvailable calls.
     --mNumOutstandingBuffers;
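
Taken together, the MediaSync.cpp hunks replace configureSurface()/configureAudioTrack()/setPlaybackRate() with setSurface(), setAudioTrack(), setSyncSettings() and setPlaybackSettings(). The sketch below shows one plausible setup order under the new API; the MediaSync instance, output producer and AudioTrack are assumed to exist already, and the AVSyncSettings default constructor and header locations are assumptions taken from elsewhere in the tree.

// Sketch only, not part of this patch: one plausible setup order for the
// renamed MediaSync entry points.
#include <gui/IGraphicBufferProducer.h>
#include <media/AVSyncSettings.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioTrack.h>
#include <media/stagefright/MediaSync.h>

using namespace android;

status_t setUpSync(
        const sp<MediaSync> &sync,
        const sp<IGraphicBufferProducer> &output,
        const sp<AudioTrack> &audioTrack) {
    // configureSurface()/configureAudioTrack() become setSurface()/
    // setAudioTrack(); the native sample rate is now read from the track.
    status_t err = sync->setSurface(output);
    if (err != OK) {
        return err;
    }
    err = sync->setAudioTrack(audioTrack);
    if (err != OK) {
        return err;
    }

    AVSyncSettings syncSettings;  // default-constructed settings (assumption)
    err = sync->setSyncSettings(syncSettings);
    if (err != OK) {
        return err;
    }

    AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
    rate.mSpeed = 1.0f;  // a speed of 0.f pauses the audio track (see above)
    return sync->setPlaybackSettings(rate);
}
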
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 230c1f7..44695ce 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -104,6 +104,14 @@
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer);
 
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    virtual status_t usePersistentInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer);
+
     virtual status_t signalEndOfInputStream(node_id node);
 
     virtual status_t allocateBuffer(
@@ -340,6 +348,21 @@
     return err;
 }
 
+status_t MuxOMX::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    // TODO: local or remote? Always use remote for now
+    return mRemoteOMX->createPersistentInputSurface(
+            bufferProducer, bufferConsumer);
+}
+
+status_t MuxOMX::usePersistentInputSurface(
+        node_id node, OMX_U32 port_index,
+        const sp<IGraphicBufferConsumer> &bufferConsumer) {
+    return getOMX(node)->usePersistentInputSurface(
+            node, port_index, bufferConsumer);
+}
+
 status_t MuxOMX::signalEndOfInputStream(node_id node) {
     return getOMX(node)->signalEndOfInputStream(node);
 }
@@ -400,10 +423,16 @@
     sp<IBinder> binder = sm->getService(String16("media.player"));
     sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
 
-    CHECK(service.get() != NULL);
+    if (service.get() == NULL) {
+        ALOGE("Cannot obtain IMediaPlayerService");
+        return NO_INIT;
+    }
 
     mOMX = service->getOMX();
-    CHECK(mOMX.get() != NULL);
+    if (mOMX.get() == NULL) {
+        ALOGE("Cannot obtain IOMX");
+        return NO_INIT;
+    }
 
     if (!mOMX->livesLocally(0 /* node */, getpid())) {
         ALOGI("Using client-side OMX mux.");
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 8d4bab8..aa6a7c0 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -43,6 +43,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/SkipCutBuffer.h>
 #include <utils/Vector.h>
@@ -1783,35 +1784,6 @@
     return OK;
 }
 
-status_t OMXCodec::applyRotation() {
-    sp<MetaData> meta = mSource->getFormat();
-
-    int32_t rotationDegrees;
-    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    uint32_t transform;
-    switch (rotationDegrees) {
-        case 0: transform = 0; break;
-        case 90: transform = HAL_TRANSFORM_ROT_90; break;
-        case 180: transform = HAL_TRANSFORM_ROT_180; break;
-        case 270: transform = HAL_TRANSFORM_ROT_270; break;
-        default: transform = 0; break;
-    }
-
-    status_t err = OK;
-
-    if (transform) {
-        err = native_window_set_buffers_transform(
-                mNativeWindow.get(), transform);
-        ALOGE("native_window_set_buffers_transform failed: %s (%d)",
-                strerror(-err), -err);
-    }
-
-    return err;
-}
-
 status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
     // Get the number of buffers needed.
     OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -1825,30 +1797,11 @@
         return err;
     }
 
-    err = native_window_set_buffers_dimensions(
-            mNativeWindow.get(),
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight);
+    sp<MetaData> meta = mSource->getFormat();
 
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_dimensions failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_format(
-            mNativeWindow.get(),
-            def.format.video.eColorFormat);
-
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_format failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = applyRotation();
-    if (err != OK) {
-        return err;
+    int32_t rotationDegrees;
+    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
+        rotationDegrees = 0;
     }
 
     // Set up the native window.
@@ -1859,34 +1812,19 @@
         // XXX: Currently this error is logged, but not fatal.
         usage = 0;
     }
+
     if (mFlags & kEnableGrallocUsageProtected) {
         usage |= GRALLOC_USAGE_PROTECTED;
     }
 
-    // Make sure to check whether either Stagefright or the video decoder
-    // requested protected buffers.
-    if (usage & GRALLOC_USAGE_PROTECTED) {
-        // Verify that the ANativeWindow sends images directly to
-        // SurfaceFlinger.
-        int queuesToNativeWindow = 0;
-        err = mNativeWindow->query(
-                mNativeWindow.get(), NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER,
-                &queuesToNativeWindow);
-        if (err != 0) {
-            ALOGE("error authenticating native window: %d", err);
-            return err;
-        }
-        if (queuesToNativeWindow != 1) {
-            ALOGE("native window could not be authenticated");
-            return PERMISSION_DENIED;
-        }
-    }
-
-    ALOGV("native_window_set_usage usage=0x%x", usage);
-    err = native_window_set_usage(
-            mNativeWindow.get(), usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
+    err = setNativeWindowSizeFormatAndUsage(
+            mNativeWindow.get(),
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.format.video.eColorFormat,
+            rotationDegrees,
+            usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
     if (err != 0) {
-        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
         return err;
     }
 
@@ -2053,156 +1991,6 @@
     return bufInfo;
 }
 
-status_t OMXCodec::pushBlankBuffersToNativeWindow() {
-    status_t err = NO_ERROR;
-    ANativeWindowBuffer* anb = NULL;
-    int numBufs = 0;
-    int minUndequeuedBufs = 0;
-
-    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
-    // no frames get dropped by SurfaceFlinger assuming that these are video
-    // frames.
-    err = native_window_api_disconnect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_MEDIA);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_api_connect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_CPU);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_dimensions(mNativeWindow.get(), 1, 1);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_dimensions failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_buffers_format(mNativeWindow.get(), HAL_PIXEL_FORMAT_RGBX_8888);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_format failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_usage(mNativeWindow.get(),
-            GRALLOC_USAGE_SW_WRITE_OFTEN);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_usage failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_scaling_mode(mNativeWindow.get(),
-            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (err != OK) {
-        ALOGE("error pushing blank frames: set_scaling_mode failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = mNativeWindow->query(mNativeWindow.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
-                "failed: %s (%d)", strerror(-err), -err);
-        goto error;
-    }
-
-    numBufs = minUndequeuedBufs + 1;
-    err = native_window_set_buffer_count(mNativeWindow.get(), numBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    // We  push numBufs + 1 buffers to ensure that we've drawn into the same
-    // buffer twice.  This should guarantee that the buffer has been displayed
-    // on the screen and then been replaced, so an previous video frames are
-    // guaranteed NOT to be currently displayed.
-    for (int i = 0; i < numBufs + 1; i++) {
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &anb);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
-
-        // Fill the buffer with the a 1x1 checkerboard pattern ;)
-        uint32_t* img = NULL;
-        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: lock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        *img = 0;
-
-        err = buf->unlock();
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: unlock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        err = mNativeWindow->queueBuffer(mNativeWindow.get(),
-                buf->getNativeBuffer(), -1);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        anb = NULL;
-    }
-
-error:
-
-    if (err != NO_ERROR) {
-        // Clean up after an error.
-        if (anb != NULL) {
-            mNativeWindow->cancelBuffer(mNativeWindow.get(), anb, -1);
-        }
-
-        native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-
-        return err;
-    } else {
-        // Clean up after success.
-        err = native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        err = native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        return NO_ERROR;
-    }
-}
-
 int64_t OMXCodec::getDecodingTimeUs() {
     CHECK(mIsEncoder && mIsVideo);
 
@@ -2784,7 +2572,7 @@
                     // them has made it to the display.  This allows the OMX
                     // component teardown to zero out any protected buffers
                     // without the risk of scanning out one of those buffers.
-                    pushBlankBuffersToNativeWindow();
+                    pushBlankBuffersToNativeWindow(mNativeWindow.get());
                 }
 
                 setState(IDLE_TO_LOADED);
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index d577034..4297549 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -753,7 +753,9 @@
     oggpack_buffer bits;
     oggpack_readinit(&bits, &ref);
 
-    CHECK_EQ(oggpack_read(&bits, 8), type);
+    if (oggpack_read(&bits, 8) != type) {
+        return ERROR_MALFORMED;
+    }
     for (size_t i = 0; i < 6; ++i) {
         oggpack_read(&bits, 8);  // skip 'vorbis'
     }
@@ -761,7 +763,9 @@
     switch (type) {
         case 1:
         {
-            CHECK_EQ(0, _vorbis_unpack_info(&mVi, &bits));
+            if (0 != _vorbis_unpack_info(&mVi, &bits)) {
+                return ERROR_MALFORMED;
+            }
 
             mMeta->setData(kKeyVorbisInfo, 0, data, size);
             mMeta->setInt32(kKeySampleRate, mVi.rate);
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
new file mode 100644
index 0000000..6b62e43
--- /dev/null
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SurfaceUtils"
+#include <utils/Log.h>
+
+#include <media/stagefright/SurfaceUtils.h>
+
+#include <gui/Surface.h>
+
+namespace android {
+
+status_t setNativeWindowSizeFormatAndUsage(
+        ANativeWindow *nativeWindow /* nonnull */,
+        int width, int height, int format, int rotation, int usage) {
+    status_t err = native_window_set_buffers_dimensions(nativeWindow, width, height);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_dimensions failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_set_buffers_format(nativeWindow, format);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_format failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    int transform = 0;
+    if ((rotation % 90) == 0) {
+        switch ((rotation / 90) & 3) {
+            case 1:  transform = HAL_TRANSFORM_ROT_90;  break;
+            case 2:  transform = HAL_TRANSFORM_ROT_180; break;
+            case 3:  transform = HAL_TRANSFORM_ROT_270; break;
+            default: transform = 0;                     break;
+        }
+    }
+
+    err = native_window_set_buffers_transform(nativeWindow, transform);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_transform failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    // Make sure to check whether either Stagefright or the video decoder
+    // requested protected buffers.
+    if (usage & GRALLOC_USAGE_PROTECTED) {
+        // Verify that the ANativeWindow sends images directly to
+        // SurfaceFlinger.
+        int queuesToNativeWindow = 0;
+        err = nativeWindow->query(
+                nativeWindow, NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &queuesToNativeWindow);
+        if (err != NO_ERROR) {
+            ALOGE("error authenticating native window: %s (%d)", strerror(-err), -err);
+            return err;
+        }
+        if (queuesToNativeWindow != 1) {
+            ALOGE("native window could not be authenticated");
+            return PERMISSION_DENIED;
+        }
+    }
+
+    int consumerUsage = 0;
+    err = nativeWindow->query(nativeWindow, NATIVE_WINDOW_CONSUMER_USAGE_BITS, &consumerUsage);
+    if (err != NO_ERROR) {
+        ALOGW("failed to get consumer usage bits. ignoring");
+        err = NO_ERROR;
+    }
+
+    int finalUsage = usage | consumerUsage;
+    ALOGV("gralloc usage: %#x(producer) + %#x(consumer) = %#x", usage, consumerUsage, finalUsage);
+    err = native_window_set_usage(nativeWindow, finalUsage);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_set_scaling_mode(
+            nativeWindow, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_scaling_mode failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    ALOGD("set up nativeWindow %p for %dx%d, color %#x, rotation %d, usage %#x",
+            nativeWindow, width, height, format, rotation, finalUsage);
+    return NO_ERROR;
+}
+
+status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
+    status_t err = NO_ERROR;
+    ANativeWindowBuffer* anb = NULL;
+    int numBufs = 0;
+    int minUndequeuedBufs = 0;
+
+    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
+    // no frames get dropped by SurfaceFlinger assuming that these are video
+    // frames.
+    err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_CPU);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
+        (void)native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        return err;
+    }
+
+    err = setNativeWindowSizeFormatAndUsage(
+            nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN);
+    if (err != NO_ERROR) {
+        goto error;
+    }
+
+    static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->allowAllocation(true);
+
+    err = nativeWindow->query(nativeWindow,
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
+                "failed: %s (%d)", strerror(-err), -err);
+        goto error;
+    }
+
+    numBufs = minUndequeuedBufs + 1;
+    err = native_window_set_buffer_count(nativeWindow, numBufs);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)", strerror(-err), -err);
+        goto error;
+    }
+
+    // We push numBufs + 1 buffers to ensure that we've drawn into the same
+    // buffer twice.  This should guarantee that the buffer has been displayed
+    // on the screen and then been replaced, so any previous video frames are
+    // guaranteed NOT to be currently displayed.
+    for (int i = 0; i < numBufs + 1; i++) {
+        err = native_window_dequeue_buffer_and_wait(nativeWindow, &anb);
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
+                    strerror(-err), -err);
+            break;
+        }
+
+        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+
+        // Fill the buffer with a 1x1 checkerboard pattern ;)
+        uint32_t *img = NULL;
+        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: lock failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        *img = 0;
+
+        err = buf->unlock();
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: unlock failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        err = nativeWindow->queueBuffer(nativeWindow, buf->getNativeBuffer(), -1);
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        anb = NULL;
+    }
+
+error:
+
+    if (anb != NULL) {
+        nativeWindow->cancelBuffer(nativeWindow, anb, -1);
+        anb = NULL;
+    }
+
+    // Clean up after success or error.
+    status_t err2 = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_CPU);
+    if (err2 != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err2), -err2);
+        if (err == NO_ERROR) {
+            err = err2;
+        }
+    }
+
+    err2 = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    if (err2 != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
+        if (err == NO_ERROR) {
+            err = err2;
+        }
+    }
+
+    return err;
+}
+
+}  // namespace android
+
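
OMXCodec.cpp above is converted to the two helpers that this new SurfaceUtils.cpp centralizes. A sketch of a generic caller is shown below; the resolution, pixel format and usage bits are illustrative values, not ones taken from this patch.

// Sketch only, not part of this patch: a generic caller of the new helpers.
#include <gui/Surface.h>
#include <hardware/gralloc.h>
#include <media/stagefright/SurfaceUtils.h>
#include <system/graphics.h>

using namespace android;

status_t prepareAndClearWindow(const sp<Surface> &surface) {
    status_t err = setNativeWindowSizeFormatAndUsage(
            surface.get(), 1280, 720, HAL_PIXEL_FORMAT_YV12,
            0 /* rotation */, GRALLOC_USAGE_HW_TEXTURE);
    if (err != NO_ERROR) {
        return err;
    }
    // On teardown, make sure no stale (possibly protected) frame stays on
    // screen; this reconnects as a CPU client and queues blank buffers.
    return pushBlankBuffersToNativeWindow(surface.get());
}
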
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 0d8e64a..413628d 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -181,6 +181,11 @@
         msg->setInt32("rotation-degrees", rotationDegrees);
     }
 
+    int32_t fps;
+    if (meta->findInt32(kKeyFrameRate, &fps)) {
+        msg->setInt32("frame-rate", fps);
+    }
+
     uint32_t type;
     const void *data;
     size_t size;
@@ -588,6 +593,11 @@
         meta->setInt32(kKeyMaxHeight, maxHeight);
     }
 
+    int32_t fps;
+    if (msg->findInt32("frame-rate", &fps)) {
+        meta->setInt32(kKeyFrameRate, fps);
+    }
+
     // reassemble the csd data into its original form
     sp<ABuffer> csd0;
     if (msg->findBuffer("csd-0", &csd0)) {
@@ -891,5 +901,39 @@
             || (t0.mSeq == t1.mSeq && t0.mTimeUs < t1.mTimeUs);
 }
 
+void writeToAMessage(sp<AMessage> msg, const AudioPlaybackRate &rate) {
+    msg->setFloat("speed", rate.mSpeed);
+    msg->setFloat("pitch", rate.mPitch);
+    msg->setInt32("audio-fallback-mode", rate.mFallbackMode);
+    msg->setInt32("audio-stretch-mode", rate.mStretchMode);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, AudioPlaybackRate *rate /* nonnull */) {
+    *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    CHECK(msg->findFloat("speed", &rate->mSpeed));
+    CHECK(msg->findFloat("pitch", &rate->mPitch));
+    CHECK(msg->findInt32("audio-fallback-mode", (int32_t *)&rate->mFallbackMode));
+    CHECK(msg->findInt32("audio-stretch-mode", (int32_t *)&rate->mStretchMode));
+}
+
+void writeToAMessage(sp<AMessage> msg, const AVSyncSettings &sync, float videoFpsHint) {
+    msg->setInt32("sync-source", sync.mSource);
+    msg->setInt32("audio-adjust-mode", sync.mAudioAdjustMode);
+    msg->setFloat("tolerance", sync.mTolerance);
+    msg->setFloat("video-fps", videoFpsHint);
+}
+
+void readFromAMessage(
+        const sp<AMessage> &msg,
+        AVSyncSettings *sync /* nonnull */,
+        float *videoFps /* nonnull */) {
+    AVSyncSettings settings;
+    CHECK(msg->findInt32("sync-source", (int32_t *)&settings.mSource));
+    CHECK(msg->findInt32("audio-adjust-mode", (int32_t *)&settings.mAudioAdjustMode));
+    CHECK(msg->findFloat("tolerance", &settings.mTolerance));
+    CHECK(msg->findFloat("video-fps", videoFps));
+    *sync = settings;
+}
+
 }  // namespace android
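
The Utils.cpp additions give AudioPlaybackRate and AVSyncSettings a symmetric AMessage encoding. A small round-trip sketch follows; it assumes the matching declarations are exported from media/stagefright/Utils.h as part of this change.

// Sketch only, not part of this patch: round-trips AudioPlaybackRate through
// an AMessage using the helpers defined above.
#include <media/AudioResamplerPublic.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

void roundTripPlaybackRate() {
    AudioPlaybackRate in = AUDIO_PLAYBACK_RATE_DEFAULT;
    in.mSpeed = 1.5f;

    sp<AMessage> msg = new AMessage;
    writeToAMessage(msg, in);    // writes "speed", "pitch", fallback, stretch

    AudioPlaybackRate out;
    readFromAMessage(msg, &out); // CHECKs that every field is present
    // "out" now carries the same speed/pitch/fallback/stretch values as "in".
}
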
 
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
old mode 100644
new mode 100755
index 06b2163..6afac74
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -625,7 +625,7 @@
         return errType;
     }
 
-    mStride = ALIGN16(mWidth);
+    mStride = mWidth;
 
     if (mInputDataIsMeta) {
         if (mConversionBuffer) {
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 970acf3..e654843 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -661,7 +661,8 @@
         BufferInfo *outputBufferInfo = *outputBufferInfoQueue.begin();
         OMX_BUFFERHEADERTYPE *outputBufferHeader = outputBufferInfo->mHeader;
 
-        if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+        if ((inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) &&
+                inputBufferHeader->nFilledLen == 0) {
             inputBufferInfoQueue.erase(inputBufferInfoQueue.begin());
             inputBufferInfo->mOwnedByUs = false;
             notifyEmptyBufferDone(inputBufferHeader);
@@ -762,6 +763,9 @@
                        encoded_packet->data.frame.sz);
                 outputBufferInfo->mOwnedByUs = false;
                 outputBufferInfoQueue.erase(outputBufferInfoQueue.begin());
+                if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    outputBufferHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+                }
                 notifyFillBufferDone(outputBufferHeader);
             }
         }
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 8f356b6..c559682 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -364,7 +364,7 @@
         } else {
             numFrames = vorbis_dsp_pcmout(
                     mState, (int16_t *)outHeader->pBuffer,
-                    kMaxNumSamplesPerBuffer);
+                    (kMaxNumSamplesPerBuffer / mVi->channels));
 
             if (numFrames < 0) {
                 ALOGE("vorbis_dsp_pcmout returned %d", numFrames);
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
index ecbda36..fa9d630 100644
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -76,6 +76,11 @@
     (new AMessage(kWhatCreateInputSurface, this))->post();
 }
 
+void MediaFilter::initiateUsePersistentInputSurface(
+        const sp<PersistentSurface> & /* surface */) {
+    ALOGW("initiateUsePersistentInputSurface() unsupported");
+}
+
 void MediaFilter::initiateStart() {
     (new AMessage(kWhatStart, this))->post();
 }
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index bb89567..2d9b3d4 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -30,12 +30,11 @@
 namespace android {
 
 MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
-    : mInitCheck(NO_INIT),
+    : mInitCheck((conn != NULL) ? OK : NO_INIT),
       mHTTPConnection(conn),
       mCachedSizeValid(false),
       mCachedSize(0ll),
       mDrmManagerClient(NULL) {
-    mInitCheck = OK;
 }
 
 MediaHTTP::~MediaHTTP() {
@@ -171,6 +170,10 @@
 }
 
 String8 MediaHTTP::getUri() {
+    if (mInitCheck != OK) {
+        return String8::empty();
+    }
+
     String8 uri;
     if (OK == mHTTPConnection->getUri(&uri)) {
         return uri;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index d8c38e7..64a8532 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1503,11 +1503,10 @@
             ALOGV("discarding fetcher-%d", fetcher->getFetcherID());
             fetcher->stopAsync();
         } else {
-            float threshold = -1.0f; // always finish fetching by default
+            float threshold = 0.0f; // default to pause after current block (47Kbytes)
             bool disconnect = false;
             if (timeUs >= 0ll) {
                 // seeking, no need to finish fetching
-                threshold = 0.0f;
                 disconnect = true;
             } else if (delayRemoval) {
                 // adapting, abort if remaining of current segment is over threshold
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 53087b6..5a0deec 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -1424,11 +1424,17 @@
 
     int64_t minDiffUs, maxDiffUs;
     if (mSeekMode == LiveSession::kSeekModeNextSample) {
+        // if the previous fetcher paused in the middle of a segment, we
+        // want to start at a segment that overlaps the last sample
         minDiffUs = -mPlaylist->getTargetDuration();
         maxDiffUs = 0ll;
     } else {
+        // if the previous fetcher paused at the end of a segment, ideally
+        // we want to start at the segment that's roughly aligned with its
+        // next segment, but if the two variants are not well aligned we
+        // adjust the diff to within (-T/2, T/2)
         minDiffUs = -mPlaylist->getTargetDuration() / 2;
-        maxDiffUs = mPlaylist->getTargetDuration();
+        maxDiffUs = mPlaylist->getTargetDuration() / 2;
     }
 
     int32_t oldSeqNumber = mSeqNumber;
@@ -1611,6 +1617,9 @@
                 ALOGE("MPEG2 Transport streams do not contain subtitles.");
                 return ERROR_MALFORMED;
             }
+            if (stream == LiveSession::STREAMTYPE_METADATA) {
+                continue;
+            }
             ATSParser::SourceType type = LiveSession::getSourceTypeForStream(stream);
             sp<AnotherPacketSource> source =
                 static_cast<AnotherPacketSource *>(
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 8bba804..758b2c9 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -21,6 +21,7 @@
 #include "HTTPBase.h"
 #include "TimedEventQueue.h"
 
+#include <media/AudioResamplerPublic.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/OMXClient.h>
@@ -93,6 +94,8 @@
 
     status_t setParameter(int key, const Parcel &request);
     status_t getParameter(int key, Parcel *reply);
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
     status_t invoke(const Parcel &request, Parcel *reply);
     status_t setCacheStatCollectFreq(const Parcel &request);
 
@@ -180,6 +183,7 @@
     sp<MediaSource> mOmxSource;
     sp<MediaSource> mAudioSource;
     AudioPlayer *mAudioPlayer;
+    AudioPlaybackRate mPlaybackSettings;
     int64_t mDurationUs;
 
     int32_t mDisplayWidth;
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index b5487fa..b1ee628 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -95,6 +95,14 @@
             node_id node, OMX_U32 port_index,
             sp<IGraphicBufferProducer> *bufferProducer);
 
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    virtual status_t usePersistentInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer);
+
     virtual status_t signalEndOfInputStream(node_id node);
 
     virtual status_t allocateBuffer(
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index d87b408..03c9a8a 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -29,6 +29,8 @@
 struct OMXMaster;
 class GraphicBufferSource;
 
+status_t StatusFromOMXError(OMX_ERRORTYPE err);
+
 struct OMXNodeInstance {
     OMXNodeInstance(
             OMX *owner, const sp<IOMXObserver> &observer, const char *name);
@@ -81,6 +83,13 @@
     status_t createInputSurface(
             OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer);
 
+    static status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    status_t usePersistentInputSurface(
+            OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer);
+
     status_t signalEndOfInputStream();
 
     status_t allocateBuffer(
@@ -202,6 +211,8 @@
             OMX_BUFFERHEADERTYPE *header,
             OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr);
 
+    status_t createGraphicBufferSource(
+            OMX_U32 portIndex, sp<IGraphicBufferConsumer> consumer = NULL);
     sp<GraphicBufferSource> getGraphicBufferSource();
     void setGraphicBufferSource(const sp<GraphicBufferSource>& bufferSource);
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index ddca437..70d2c69 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -925,6 +925,11 @@
         ALOGV("codec id = %s", codecID);
         ALOGV("codec name = %s", track->GetCodecNameAsUTF8());
 
+        if (codecID == NULL) {
+            ALOGW("unknown codecID is not supported.");
+            continue;
+        }
+
         size_t codecPrivateSize;
         const unsigned char *codecPrivate =
             track->GetCodecPrivate(codecPrivateSize);
@@ -941,10 +946,7 @@
                 const mkvparser::VideoTrack *vtrack =
                     static_cast<const mkvparser::VideoTrack *>(track);
 
-                if (codecID == NULL) {
-                    ALOGW("unknown codecID is not supported.");
-                    continue;
-                } else if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
+                if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
                     meta->setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize);
                 } else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5411821..0d071b2 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -302,9 +302,13 @@
                 // The two checks below shouldn't happen,
                 // we already checked above the stream count matches
                 ssize_t index = newType2PIDs.indexOfKey(temp[i]->type());
-                CHECK(index >= 0);
+                if (index < 0) {
+                    return false;
+                }
                 Vector<int32_t> &newPIDs = newType2PIDs.editValueAt(index);
-                CHECK(newPIDs.size() > 0);
+                if (newPIDs.isEmpty()) {
+                    return false;
+                }
 
                 // get the next PID for temp[i]->type() in the new PID map
                 Vector<int32_t>::iterator it = newPIDs.begin();
@@ -335,13 +339,11 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(br->getBits(1), 0u);
+    br->skipBits(1);  // '0'
     MY_LOGV("  reserved = %u", br->getBits(2));
 
     unsigned section_length = br->getBits(12);
     ALOGV("  section_length = %u", section_length);
-    CHECK_EQ(section_length & 0xc00, 0u);
-    CHECK_LE(section_length, 1021u);
 
     MY_LOGV("  program_number = %u", br->getBits(16));
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -358,7 +360,6 @@
 
     unsigned program_info_length = br->getBits(12);
     ALOGV("  program_info_length = %u", program_info_length);
-    CHECK_EQ(program_info_length & 0xc00, 0u);
 
     br->skipBits(program_info_length * 8);  // skip descriptors
 
@@ -369,8 +370,7 @@
     // final CRC.
     size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
 
-    while (infoBytesRemaining > 0) {
-        CHECK_GE(infoBytesRemaining, 5u);
+    while (infoBytesRemaining >= 5) {
 
         unsigned streamType = br->getBits(8);
         ALOGV("    stream_type = 0x%02x", streamType);
@@ -384,9 +384,6 @@
 
         unsigned ES_info_length = br->getBits(12);
         ALOGV("    ES_info_length = %u", ES_info_length);
-        CHECK_EQ(ES_info_length & 0xc00, 0u);
-
-        CHECK_GE(infoBytesRemaining - 5, ES_info_length);
 
 #if 0
         br->skipBits(ES_info_length * 8);  // skip descriptors
@@ -398,13 +395,13 @@
             unsigned descLength = br->getBits(8);
             ALOGV("      len = %u", descLength);
 
-            CHECK_GE(info_bytes_remaining, 2 + descLength);
-
+            if (info_bytes_remaining < descLength) {
+                return ERROR_MALFORMED;
+            }
             br->skipBits(descLength * 8);
 
             info_bytes_remaining -= descLength + 2;
         }
-        CHECK_EQ(info_bytes_remaining, 0u);
 #endif
 
         StreamInfo info;
@@ -415,7 +412,9 @@
         infoBytesRemaining -= 5 + ES_info_length;
     }
 
-    CHECK_EQ(infoBytesRemaining, 0u);
+    if (infoBytesRemaining != 0) {
+        ALOGW("Section data remains unconsumed");
+    }
     MY_LOGV("  CRC = 0x%08x", br->getBits(32));
 
     bool PIDsChanged = false;
@@ -680,7 +679,10 @@
     }
 
     size_t payloadSizeBits = br->numBitsLeft();
-    CHECK_EQ(payloadSizeBits % 8, 0u);
+    if (payloadSizeBits % 8 != 0u) {
+        ALOGE("Wrong value");
+        return BAD_VALUE;
+    }
 
     size_t neededSize = mBuffer->size() + payloadSizeBits / 8;
     if (mBuffer->capacity() < neededSize) {
@@ -797,8 +799,6 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(packet_startcode_prefix, 0x000001u);
-
     unsigned stream_id = br->getBits(8);
     ALOGV("stream_id = 0x%02x", stream_id);
 
@@ -813,7 +813,9 @@
             && stream_id != 0xff  // program_stream_directory
             && stream_id != 0xf2  // DSMCC
             && stream_id != 0xf8) {  // H.222.1 type E
-        CHECK_EQ(br->getBits(2), 2u);
+        if (br->getBits(2) != 2u) {
+            return ERROR_MALFORMED;
+        }
 
         MY_LOGV("PES_scrambling_control = %u", br->getBits(2));
         MY_LOGV("PES_priority = %u", br->getBits(1));
@@ -847,34 +849,51 @@
         uint64_t PTS = 0, DTS = 0;
 
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
-            CHECK_GE(optional_bytes_remaining, 5u);
+            if (optional_bytes_remaining < 5u) {
+                return ERROR_MALFORMED;
+            }
 
             if (br->getBits(4) != PTS_DTS_flags) {
-                ALOGE("PES data Error!");
                 return ERROR_MALFORMED;
             }
             PTS = ((uint64_t)br->getBits(3)) << 30;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= ((uint64_t)br->getBits(15)) << 15;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= br->getBits(15);
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("PTS = 0x%016" PRIx64 " (%.2f)", PTS, PTS / 90000.0);
 
             optional_bytes_remaining -= 5;
 
             if (PTS_DTS_flags == 3) {
-                CHECK_GE(optional_bytes_remaining, 5u);
+                if (optional_bytes_remaining < 5u) {
+                    return ERROR_MALFORMED;
+                }
 
-                CHECK_EQ(br->getBits(4), 1u);
+                if (br->getBits(4) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 DTS = ((uint64_t)br->getBits(3)) << 30;
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= ((uint64_t)br->getBits(15)) << 15;
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= br->getBits(15);
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 ALOGV("DTS = %" PRIu64, DTS);
 
@@ -883,31 +902,47 @@
         }
 
         if (ESCR_flag) {
-            CHECK_GE(optional_bytes_remaining, 6u);
+            if (optional_bytes_remaining < 6u) {
+                return ERROR_MALFORMED;
+            }
 
             br->getBits(2);
 
             uint64_t ESCR = ((uint64_t)br->getBits(3)) << 30;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= ((uint64_t)br->getBits(15)) << 15;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= br->getBits(15);
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("ESCR = %" PRIu64, ESCR);
             MY_LOGV("ESCR_extension = %u", br->getBits(9));
 
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 6;
         }
 
         if (ES_rate_flag) {
-            CHECK_GE(optional_bytes_remaining, 3u);
+            if (optional_bytes_remaining < 3u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             MY_LOGV("ES_rate = %u", br->getBits(22));
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 3;
         }
@@ -917,7 +952,9 @@
         // ES data follows.
 
         if (PES_packet_length != 0) {
-            CHECK_GE(PES_packet_length, PES_header_data_length + 3);
+            if (PES_packet_length < PES_header_data_length + 3) {
+                return ERROR_MALFORMED;
+            }
 
             unsigned dataLength =
                 PES_packet_length - 3 - PES_header_data_length;
@@ -930,7 +967,9 @@
                 return ERROR_MALFORMED;
             }
 
-            CHECK_GE(br->numBitsLeft(), dataLength * 8);
+            if (br->numBitsLeft() < dataLength * 8) {
+                return ERROR_MALFORMED;
+            }
 
             onPayloadData(
                     PTS_DTS_flags, PTS, DTS, br->data(), dataLength);
@@ -942,15 +981,21 @@
                     br->data(), br->numBitsLeft() / 8);
 
             size_t payloadSizeBits = br->numBitsLeft();
-            CHECK_EQ(payloadSizeBits % 8, 0u);
+            if (payloadSizeBits % 8 != 0u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("There's %zu bytes of payload.", payloadSizeBits / 8);
         }
     } else if (stream_id == 0xbe) {  // padding_stream
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br->skipBits(PES_packet_length * 8);
     } else {
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br->skipBits(PES_packet_length * 8);
     }
 
@@ -1082,7 +1127,10 @@
 }
 
 status_t ATSParser::feedTSPacket(const void *data, size_t size) {
-    CHECK_EQ(size, kTSPacketSize);
+    if (size != kTSPacketSize) {
+        ALOGE("Wrong TS packet size");
+        return BAD_VALUE;
+    }
 
     ABitReader br((const uint8_t *)data, kTSPacketSize);
     return parseTS(&br);
@@ -1108,14 +1156,23 @@
         }
     } else if (type == DISCONTINUITY_ABSOLUTE_TIME) {
         int64_t timeUs;
-        CHECK(extra->findInt64("timeUs", &timeUs));
+        if (!extra->findInt64("timeUs", &timeUs)) {
+            ALOGE("timeUs not found");
+            return;
+        }
 
-        CHECK(mPrograms.empty());
+        if (!mPrograms.empty()) {
+            ALOGE("mPrograms is not empty");
+            return;
+        }
         mAbsoluteTimeAnchorUs = timeUs;
         return;
     } else if (type == DISCONTINUITY_TIME_OFFSET) {
         int64_t offset;
-        CHECK(extra->findInt64("offset", &offset));
+        if (!extra->findInt64("offset", &offset)) {
+            ALOGE("offset not found");
+            return;
+        }
 
         mTimeOffsetValid = true;
         mTimeOffsetUs = offset;
@@ -1128,7 +1185,10 @@
 }
 
 void ATSParser::signalEOS(status_t finalResult) {
-    CHECK_NE(finalResult, (status_t)OK);
+    if (finalResult == (status_t)OK) {
+        ALOGE("signalEOS: should not be called with an OK result");
+        return;
+    }
 
     for (size_t i = 0; i < mPrograms.size(); ++i) {
         mPrograms.editItemAt(i)->signalEOS(finalResult);
@@ -1144,14 +1204,12 @@
     }
     unsigned section_syntax_indictor = br->getBits(1);
     ALOGV("  section_syntax_indictor = %u", section_syntax_indictor);
-    CHECK_EQ(section_syntax_indictor, 1u);
 
-    CHECK_EQ(br->getBits(1), 0u);
+    br->skipBits(1);  // '0'
     MY_LOGV("  reserved = %u", br->getBits(2));
 
     unsigned section_length = br->getBits(12);
     ALOGV("  section_length = %u", section_length);
-    CHECK_EQ(section_length & 0xc00, 0u);
 
     MY_LOGV("  transport_stream_id = %u", br->getBits(16));
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -1161,7 +1219,6 @@
     MY_LOGV("  last_section_number = %u", br->getBits(8));
 
     size_t numProgramBytes = (section_length - 5 /* header */ - 4 /* crc */);
-    CHECK_EQ((numProgramBytes % 4), 0u);
 
     for (size_t i = 0; i < numProgramBytes / 4; ++i) {
         unsigned program_number = br->getBits(16);
@@ -1221,7 +1278,9 @@
             br->skipBits(skip * 8);
         }
 
-        CHECK((br->numBitsLeft() % 8) == 0);
+        if (br->numBitsLeft() % 8 != 0) {
+            return ERROR_MALFORMED;
+        }
         status_t err = section->append(br->data(), br->numBitsLeft() / 8);
 
         if (err != OK) {
@@ -1291,7 +1350,7 @@
     return OK;
 }
 
-void ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
+status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
     unsigned adaptation_field_length = br->getBits(8);
 
     if (adaptation_field_length > 0) {
@@ -1307,6 +1366,9 @@
         size_t numBitsRead = 4;
 
         if (PCR_flag) {
+            if (adaptation_field_length * 8 < 52) {
+                return ERROR_MALFORMED;
+            }
             br->skipBits(4);
             uint64_t PCR_base = br->getBits(32);
             PCR_base = (PCR_base << 1) | br->getBits(1);
@@ -1337,10 +1399,9 @@
             numBitsRead += 52;
         }
 
-        CHECK_GE(adaptation_field_length * 8, numBitsRead);
-
         br->skipBits(adaptation_field_length * 8 - numBitsRead);
     }
+    return OK;
 }
 
 status_t ATSParser::parseTS(ABitReader *br) {
@@ -1375,15 +1436,16 @@
 
     // ALOGI("PID = 0x%04x, continuity_counter = %u", PID, continuity_counter);
 
-    if (adaptation_field_control == 2 || adaptation_field_control == 3) {
-        parseAdaptationField(br, PID);
-    }
-
     status_t err = OK;
 
-    if (adaptation_field_control == 1 || adaptation_field_control == 3) {
-        err = parsePID(
-                br, PID, continuity_counter, payload_unit_start_indicator);
+    if (adaptation_field_control == 2 || adaptation_field_control == 3) {
+        err = parseAdaptationField(br, PID);
+    }
+    if (err == OK) {
+        if (adaptation_field_control == 1 || adaptation_field_control == 3) {
+            err = parsePID(
+                    br, PID, continuity_counter, payload_unit_start_indicator);
+        }
     }
 
     ++mNumTSPacketsParsed;
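
The recurring change in ATSParser.cpp is to stop aborting mediaserver on malformed transport streams: fatal CHECK_* assertions on demuxed input are replaced by status returns (ERROR_MALFORMED, BAD_VALUE) or warning logs. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real framework ones:

    #include <cstdint>

    // Stand-ins for the framework status codes used above.
    static const int OK = 0;
    static const int ERROR_MALFORMED = -1;

    // Before: CHECK_EQ(bit, 1u) aborted mediaserver when a corrupt packet
    // violated the marker-bit invariant. After: the violation is reported and
    // only the offending packet is dropped by the caller.
    int parseMarkerBit(uint32_t bit) {
        if (bit != 1u) {
            return ERROR_MALFORMED;
        }
        return OK;
    }

The same policy explains the parseTS() reordering above: parseAdaptationField() now returns a status, and parsePID() is only attempted when the adaptation field parsed cleanly.
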
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 87ab1a0..4def333 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -133,7 +133,7 @@
         unsigned continuity_counter,
         unsigned payload_unit_start_indicator);
 
-    void parseAdaptationField(ABitReader *br, unsigned PID);
+    status_t parseAdaptationField(ABitReader *br, unsigned PID);
     status_t parseTS(ABitReader *br);
 
     void updatePCR(unsigned PID, uint64_t PCR, size_t byteOffsetFromStart);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index f28a1fd..7b5b46a 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -421,8 +421,8 @@
             }
 
             default:
-                TRESPASS();
-                break;
+                ALOGE("Unknown mode: %d", mMode);
+                return ERROR_MALFORMED;
         }
     }
 
@@ -503,7 +503,10 @@
         case METADATA:
             return dequeueAccessUnitMetadata();
         default:
-            CHECK_EQ((unsigned)mMode, (unsigned)MPEG_AUDIO);
+            if (mMode != MPEG_AUDIO) {
+                ALOGE("Unknown mode");
+                return NULL;
+            }
             return dequeueAccessUnitMPEGAudio();
     }
 }
@@ -540,7 +543,10 @@
     memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
 
     int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("negative timeUs");
+        return NULL;
+    }
     accessUnit->meta()->setInt64("timeUs", timeUs);
     accessUnit->meta()->setInt32("isSync", 1);
 
@@ -560,15 +566,24 @@
     }
 
     ABitReader bits(mBuffer->data(), 4);
-    CHECK_EQ(bits.getBits(8), 0xa0);
+    if (bits.getBits(8) != 0xa0) {
+        ALOGE("Unexpected bit values");
+        return NULL;
+    }
     unsigned numAUs = bits.getBits(8);
     bits.skipBits(8);
     unsigned quantization_word_length __unused = bits.getBits(2);
     unsigned audio_sampling_frequency = bits.getBits(3);
     unsigned num_channels = bits.getBits(3);
 
-    CHECK_EQ(audio_sampling_frequency, 2);  // 48kHz
-    CHECK_EQ(num_channels, 1u);  // stereo!
+    if (audio_sampling_frequency != 2) {
+        ALOGE("Wrong sampling freq");
+        return NULL;
+    }
+    if (num_channels != 1u) {
+        ALOGE("Wrong channel #");
+        return NULL;
+    }
 
     if (mFormat == NULL) {
         mFormat = new MetaData;
@@ -590,7 +605,10 @@
     memcpy(accessUnit->data(), mBuffer->data() + 4, payloadSize);
 
     int64_t timeUs = fetchTimestamp(payloadSize + 4);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("Negative timeUs");
+        return NULL;
+    }
     accessUnit->meta()->setInt64("timeUs", timeUs);
     accessUnit->meta()->setInt32("isSync", 1);
 
@@ -614,14 +632,19 @@
         return NULL;
     }
 
-    CHECK(!mRangeInfos.empty());
+    if (mRangeInfos.empty()) {
+        return NULL;
+    }
 
     const RangeInfo &info = *mRangeInfos.begin();
     if (mBuffer->size() < info.mLength) {
         return NULL;
     }
 
-    CHECK_GE(info.mTimestampUs, 0ll);
+    if (info.mTimestampUs < 0ll) {
+        ALOGE("Negative info.mTimestampUs");
+        return NULL;
+    }
 
     // The idea here is consume all AAC frames starting at offsets before
     // info.mLength so we can assign a meaningful timestamp without
@@ -638,17 +661,26 @@
 
         // adts_fixed_header
 
-        CHECK_EQ(bits.getBits(12), 0xfffu);
+        if (bits.getBits(12) != 0xfffu) {
+            ALOGE("Wrong atds_fixed_header");
+            return NULL;
+        }
         bits.skipBits(3);  // ID, layer
         bool protection_absent __unused = bits.getBits(1) != 0;
 
         if (mFormat == NULL) {
             unsigned profile = bits.getBits(2);
-            CHECK_NE(profile, 3u);
+            if (profile == 3u) {
+                ALOGE("profile should not be 3");
+                return NULL;
+            }
             unsigned sampling_freq_index = bits.getBits(4);
             bits.getBits(1);  // private_bit
             unsigned channel_configuration = bits.getBits(3);
-            CHECK_NE(channel_configuration, 0u);
+            if (channel_configuration == 0u) {
+                ALOGE("channel_config should not be 0");
+                return NULL;
+            }
             bits.skipBits(2);  // original_copy, home
 
             mFormat = MakeAACCodecSpecificData(
@@ -658,8 +690,14 @@
 
             int32_t sampleRate;
             int32_t numChannels;
-            CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
-            CHECK(mFormat->findInt32(kKeyChannelCount, &numChannels));
+            if (!mFormat->findInt32(kKeySampleRate, &sampleRate)) {
+                ALOGE("SampleRate not found");
+                return NULL;
+            }
+            if (!mFormat->findInt32(kKeyChannelCount, &numChannels)) {
+                ALOGE("ChannelCount not found");
+                return NULL;
+            }
 
             ALOGI("found AAC codec config (%d Hz, %d channels)",
                  sampleRate, numChannels);
@@ -682,7 +720,8 @@
 
         if (number_of_raw_data_blocks_in_frame != 0) {
             // To be implemented.
-            TRESPASS();
+            ALOGE("Should not reach here.");
+            return NULL;
         }
 
         if (offset + aac_frame_length > mBuffer->size()) {
@@ -714,7 +753,9 @@
     bool first = true;
 
     while (size > 0) {
-        CHECK(!mRangeInfos.empty());
+        if (mRangeInfos.empty()) {
+            return timeUs;
+        }
 
         RangeInfo *info = &*mRangeInfos.begin();
 
@@ -813,7 +854,10 @@
                 unsigned nalType = mBuffer->data()[pos.nalOffset] & 0x1f;
 
                 if (nalType == 6 && pos.nalSize > 0) {
-                    CHECK_LT(seiIndex, sei->size() / sizeof(NALPosition));
+                    if (seiIndex >= sei->size() / sizeof(NALPosition)) {
+                        ALOGE("Wrong seiIndex");
+                        return NULL;
+                    }
                     NALPosition &seiPos = ((NALPosition *)sei->data())[seiIndex++];
                     seiPos.nalOffset = dstOffset + 4;
                     seiPos.nalSize = pos.nalSize;
@@ -851,7 +895,10 @@
             mBuffer->setRange(0, mBuffer->size() - nextScan);
 
             int64_t timeUs = fetchTimestamp(nextScan);
-            CHECK_GE(timeUs, 0ll);
+            if (timeUs < 0ll) {
+                ALOGE("Negative timeUs");
+                return NULL;
+            }
 
             accessUnit->meta()->setInt64("timeUs", timeUs);
             if (foundIDR) {
@@ -873,7 +920,10 @@
 
         totalSize += nalSize;
     }
-    CHECK_EQ(err, (status_t)-EAGAIN);
+    if (err != (status_t)-EAGAIN) {
+        ALOGE("Unexpeted err");
+        return NULL;
+    }
 
     return NULL;
 }
@@ -890,9 +940,12 @@
 
     size_t frameSize;
     int samplingRate, numChannels, bitrate, numSamples;
-    CHECK(GetMPEGAudioFrameSize(
+    if (!GetMPEGAudioFrameSize(
                 header, &frameSize, &samplingRate, &numChannels,
-                &bitrate, &numSamples));
+                &bitrate, &numSamples)) {
+        ALOGE("Failed to get audio frame size");
+        return NULL;
+    }
 
     if (size < frameSize) {
         return NULL;
@@ -910,7 +963,10 @@
     mBuffer->setRange(0, mBuffer->size() - frameSize);
 
     int64_t timeUs = fetchTimestamp(frameSize);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("Negative timeUs");
+        return NULL;
+    }
 
     accessUnit->meta()->setInt64("timeUs", timeUs);
     accessUnit->meta()->setInt32("isSync", 1);
@@ -932,7 +988,7 @@
                         kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
                 break;
             default:
-                TRESPASS();
+                return NULL;
         }
 
         mFormat->setInt32(kKeySampleRate, samplingRate);
@@ -943,7 +999,10 @@
 }
 
 static void EncodeSize14(uint8_t **_ptr, size_t size) {
-    CHECK_LE(size, 0x3fff);
+    if (size > 0x3fff) {
+        ALOGE("Wrong size");
+        return;
+    }
 
     uint8_t *ptr = *_ptr;
 
@@ -1018,7 +1077,10 @@
             // seqHeader without/with extension
 
             if (mFormat == NULL) {
-                CHECK_GE(size, 7u);
+                if (size < 7u) {
+                    ALOGE("Size too small");
+                    return NULL;
+                }
 
                 unsigned width =
                     (data[4] << 4) | data[5] >> 4;
@@ -1078,7 +1140,10 @@
                 mBuffer->setRange(0, mBuffer->size() - offset);
 
                 int64_t timeUs = fetchTimestamp(offset);
-                CHECK_GE(timeUs, 0ll);
+                if (timeUs < 0ll) {
+                    ALOGE("Negative timeUs");
+                    return NULL;
+                }
 
                 offset = 0;
 
@@ -1111,7 +1176,7 @@
     }
 
     if (memcmp(kStartCode, data, 3)) {
-        TRESPASS();
+        return -EAGAIN;
     }
 
     size_t offset = 3;
@@ -1171,25 +1236,37 @@
 
             case EXPECT_VISUAL_OBJECT_START:
             {
-                CHECK_EQ(chunkType, 0xb5);
+                if (chunkType != 0xb5) {
+                    ALOGE("Unexpected chunkType");
+                    return NULL;
+                }
                 state = EXPECT_VO_START;
                 break;
             }
 
             case EXPECT_VO_START:
             {
-                CHECK_LE(chunkType, 0x1f);
+                if (chunkType > 0x1f) {
+                    ALOGE("Unexpected chunkType");
+                    return NULL;
+                }
                 state = EXPECT_VOL_START;
                 break;
             }
 
             case EXPECT_VOL_START:
             {
-                CHECK((chunkType & 0xf0) == 0x20);
+                if ((chunkType & 0xf0) != 0x20) {
+                    ALOGE("Wrong chunkType");
+                    return NULL;
+                }
 
-                CHECK(ExtractDimensionsFromVOLHeader(
+                if (!ExtractDimensionsFromVOLHeader(
                             &data[offset], chunkSize,
-                            &width, &height));
+                            &width, &height)) {
+                    ALOGE("Failed to get dimension");
+                    return NULL;
+                }
 
                 state = WAIT_FOR_VOP_START;
                 break;
@@ -1242,7 +1319,10 @@
                     mBuffer->setRange(0, size);
 
                     int64_t timeUs = fetchTimestamp(offset);
-                    CHECK_GE(timeUs, 0ll);
+                    if (timeUs < 0ll) {
+                        ALOGE("Negative timeus");
+                        return NULL;
+                    }
 
                     offset = 0;
 
@@ -1266,7 +1346,8 @@
             }
 
             default:
-                TRESPASS();
+                ALOGE("Unknown state: %d", state);
+                return NULL;
         }
 
         if (discard) {
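
dequeueAccessUnitAAC() above now rejects a bad ADTS fixed header (wrong syncword, reserved profile 3, channel_configuration 0) by returning NULL instead of asserting. A self-contained sketch of those same checks with the bit layout spelled out; the function name and byte-level parsing are illustrative, not the AOSP implementation:

    #include <cstddef>
    #include <cstdint>

    // Returns true when the first four bytes look like a usable adts_fixed_header.
    bool looksLikeUsableAdtsHeader(const uint8_t *data, size_t size) {
        if (size < 4) {
            return false;
        }
        uint32_t syncword = (data[0] << 4) | (data[1] >> 4);                 // 12 bits
        uint32_t profile = (data[2] >> 6) & 0x3;                             // 2 bits
        uint32_t channel_config = ((data[2] & 0x1) << 2) | (data[3] >> 6);   // 3 bits

        return syncword == 0xFFF     // required sync pattern
            && profile != 3          // reserved profile is rejected
            && channel_config != 0;  // in-band PCE configs are rejected
    }
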
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index 85859f7..6d9fe9d 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -265,7 +265,10 @@
     }
 
     unsigned PES_packet_length = U16_AT(mBuffer->data() + 4);
-    CHECK_NE(PES_packet_length, 0u);
+    if (PES_packet_length == 0u) {
+        ALOGE("PES_packet_length is 0");
+        return -EAGAIN;
+    }
 
     size_t n = PES_packet_length + 6;
 
@@ -286,7 +289,10 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(packet_startcode_prefix, 0x000001u);
+    if (packet_startcode_prefix != 0x000001u) {
+        ALOGE("Wrong PES prefix");
+        return ERROR_MALFORMED;
+    }
 
     unsigned stream_id = br.getBits(8);
     ALOGV("stream_id = 0x%02x", stream_id);
@@ -366,8 +372,7 @@
             && stream_id != 0xff  // program_stream_directory
             && stream_id != 0xf2  // DSMCC
             && stream_id != 0xf8) {  // H.222.1 type E
-        CHECK_EQ(br.getBits(2), 2u);
-
+        /* unsigned PES_marker_bits = */br.getBits(2);  // should be 0x2
         /* unsigned PES_scrambling_control = */br.getBits(2);
         /* unsigned PES_priority = */br.getBits(1);
         /* unsigned data_alignment_indicator = */br.getBits(1);
@@ -400,16 +405,26 @@
         uint64_t PTS = 0, DTS = 0;
 
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
-            CHECK_GE(optional_bytes_remaining, 5u);
+            if (optional_bytes_remaining < 5u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br.getBits(4), PTS_DTS_flags);
+            if (br.getBits(4) != PTS_DTS_flags) {
+                return ERROR_MALFORMED;
+            }
 
             PTS = ((uint64_t)br.getBits(3)) << 30;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= ((uint64_t)br.getBits(15)) << 15;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= br.getBits(15);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("PTS = %" PRIu64, PTS);
             // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
@@ -417,16 +432,26 @@
             optional_bytes_remaining -= 5;
 
             if (PTS_DTS_flags == 3) {
-                CHECK_GE(optional_bytes_remaining, 5u);
+                if (optional_bytes_remaining < 5u) {
+                    return ERROR_MALFORMED;
+                }
 
-                CHECK_EQ(br.getBits(4), 1u);
+                if (br.getBits(4) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 DTS = ((uint64_t)br.getBits(3)) << 30;
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= ((uint64_t)br.getBits(15)) << 15;
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= br.getBits(15);
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 ALOGV("DTS = %" PRIu64, DTS);
 
@@ -435,40 +460,62 @@
         }
 
         if (ESCR_flag) {
-            CHECK_GE(optional_bytes_remaining, 6u);
+            if (optional_bytes_remaining < 6u) {
+                return ERROR_MALFORMED;
+            }
 
             br.getBits(2);
 
             uint64_t ESCR = ((uint64_t)br.getBits(3)) << 30;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= ((uint64_t)br.getBits(15)) << 15;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= br.getBits(15);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("ESCR = %" PRIu64, ESCR);
             /* unsigned ESCR_extension = */br.getBits(9);
 
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 6;
         }
 
         if (ES_rate_flag) {
-            CHECK_GE(optional_bytes_remaining, 3u);
+            if (optional_bytes_remaining < 3u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             /* unsigned ES_rate = */br.getBits(22);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 3;
         }
 
+        if (br.numBitsLeft() < optional_bytes_remaining * 8) {
+            return ERROR_MALFORMED;
+        }
+
         br.skipBits(optional_bytes_remaining * 8);
 
         // ES data follows.
 
-        CHECK_GE(PES_packet_length, PES_header_data_length + 3);
+        if (PES_packet_length < PES_header_data_length + 3) {
+            return ERROR_MALFORMED;
+        }
 
         unsigned dataLength =
             PES_packet_length - 3 - PES_header_data_length;
@@ -481,7 +528,9 @@
             return ERROR_MALFORMED;
         }
 
-        CHECK_GE(br.numBitsLeft(), dataLength * 8);
+        if (br.numBitsLeft() < dataLength * 8) {
+            return ERROR_MALFORMED;
+        }
 
         ssize_t index = mTracks.indexOfKey(stream_id);
         if (index < 0 && mScanning) {
@@ -521,10 +570,14 @@
             return err;
         }
     } else if (stream_id == 0xbe) {  // padding_stream
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br.skipBits(PES_packet_length * 8);
     } else {
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br.skipBits(PES_packet_length * 8);
     }
 
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 74cb5d8..f5c33cf 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -131,7 +131,10 @@
 
     bool seekable = true;
     if (mSourceImpls.size() > 1) {
-        CHECK_EQ(mSourceImpls.size(), 2u);
+        if (mSourceImpls.size() != 2u) {
+            ALOGE("Wrong size");
+            return NULL;
+        }
 
         sp<MetaData> meta = mSourceImpls.editItemAt(index)->getFormat();
         const char *mime;
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 477cfc6..01cd8f0 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -38,13 +38,19 @@
 static const bool EXTRA_CHECK = true;
 
 
-GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
-        uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
-        bool useGraphicBufferInMeta) :
+GraphicBufferSource::GraphicBufferSource(
+        OMXNodeInstance* nodeInstance,
+        uint32_t bufferWidth,
+        uint32_t bufferHeight,
+        uint32_t bufferCount,
+        bool useGraphicBufferInMeta,
+        const sp<IGraphicBufferConsumer> &consumer) :
     mInitCheck(UNKNOWN_ERROR),
     mNodeInstance(nodeInstance),
     mExecuting(false),
     mSuspended(false),
+    mIsPersistent(false),
+    mConsumer(consumer),
     mNumFramesAvailable(0),
     mEndOfStream(false),
     mEndOfStreamSent(false),
@@ -74,20 +80,22 @@
         return;
     }
 
-    String8 name("GraphicBufferSource");
+    if (mConsumer == NULL) {
+        String8 name("GraphicBufferSource");
 
-    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
-    mConsumer->setConsumerName(name);
-    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
-
-    mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
-    if (mInitCheck != NO_ERROR) {
-        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
-                bufferCount, mInitCheck);
-        return;
+        BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+        mConsumer->setConsumerName(name);
+        mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
+        mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+        if (mInitCheck != NO_ERROR) {
+            ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+                    bufferCount, mInitCheck);
+            return;
+        }
+    } else {
+        mIsPersistent = true;
     }
-
+    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
     // Note that we can't create an sp<...>(this) in a ctor that will not keep a
     // reference once the ctor ends, as that would cause the refcount of 'this'
     // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
@@ -107,7 +115,7 @@
 
 GraphicBufferSource::~GraphicBufferSource() {
     ALOGV("~GraphicBufferSource");
-    if (mConsumer != NULL) {
+    if (mConsumer != NULL && !mIsPersistent) {
         status_t err = mConsumer->consumerDisconnect();
         if (err != NO_ERROR) {
             ALOGW("consumerDisconnect failed: %d", err);
@@ -292,8 +300,16 @@
         if (id == mLatestBufferId) {
             CHECK_GT(mLatestBufferUseCount--, 0);
         } else {
-            mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            if (mIsPersistent) {
+                mConsumer->detachBuffer(id);
+                int outSlot;
+                mConsumer->attachBuffer(&outSlot, mBufferSlot[id]);
+                mConsumer->releaseBuffer(outSlot, 0,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            } else {
+                mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            }
         }
     } else {
         ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
@@ -375,8 +391,15 @@
 
             --mNumFramesAvailable;
 
-            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+            if (mIsPersistent) {
+                mConsumer->detachBuffer(item.mBuf);
+                mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
+                mConsumer->releaseBuffer(item.mBuf, 0,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            } else {
+                mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+            }
         }
         return;
     }
@@ -463,8 +486,15 @@
 
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        if (mIsPersistent) {
+            mConsumer->detachBuffer(item.mBuf);
+            mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
+            mConsumer->releaseBuffer(item.mBuf, 0,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        } else {
+            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        }
     } else {
         ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
         setLatestBuffer_l(item, dropped);
@@ -540,12 +570,19 @@
 
     if (mLatestBufferId >= 0) {
         if (mLatestBufferUseCount == 0) {
-            mConsumer->releaseBuffer(
-                    mLatestBufferId,
-                    mLatestBufferFrameNum,
-                    EGL_NO_DISPLAY,
-                    EGL_NO_SYNC_KHR,
-                    Fence::NO_FENCE);
+            if (mIsPersistent) {
+                mConsumer->detachBuffer(mLatestBufferId);
+
+                int outSlot;
+                mConsumer->attachBuffer(&outSlot, mBufferSlot[mLatestBufferId]);
+
+                mConsumer->releaseBuffer(outSlot, 0,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            } else {
+                mConsumer->releaseBuffer(
+                        mLatestBufferId, mLatestBufferFrameNum,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            }
         }
     }
 
@@ -787,8 +824,16 @@
                 ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mBuf);
                 mBufferSlot[item.mBuf] = item.mGraphicBuffer;
             }
-            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+
+            if (mIsPersistent) {
+                mConsumer->detachBuffer(item.mBuf);
+                mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
+                mConsumer->releaseBuffer(item.mBuf, 0,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            } else {
+                mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+            }
         }
         return;
     }
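
For a persistent (shared) consumer, the buffer slots acquired by this GraphicBufferSource do not belong to it alone, so returning a buffer goes through a detach/attach round-trip instead of a plain releaseBuffer(). The same three-call sequence appears in all four release sites above; a hedged sketch of it pulled out as a helper — the helper name, parameter shape and include set are assumptions, while the consumer calls are the ones used in the diff:

    #include <EGL/egl.h>
    #include <EGL/eglext.h>
    #include <gui/IGraphicBufferConsumer.h>
    #include <ui/Fence.h>
    #include <ui/GraphicBuffer.h>

    using namespace android;

    // Return an acquired slot to a BufferQueue that is either shared across
    // encoder instances (persistent input surface) or owned by this source.
    static void returnBufferToQueue(
            const sp<IGraphicBufferConsumer> &consumer,
            int slot, uint64_t frameNumber,
            const sp<GraphicBuffer> &buffer, const sp<Fence> &fence,
            bool isPersistent) {
        if (isPersistent) {
            // The slot belongs to the shared queue: detach it, re-attach the
            // GraphicBuffer to obtain a currently valid slot, and release that.
            consumer->detachBuffer(slot);
            int outSlot;
            consumer->attachBuffer(&outSlot, buffer);
            consumer->releaseBuffer(outSlot, 0 /* frameNumber */,
                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
        } else {
            consumer->releaseBuffer(slot, frameNumber,
                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
        }
    }
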
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 718d2ee..1047fb3 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -50,9 +50,15 @@
  */
 class GraphicBufferSource : public BufferQueue::ConsumerListener {
 public:
-    GraphicBufferSource(OMXNodeInstance* nodeInstance,
-            uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
-            bool useGraphicBufferInMeta = false);
+    GraphicBufferSource(
+            OMXNodeInstance* nodeInstance,
+            uint32_t bufferWidth,
+            uint32_t bufferHeight,
+            uint32_t bufferCount,
+            bool useGraphicBufferInMeta = false,
+            const sp<IGraphicBufferConsumer> &consumer = NULL
+    );
+
     virtual ~GraphicBufferSource();
 
     // We can't throw an exception if the constructor fails, so we just set
@@ -219,6 +225,7 @@
     // Our BufferQueue interfaces. mProducer is passed to the producer through
     // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
     // the buffers queued by the producer.
+    bool mIsPersistent;
     sp<IGraphicBufferProducer> mProducer;
     sp<IGraphicBufferConsumer> mConsumer;
 
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f8d38ff..876abb8 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -32,6 +32,7 @@
 
 #include "OMXMaster.h"
 
+#include <OMX_AsString.h>
 #include <OMX_Component.h>
 
 namespace android {
@@ -233,11 +234,11 @@
             instance, &handle);
 
     if (err != OMX_ErrorNone) {
-        ALOGE("FAILED to allocate omx component '%s'", name);
+        ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)", name, asString(err), err);
 
         instance->onGetHandleFailed();
 
-        return UNKNOWN_ERROR;
+        return StatusFromOMXError(err);
     }
 
     *node = makeNodeID(instance);
@@ -377,6 +378,21 @@
             port_index, bufferProducer);
 }
 
+status_t OMX::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    return OMXNodeInstance::createPersistentInputSurface(
+            bufferProducer, bufferConsumer);
+}
+
+status_t OMX::usePersistentInputSurface(
+        node_id node, OMX_U32 port_index,
+        const sp<IGraphicBufferConsumer> &bufferConsumer) {
+    return findInstance(node)->usePersistentInputSurface(
+            port_index, bufferConsumer);
+}
+
+
 status_t OMX::signalEndOfInputStream(node_id node) {
     return findInstance(node)->signalEndOfInputStream();
 }
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 4779d6a..04293d6 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -220,13 +220,15 @@
     return mNodeID;
 }
 
-static status_t StatusFromOMXError(OMX_ERRORTYPE err) {
+status_t StatusFromOMXError(OMX_ERRORTYPE err) {
     switch (err) {
         case OMX_ErrorNone:
             return OK;
         case OMX_ErrorUnsupportedSetting:
         case OMX_ErrorUnsupportedIndex:
             return ERROR_UNSUPPORTED;
+        case OMX_ErrorInsufficientResources:
+            return NO_MEMORY;
         default:
             return UNKNOWN_ERROR;
     }
@@ -787,9 +789,8 @@
     return OK;
 }
 
-status_t OMXNodeInstance::createInputSurface(
-        OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer) {
-    Mutex::Autolock autolock(mLock);
+status_t OMXNodeInstance::createGraphicBufferSource(
+        OMX_U32 portIndex, sp<IGraphicBufferConsumer> bufferConsumer) {
     status_t err;
 
     const sp<GraphicBufferSource>& surfaceCheck = getGraphicBufferSource();
@@ -827,19 +828,75 @@
         return INVALID_OPERATION;
     }
 
-    GraphicBufferSource* bufferSource = new GraphicBufferSource(
-            this, def.format.video.nFrameWidth, def.format.video.nFrameHeight,
-            def.nBufferCountActual, usingGraphicBuffer);
+    sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(this,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.nBufferCountActual,
+            usingGraphicBuffer,
+            bufferConsumer);
+
     if ((err = bufferSource->initCheck()) != OK) {
-        delete bufferSource;
         return err;
     }
     setGraphicBufferSource(bufferSource);
 
-    *bufferProducer = bufferSource->getIGraphicBufferProducer();
     return OK;
 }
 
+status_t OMXNodeInstance::createInputSurface(
+        OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer) {
+    Mutex::Autolock autolock(mLock);
+    status_t err = createGraphicBufferSource(portIndex);
+
+    if (err != OK) {
+        return err;
+    }
+
+    *bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
+    return OK;
+}
+
+//static
+status_t OMXNodeInstance::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    String8 name("GraphicBufferSource");
+
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    consumer->setConsumerName(name);
+    consumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
+
+    status_t err = consumer->setMaxAcquiredBufferCount(
+            BufferQueue::MAX_MAX_ACQUIRED_BUFFERS);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+                BufferQueue::MAX_MAX_ACQUIRED_BUFFERS, err);
+        return err;
+    }
+
+    sp<BufferQueue::ProxyConsumerListener> proxy =
+        new BufferQueue::ProxyConsumerListener(NULL);
+    err = consumer->consumerConnect(proxy, false);
+    if (err != NO_ERROR) {
+        ALOGE("Error connecting to BufferQueue: %s (%d)",
+                strerror(-err), err);
+        return err;
+    }
+
+    *bufferProducer = producer;
+    *bufferConsumer = consumer;
+
+    return OK;
+}
+
+status_t OMXNodeInstance::usePersistentInputSurface(
+        OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer) {
+    Mutex::Autolock autolock(mLock);
+    return createGraphicBufferSource(portIndex, bufferConsumer);
+}
+
 status_t OMXNodeInstance::signalEndOfInputStream() {
     // For non-Surface input, the MediaCodec should convert the call to a
     // pair of requests (dequeue input buffer, queue input buffer with EOS
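
Taken together, the OMX.h and OMXNodeInstance changes split input-surface creation from codec binding: createPersistentInputSurface() builds a free-standing BufferQueue (producer plus consumer), and usePersistentInputSurface() later hands that consumer to a particular node's GraphicBufferSource. A hedged sketch of how a caller might wire the two; `omx`, `node` and `kInputPortIndex` are placeholders for an existing IOMX interface, a configured encoder node and its input port index:

    sp<IGraphicBufferProducer> producer;   // handed out to the capture source
    sp<IGraphicBufferConsumer> consumer;   // retained and re-bound to encoders

    status_t err = omx->createPersistentInputSurface(&producer, &consumer);
    if (err == OK) {
        // The surface now outlives any single codec instance; bind it to the
        // encoder that is currently recording from it.
        err = omx->usePersistentInputSurface(node, kInputPortIndex, consumer);
    }
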
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 00f071b..ba17e90 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1673,21 +1673,11 @@
         }
 
         size_t n = strlen(baseURL);
-        if (baseURL[n - 1] == '/') {
-            out->setTo(baseURL);
-            out->append(url);
-        } else {
-            const char *slashPos = strrchr(baseURL, '/');
-
-            if (slashPos > &baseURL[6]) {
-                out->setTo(baseURL, slashPos - baseURL);
-            } else {
-                out->setTo(baseURL);
-            }
-
+        out->setTo(baseURL);
+        if (baseURL[n - 1] != '/') {
             out->append("/");
-            out->append(url);
         }
+        out->append(url);
 
         return true;
     }
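
The base-URL joining change above always appends the relative path to the full base URL (inserting a single '/' only when the base does not already end with one), instead of first trimming the base back to its last path component. A stand-alone illustration of the new behavior, using std::string in place of AString:

    #include <string>

    // joinUrl("rtsp://host/stream",  "trackID=1") -> "rtsp://host/stream/trackID=1"
    // joinUrl("rtsp://host/stream/", "trackID=1") -> "rtsp://host/stream/trackID=1"
    std::string joinUrl(const std::string &baseURL, const std::string &url) {
        std::string out = baseURL;
        if (out.empty() || out.back() != '/') {
            out += '/';
        }
        return out + url;
    }
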
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index fd889f9..3860e9b 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -19,6 +19,7 @@
 
 #include <gtest/gtest.h>
 #include <utils/String8.h>
+#include <utils/String16.h>
 #include <utils/Errors.h>
 #include <fcntl.h>
 #include <unistd.h>
@@ -466,7 +467,7 @@
 // Set up the MediaRecorder which runs in the same process as mediaserver
 sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int videoSource,
         int outputFormat, int videoEncoder, int width, int height, int fps) {
-    sp<MediaRecorder> mr = new MediaRecorder();
+    sp<MediaRecorder> mr = new MediaRecorder(String16());
     mr->setVideoSource(videoSource);
     mr->setOutputFormat(outputFormat);
     mr->setVideoEncoder(videoEncoder);
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 5e2f0bf..ed5a404 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -345,12 +345,14 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 WifiDisplaySource::PlaybackSession::PlaybackSession(
+        const String16 &opPackageName,
         const sp<ANetworkSession> &netSession,
         const sp<AMessage> &notify,
         const in_addr &interfaceAddr,
         const sp<IHDCP> &hdcp,
         const char *path)
-    : mNetSession(netSession),
+    : mOpPackageName(opPackageName),
+      mNetSession(netSession),
       mNotify(notify),
       mInterfaceAddr(interfaceAddr),
       mHDCP(hdcp),
@@ -1069,6 +1071,7 @@
 status_t WifiDisplaySource::PlaybackSession::addAudioSource(bool usePCMAudio) {
     sp<AudioSource> audioSource = new AudioSource(
             AUDIO_SOURCE_REMOTE_SUBMIX,
+            mOpPackageName,
             48000 /* sampleRate */,
             2 /* channelCount */);
 
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 4cd1a75..f6673df 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -22,6 +22,8 @@
 #include "VideoFormats.h"
 #include "WifiDisplaySource.h"
 
+#include <utils/String16.h>
+
 namespace android {
 
 struct ABuffer;
@@ -36,6 +38,7 @@
 // display.
 struct WifiDisplaySource::PlaybackSession : public AHandler {
     PlaybackSession(
+            const String16 &opPackageName,
             const sp<ANetworkSession> &netSession,
             const sp<AMessage> &notify,
             const struct in_addr &interfaceAddr,
@@ -96,6 +99,8 @@
         kWhatPullExtractorSample,
     };
 
+    String16 mOpPackageName;
+
     sp<ANetworkSession> mNetSession;
     sp<AMessage> mNotify;
     in_addr mInterfaceAddr;
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 332fe16..e26165e 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -50,10 +50,12 @@
 const AString WifiDisplaySource::sUserAgent = MakeUserAgent();
 
 WifiDisplaySource::WifiDisplaySource(
+        const String16 &opPackageName,
         const sp<ANetworkSession> &netSession,
         const sp<IRemoteDisplayClient> &client,
         const char *path)
-    : mState(INITIALIZED),
+    : mOpPackageName(opPackageName),
+      mState(INITIALIZED),
       mNetSession(netSession),
       mClient(client),
       mSessionID(0),
@@ -1245,7 +1247,7 @@
 
     sp<PlaybackSession> playbackSession =
         new PlaybackSession(
-                mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
+                mOpPackageName, mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
 
     looper()->registerHandler(playbackSession);
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index c417cf5..c25a675 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -25,6 +25,8 @@
 
 #include <netinet/in.h>
 
+#include <utils/String16.h>
+
 namespace android {
 
 struct AReplyToken;
@@ -38,6 +40,7 @@
     static const unsigned kWifiDisplayDefaultPort = 7236;
 
     WifiDisplaySource(
+            const String16 &opPackageName,
             const sp<ANetworkSession> &netSession,
             const sp<IRemoteDisplayClient> &client,
             const char *path = NULL);
@@ -114,6 +117,8 @@
 
     static const AString sUserAgent;
 
+    String16 mOpPackageName;
+
     State mState;
     VideoFormats mSupportedSourceVideoFormats;
     sp<ANetworkSession> mNetSession;
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 80c1c2f..cd0c462 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -154,6 +154,10 @@
     } else {
         mData->mCodec = android::MediaCodec::CreateByComponentName(mData->mLooper, name);
     }
+    if (mData->mCodec == NULL) {  // failed to create codec
+        AMediaCodec_delete(mData);
+        return NULL;
+    }
     mData->mHandler = new CodecHandler(mData);
     mData->mLooper->registerHandler(mData->mHandler);
     mData->mGeneration = 1;
@@ -180,17 +184,21 @@
 
 EXPORT
 media_status_t AMediaCodec_delete(AMediaCodec *mData) {
-    if (mData->mCodec != NULL) {
-        mData->mCodec->release();
-        mData->mCodec.clear();
-    }
+    if (mData != NULL) {
+        if (mData->mCodec != NULL) {
+            mData->mCodec->release();
+            mData->mCodec.clear();
+        }
 
-    if (mData->mLooper != NULL) {
-        mData->mLooper->unregisterHandler(mData->mHandler->id());
-        mData->mLooper->stop();
-        mData->mLooper.clear();
+        if (mData->mLooper != NULL) {
+            if (mData->mHandler != NULL) {
+                mData->mLooper->unregisterHandler(mData->mHandler->id());
+            }
+            mData->mLooper->stop();
+            mData->mLooper.clear();
+        }
+        delete mData;
     }
-    delete mData;
     return AMEDIA_OK;
 }
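
With the NULL check added on the creation path above, the AMediaCodec_create* entry points can return NULL when the underlying MediaCodec cannot be allocated, and AMediaCodec_delete() now tolerates a NULL handle. A hedged usage sketch from the NDK side; the helper name is illustrative:

    #include <media/NdkMediaCodec.h>

    bool tryCreateDecoder(const char *mime) {
        AMediaCodec *codec = AMediaCodec_createDecoderByType(mime);
        if (codec == nullptr) {
            return false;              // component allocation failed
        }
        // ... configure, start and use the codec ...
        AMediaCodec_delete(codec);     // also a safe no-op for a NULL handle
        return true;
    }
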
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5002099..485e320 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1013,6 +1013,14 @@
     return streamMute_l(stream);
 }
 
+
+void AudioFlinger::broacastParametersToRecordThreads_l(const String8& keyValuePairs)
+{
+    for (size_t i = 0; i < mRecordThreads.size(); i++) {
+        mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
+    }
+}
+
 status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
 {
     ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
@@ -1087,9 +1095,7 @@
             int value;
             if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
                     (value != 0)) {
-                for (size_t i = 0; i < mRecordThreads.size(); i++) {
-                    mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
-                }
+                broacastParametersToRecordThreads_l(keyValuePairs);
             }
         }
     }
@@ -1262,11 +1268,11 @@
         // the config change is always sent from playback or record threads to avoid deadlock
         // with AudioSystem::gLock
         for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-            mPlaybackThreads.valueAt(i)->sendIoConfigEvent(AudioSystem::OUTPUT_OPENED);
+            mPlaybackThreads.valueAt(i)->sendIoConfigEvent(AUDIO_OUTPUT_OPENED);
         }
 
         for (size_t i = 0; i < mRecordThreads.size(); i++) {
-            mRecordThreads.valueAt(i)->sendIoConfigEvent(AudioSystem::INPUT_OPENED);
+            mRecordThreads.valueAt(i)->sendIoConfigEvent(AUDIO_INPUT_OPENED);
         }
     }
 }
@@ -1300,14 +1306,13 @@
     }
 }
 
-void AudioFlinger::audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2)
+void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+                                   const sp<AudioIoDescriptor>& ioDesc)
 {
     Mutex::Autolock _l(mClientLock);
     size_t size = mNotificationClients.size();
     for (size_t i = 0; i < size; i++) {
-        mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event,
-                                                                              ioHandle,
-                                                                              param2);
+        mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioDesc);
     }
 }
 
@@ -1416,9 +1421,11 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
+        const String16& opPackageName,
         size_t *frameCount,
         IAudioFlinger::track_flags_t *flags,
         pid_t tid,
+        int clientUid,
         int *sessionId,
         size_t *notificationFrames,
         sp<IMemory>& cblk,
@@ -1435,7 +1442,7 @@
     buffers.clear();
 
     // check calling permissions
-    if (!recordingAllowed()) {
+    if (!recordingAllowed(opPackageName)) {
         ALOGE("openRecord() permission denied: recording not allowed");
         lStatus = PERMISSION_DENIED;
         goto Exit;
@@ -1488,8 +1495,7 @@
         // TODO: the uid should be passed in as a parameter to openRecord
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId, notificationFrames,
-                                                  IPCThreadState::self()->getCallingUid(),
-                                                  flags, tid, &lStatus);
+                                                  clientUid, flags, tid, &lStatus);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
 
         if (lStatus == NO_ERROR) {
@@ -1825,7 +1831,7 @@
         *latencyMs = thread->latency();
 
         // notify client processes of the new output creation
-        thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED);
+        thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
 
         // the first primary output opened designates the primary hw device
         if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
@@ -1863,7 +1869,7 @@
     thread->addOutputTrack(thread2);
     mPlaybackThreads.add(id, thread);
     // notify client processes of the new output creation
-    thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED);
+    thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
     return id;
 }
 
@@ -1913,7 +1919,9 @@
                 }
             }
         }
-        audioConfigChanged(AudioSystem::OUTPUT_CLOSED, output, NULL);
+        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+        ioDesc->mIoHandle = output;
+        ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
     }
     thread->exit();
     // The thread entity (active unit of execution) is no longer running here,
@@ -1991,7 +1999,7 @@
 
     if (thread != 0) {
         // notify client processes of the new input creation
-        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
+        thread->ioConfigChanged(AUDIO_INPUT_OPENED);
         return NO_ERROR;
     }
     return NO_INIT;
@@ -2174,7 +2182,9 @@
                 putOrphanEffectChain_l(chain);
             }
         }
-        audioConfigChanged(AudioSystem::INPUT_CLOSED, input, NULL);
+        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+        ioDesc->mIoHandle = input;
+        ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
         mRecordThreads.removeItem(input);
     }
     // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
@@ -2447,6 +2457,7 @@
         int32_t priority,
         audio_io_handle_t io,
         int sessionId,
+        const String16& opPackageName,
         status_t *status,
         int *id,
         int *enabled)
@@ -2543,7 +2554,7 @@
 
         // check recording permission for visualizer
         if ((memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) &&
-            !recordingAllowed()) {
+            !recordingAllowed(opPackageName)) {
             lStatus = PERMISSION_DENIED;
             goto Exit;
         }
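
Note: the AudioFlinger.cpp hunks above replace the untyped audioConfigChanged(event, ioHandle, param2) callback with ioConfigChanged(event, ioDesc), where the descriptor carries the handle and stream parameters. A minimal sketch of what a notification client on the receiving side could look like, assuming the matching IAudioFlingerClient/BnAudioFlingerClient signature this change introduces and the usual LOG_TAG/<utils/Log.h> boilerplate; the class name and logging are illustrative only:

    class ExampleAudioFlingerClient : public BnAudioFlingerClient {
    public:
        virtual void ioConfigChanged(audio_io_config_event event,
                                     const sp<AudioIoDescriptor>& ioDesc) {
            if (ioDesc == 0) return;
            switch (event) {
            case AUDIO_OUTPUT_OPENED:
            case AUDIO_OUTPUT_CONFIG_CHANGED:
                // Fields mirror those populated by PlaybackThread::ioConfigChanged().
                ALOGV("io %d: %u Hz, format %#x, frameCount %zu",
                      ioDesc->mIoHandle, ioDesc->mSamplingRate,
                      ioDesc->mFormat, ioDesc->mFrameCount);
                break;
            default:
                break;
            }
        }
    };
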
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index e1ddcbc..51b2610 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -120,9 +120,11 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& opPackageName,
                                 size_t *pFrameCount,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
@@ -216,6 +218,7 @@
                         int32_t priority,
                         audio_io_handle_t io,
                         int sessionId,
+                        const String16& opPackageName,
                         status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
@@ -543,7 +546,8 @@
               // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
               float streamVolume_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].volume; }
-              void audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+              void ioConfigChanged(audio_io_config_event event,
+                                   const sp<AudioIoDescriptor>& ioDesc);
 
               // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
               // They all share the same ID space, but the namespaces are actually independent
@@ -588,6 +592,7 @@
                 // Return true if the effect was found in mOrphanEffectChains, false otherwise.
                 bool            updateOrphanEffectChains(const sp<EffectModule>& effect);
 
+                void broadcastParametersToRecordThreads_l(const String8& keyValuePairs);
 
     // AudioStreamIn is immutable, so their fields are const.
     // For emphasis, we could also make all pointers to them be "const *",
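
Note: the new broadcastParametersToRecordThreads_l() helper declared here simply fans a key/value string out to every open RecordThread with AudioFlinger::mLock held. A small sketch of the payload it typically carries once PatchPanel starts using it; AudioParameter and AUDIO_PARAMETER_STREAM_ROUTING are the existing platform helpers, and the device value is only an example:

    // Build "routing=<device>" and hand it to every record thread.
    AudioParameter param;
    param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
                 (int)AUDIO_DEVICE_OUT_SPEAKER);            // example device only
    broadcastParametersToRecordThreads_l(param.toString()); // caller holds mLock
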
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 7040af4..193fd64 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -66,9 +66,9 @@
 #define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
 #endif
 
-// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
-// original code will be used for stereo sinks, the new mixer for multichannel.
-static const bool kUseNewMixer = true;
+// Set kUseNewMixer to true to use the new mixer engine. Otherwise the
+// original code will be used.  This is false for now.
+static const bool kUseNewMixer = false;
 
 // Set kUseFloat to true to allow floating input into the mixer engine.
 // If kUseNewMixer is false, this is ignored or may be overridden internally
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 834947f..9248bba 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -274,57 +274,29 @@
                     goto exit;
                 }
             } else {
-                if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                    if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                        sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                  patch->sinks[0].ext.mix.handle);
-                        if (thread == 0) {
-                            ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                            status = BAD_VALUE;
-                            goto exit;
-                        }
-                        status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
-                    } else {
-                        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                        status = hwDevice->create_audio_patch(hwDevice,
-                                                               patch->num_sources,
-                                                               patch->sources,
-                                                               patch->num_sinks,
-                                                               patch->sinks,
-                                                               &halHandle);
+                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
+                                                              patch->sinks[0].ext.mix.handle);
+                    if (thread == 0) {
+                        ALOGW("createAudioPatch() bad capture I/O handle %d",
+                                                              patch->sinks[0].ext.mix.handle);
+                        status = BAD_VALUE;
+                        goto exit;
                     }
+                    status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
                 } else {
-                    if (patch->sinks[0].type != AUDIO_PORT_TYPE_MIX) {
+                    if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
                         status = INVALID_OPERATION;
                         goto exit;
                     }
 
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                    if (thread == 0) {
-                        ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                    patch->sinks[0].ext.mix.handle);
-                        status = BAD_VALUE;
-                        goto exit;
-                    }
-                    char *address;
-                    if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
-                        address = audio_device_address_to_parameter(
-                                                            patch->sources[0].ext.device.type,
-                                                            patch->sources[0].ext.device.address);
-                    } else {
-                        address = (char *)calloc(1, 1);
-                    }
-                    AudioParameter param = AudioParameter(String8(address));
-                    free(address);
-                    param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
-                                 (int)patch->sources[0].ext.device.type);
-                    param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
-                                                     (int)patch->sinks[0].ext.mix.usecase.source);
-                    ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
-                                                                      param.toString().string());
-                    status = thread->setParameters(param.toString());
+                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                    status = hwDevice->create_audio_patch(hwDevice,
+                                                           patch->num_sources,
+                                                           patch->sources,
+                                                           patch->num_sinks,
+                                                           patch->sinks,
+                                                           &halHandle);
                 }
             }
         } break;
@@ -337,6 +309,7 @@
                 goto exit;
             }
             // limit to connections between devices and output streams
+            audio_devices_t type = AUDIO_DEVICE_NONE;
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
                     ALOGW("createAudioPatch() invalid sink type %d for mix source",
@@ -349,8 +322,8 @@
                     status = BAD_VALUE;
                     goto exit;
                 }
+                type |= patch->sinks[i].ext.device.type;
             }
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             sp<ThreadBase> thread =
                             audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
             if (thread == 0) {
@@ -359,28 +332,14 @@
                 status = BAD_VALUE;
                 goto exit;
             }
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
-            } else {
-                audio_devices_t type = AUDIO_DEVICE_NONE;
-                for (unsigned int i = 0; i < patch->num_sinks; i++) {
-                    type |= patch->sinks[i].ext.device.type;
-                }
-                char *address;
-                if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
-                    //FIXME: we only support address on first sink with HAL version < 3.0
-                    address = audio_device_address_to_parameter(
-                                                                patch->sinks[0].ext.device.type,
-                                                                patch->sinks[0].ext.device.address);
-                } else {
-                    address = (char *)calloc(1, 1);
-                }
-                AudioParameter param = AudioParameter(String8(address));
-                free(address);
+            if (thread == audioflinger->primaryPlaybackThread_l()) {
+                AudioParameter param = AudioParameter();
                 param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
-                status = thread->setParameters(param.toString());
+
+                audioflinger->broadcastParametersToRecordThreads_l(param.toString());
             }
 
+            status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
         } break;
         default:
             status = BAD_VALUE;
@@ -581,36 +540,24 @@
                 break;
             }
 
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                    if (thread == 0) {
-                        ALOGW("releaseAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                        status = BAD_VALUE;
-                        break;
-                    }
-                    status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
-                } else {
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
-                }
-            } else {
+            if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                 sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
+                                                                patch->sinks[0].ext.mix.handle);
                 if (thread == 0) {
                     ALOGW("releaseAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
+                                                              patch->sinks[0].ext.mix.handle);
                     status = BAD_VALUE;
                     break;
                 }
-                AudioParameter param;
-                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-                ALOGV("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
-                                                                      param.toString().string());
-                status = thread->setParameters(param.toString());
+                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+            } else {
+                AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
+                if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
+                    status = INVALID_OPERATION;
+                    break;
+                }
+                audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -629,14 +576,7 @@
                 status = BAD_VALUE;
                 break;
             }
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
-            } else {
-                AudioParameter param;
-                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-                status = thread->setParameters(param.toString());
-            }
+            status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
         } break;
         default:
             status = BAD_VALUE;
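
Note: the net effect of the PatchPanel changes is that a mix sink is now always handled by the owning thread through a config event, while direct device-to-device patches are only attempted on HAL 3.0+ and rejected otherwise; the legacy setParameters() fallback moves down into the threads themselves. A condensed sketch of the resulting dispatch for a device source; the helper and its name are hypothetical, the calls are the ones used above:

    status_t dispatchDeviceSourcePatch(AudioHwDevice *dev, const struct audio_patch *patch,
                                       const sp<ThreadBase>& recordThread,
                                       audio_patch_handle_t *halHandle) {
        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
            // The record thread owns the patch, regardless of HAL version.
            return recordThread->sendCreateAudioPatchConfigEvent(patch, halHandle);
        }
        if (dev->version() < AUDIO_DEVICE_API_VERSION_3_0) {
            return INVALID_OPERATION;  // device-to-device patches need the 3.0 API
        }
        audio_hw_device_t *hw = dev->hwDevice();
        return hw->create_audio_patch(hw, patch->num_sources, patch->sources,
                                      patch->num_sinks, patch->sinks, halHandle);
    }
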
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index c51021b..7bc6f0c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -156,11 +156,6 @@
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
 
-    // for last call to getTimestamp
-    bool                mPreviousTimestampValid;
-    // This is either the first timestamp or one that has passed
-    // the check to prevent retrograde motion.
-    AudioTimestamp      mPreviousTimestamp;
 };  // end of Track
 
 class TimedTrack : public Track {
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index 8246fef..0a718fb 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -14,38 +14,97 @@
  * limitations under the License.
  */
 
+#include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/PermissionCache.h>
 #include "ServiceUtilities.h"
 
+/* When performing permission checks we do not use the permission cache for
+ * runtime permissions (protection level dangerous) as they may change at
+ * runtime. All other permissions (protection level normal and signature)
+ * can be cached as they never change. All permissions checked here are
+ * platform defined.
+ */
+
 namespace android {
 
 // Not valid until initialized by AudioFlinger constructor.  It would have to be
 // re-initialized if the process containing AudioFlinger service forks (which it doesn't).
 pid_t getpid_cached;
 
-bool recordingAllowed() {
+bool recordingAllowed(const String16& opPackageName) {
+    // Note: We are getting the UID from the calling IPC thread state because all
+    // clients that perform recording create AudioRecord in their own processes
+    // and the system does not create AudioRecord objects on behalf of apps. This
+    // differs from playback where in some situations the system recreates AudioTrack
+    // instances associated with a client's MediaPlayer on behalf of this client.
+    // In the latter case we have to store the client UID and pass in along for
+    // security checks.
+
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sRecordAudio("android.permission.RECORD_AUDIO");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sRecordAudio);
-    if (!ok) ALOGE("Request requires android.permission.RECORD_AUDIO");
-    return ok;
+
+    // IMPORTANT: Don't use PermissionCache - a runtime permission and may change.
+    const bool ok = checkCallingPermission(sRecordAudio);
+    if (!ok) {
+        ALOGE("Request requires android.permission.RECORD_AUDIO");
+        return false;
+    }
+
+    const uid_t uid = IPCThreadState::self()->getCallingUid();
+    String16 checkedOpPackageName = opPackageName;
+
+    // In some cases the calling code has no access to the package it runs under.
+    // For example, code using the wilhelm framework's OpenSL-ES APIs. In this
+    // case we will get the packages for the calling UID and pick the first one
+    // for attributing the app op. This will work correctly for runtime permissions
+    // as for legacy apps we will toggle the app op for all packages in the UID.
+    // The caveat is that the operation may be attributed to the wrong package and
+    // stats based on app ops may be slightly off.
+    if (checkedOpPackageName.size() <= 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16("permission"));
+        if (binder == 0) {
+            ALOGE("Cannot get permission service");
+            return false;
+        }
+
+        sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
+        Vector<String16> packages;
+
+        permCtrl->getPackagesForUid(uid, packages);
+
+        if (packages.isEmpty()) {
+            ALOGE("No packages for calling UID");
+            return false;
+        }
+        checkedOpPackageName = packages[0];
+    }
+
+    AppOpsManager appOps;
+    if (appOps.noteOp(AppOpsManager::OP_RECORD_AUDIO, uid, checkedOpPackageName)
+            != AppOpsManager::MODE_ALLOWED) {
+        ALOGE("Request denied by app op OP_RECORD_AUDIO");
+        return false;
+    }
+
+    return true;
 }
 
 bool captureAudioOutputAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sCaptureAudioOutput);
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sCaptureAudioOutput);
     if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
     return ok;
 }
 
 bool captureHotwordAllowed() {
     static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
-    bool ok = checkCallingPermission(sCaptureHotwordAllowed);
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sCaptureHotwordAllowed);
     if (!ok) ALOGE("android.permission.CAPTURE_AUDIO_HOTWORD");
     return ok;
 }
@@ -53,15 +112,16 @@
 bool settingsAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sAudioSettings);
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sAudioSettings);
     if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
     return ok;
 }
 
 bool modifyAudioRoutingAllowed() {
     static const String16 sModifyAudioRoutingAllowed("android.permission.MODIFY_AUDIO_ROUTING");
-    bool ok = checkCallingPermission(sModifyAudioRoutingAllowed);
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkCallingPermission(sModifyAudioRoutingAllowed);
     if (!ok) ALOGE("android.permission.MODIFY_AUDIO_ROUTING");
     return ok;
 }
@@ -69,7 +129,7 @@
 bool dumpAllowed() {
     // don't optimize for same pid, since mediaserver never dumps itself
     static const String16 sDump("android.permission.DUMP");
-    // OK to use PermissionCache; this is a system permission
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
     bool ok = PermissionCache::checkCallingPermission(sDump);
     // convention is for caller to dump an error message to fd instead of logging here
     //if (!ok) ALOGE("Request requires android.permission.DUMP");
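
Note: recordingAllowed() now takes the op package name so the RECORD_AUDIO app op can be noted against the requesting app, with a fallback to the first package of the calling UID when the caller cannot supply one. A minimal sketch of how a service entry point is expected to use it; the surrounding function is hypothetical, only recordingAllowed() comes from this patch:

    status_t startCaptureForClient(const String16& opPackageName) {
        // Denied either by the RECORD_AUDIO permission or by OP_RECORD_AUDIO.
        if (!recordingAllowed(opPackageName)) {
            return PERMISSION_DENIED;
        }
        // ... proceed with capture attributed to opPackageName ...
        return NO_ERROR;
    }
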
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index df6f6f4..fba6dce 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -20,11 +20,10 @@
 
 extern pid_t getpid_cached;
 
-bool recordingAllowed();
+bool recordingAllowed(const String16& opPackageName);
 bool captureAudioOutputAllowed();
 bool captureHotwordAllowed();
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
 bool dumpAllowed();
-
 }
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4039564..2c4d801 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -500,6 +500,7 @@
         // mName will be set by concrete (non-virtual) subclass
         mDeathRecipient(new PMDeathRecipient(this))
 {
+    memset(&mPatch, 0, sizeof(struct audio_patch));
 }
 
 AudioFlinger::ThreadBase::~ThreadBase()
@@ -584,16 +585,16 @@
     return status;
 }
 
-void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event)
 {
     Mutex::Autolock _l(mLock);
-    sendIoConfigEvent_l(event, param);
+    sendIoConfigEvent_l(event);
 }
 
 // sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event)
 {
-    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, param);
+    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event);
     sendConfigEvent_l(configEvent);
 }
 
@@ -657,7 +658,7 @@
         } break;
         case CFG_EVENT_IO: {
             IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
-            audioConfigChanged(data->mEvent, data->mParam);
+            ioConfigChanged(data->mEvent);
         } break;
         case CFG_EVENT_SET_PARAMETER: {
             SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
@@ -1921,32 +1922,29 @@
     return out_s8;
 }
 
-void AudioFlinger::PlaybackThread::audioConfigChanged(int event, int param) {
-    AudioSystem::OutputDescriptor desc;
-    void *param2 = NULL;
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event) {
+    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+    ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
 
-    ALOGV("PlaybackThread::audioConfigChanged, thread %p, event %d, param %d", this, event,
-            param);
+    desc->mIoHandle = mId;
 
     switch (event) {
-    case AudioSystem::OUTPUT_OPENED:
-    case AudioSystem::OUTPUT_CONFIG_CHANGED:
-        desc.channelMask = mChannelMask;
-        desc.samplingRate = mSampleRate;
-        desc.format = mFormat;
-        desc.frameCount = mNormalFrameCount; // FIXME see
+    case AUDIO_OUTPUT_OPENED:
+    case AUDIO_OUTPUT_CONFIG_CHANGED:
+        desc->mPatch = mPatch;
+        desc->mChannelMask = mChannelMask;
+        desc->mSamplingRate = mSampleRate;
+        desc->mFormat = mFormat;
+        desc->mFrameCount = mNormalFrameCount; // FIXME see
                                              // AudioFlinger::frameCount(audio_io_handle_t)
-        desc.latency = latency_l();
-        param2 = &desc;
+        desc->mLatency = latency_l();
         break;
 
-    case AudioSystem::STREAM_CONFIG_CHANGED:
-        param2 = &param;
-    case AudioSystem::OUTPUT_CLOSED:
+    case AUDIO_OUTPUT_CLOSED:
     default:
         break;
     }
-    mAudioFlinger->audioConfigChanged(event, mId, param2);
+    mAudioFlinger->ioConfigChanged(event, desc);
 }
 
 void AudioFlinger::PlaybackThread::writeCallback()
@@ -2055,6 +2053,9 @@
             ALOGW("direct output implements resume but not pause");
         }
     }
+    if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
+        LOG_ALWAYS_FATAL("HW_AV_SYNC requested but HAL does not implement pause and resume");
+    }
 
     if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
         // For best precision, we use float instead of the associated output
@@ -2933,21 +2934,79 @@
     return INVALID_OPERATION;
 }
 
+status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
+                                                          audio_patch_handle_t *handle)
+{
+    // if !&IDLE, holds the FastMixer state to restore after new parameters processed
+    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
+    if (mFastMixer != 0) {
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        if (!(state->mCommand & FastMixerState::IDLE)) {
+            previousCommand = state->mCommand;
+            state->mCommand = FastMixerState::HOT_IDLE;
+            sq->end();
+            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+        } else {
+            sq->end(false /*didModify*/);
+        }
+    }
+    status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
+
+    if (!(previousCommand & FastMixerState::IDLE)) {
+        ALOG_ASSERT(mFastMixer != 0);
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
+        state->mCommand = previousCommand;
+        sq->end();
+        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+    }
+
+    return status;
+}
+
 status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
     status_t status = NO_ERROR;
-    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        // store new device and send to effects
-        audio_devices_t type = AUDIO_DEVICE_NONE;
-        for (unsigned int i = 0; i < patch->num_sinks; i++) {
-            type |= patch->sinks[i].ext.device.type;
-        }
-        mOutDevice = type;
-        for (size_t i = 0; i < mEffectChains.size(); i++) {
-            mEffectChains[i]->setDevice_l(mOutDevice);
+
+    // store new device and send to effects
+    audio_devices_t type = AUDIO_DEVICE_NONE;
+    for (unsigned int i = 0; i < patch->num_sinks; i++) {
+        type |= patch->sinks[i].ext.device.type;
+    }
+
+#ifdef ADD_BATTERY_DATA
+    // when changing the audio output device, call addBatteryData to notify
+    // the change
+    if (mOutDevice != type) {
+        uint32_t params = 0;
+        // check whether speaker is on
+        if (type & AUDIO_DEVICE_OUT_SPEAKER) {
+            params |= IMediaPlayerService::kBatteryDataSpeakerOn;
         }
 
+        audio_devices_t deviceWithoutSpeaker
+            = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
+        // check if any other device (except speaker) is on
+        if (type & deviceWithoutSpeaker) {
+            params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
+        }
+
+        if (params != 0) {
+            addBatteryData(params);
+        }
+    }
+#endif
+
+    for (size_t i = 0; i < mEffectChains.size(); i++) {
+        mEffectChains[i]->setDevice_l(type);
+    }
+    mOutDevice = type;
+    mPatch = *patch;
+
+    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
         status = hwDevice->create_audio_patch(hwDevice,
                                                patch->num_sources,
@@ -2956,19 +3015,72 @@
                                                patch->sinks,
                                                handle);
     } else {
-        ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL");
+        char *address;
+        if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
+            //FIXME: we only support address on first sink with HAL version < 3.0
+            address = audio_device_address_to_parameter(
+                                                        patch->sinks[0].ext.device.type,
+                                                        patch->sinks[0].ext.device.address);
+        } else {
+            address = (char *)calloc(1, 1);
+        }
+        AudioParameter param = AudioParameter(String8(address));
+        free(address);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
+        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                param.toString().string());
+        *handle = AUDIO_PATCH_HANDLE_NONE;
     }
+    sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
+    return status;
+}
+
+status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
+{
+    // if !&IDLE, holds the FastMixer state to restore after new parameters processed
+    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
+    if (mFastMixer != 0) {
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        if (!(state->mCommand & FastMixerState::IDLE)) {
+            previousCommand = state->mCommand;
+            state->mCommand = FastMixerState::HOT_IDLE;
+            sq->end();
+            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+        } else {
+            sq->end(false /*didModify*/);
+        }
+    }
+
+    status_t status = PlaybackThread::releaseAudioPatch_l(handle);
+
+    if (!(previousCommand & FastMixerState::IDLE)) {
+        ALOG_ASSERT(mFastMixer != 0);
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
+        state->mCommand = previousCommand;
+        sq->end();
+        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+    }
+
     return status;
 }
 
 status_t AudioFlinger::PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
 {
     status_t status = NO_ERROR;
+
+    mOutDevice = AUDIO_DEVICE_NONE;
+
     if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
         status = hwDevice->release_audio_patch(hwDevice, handle);
     } else {
-        ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL");
+        AudioParameter param;
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
+        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                param.toString().string());
     }
     return status;
 }
@@ -4052,7 +4164,7 @@
             audio_devices_t deviceWithoutSpeaker
                 = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
             // check if any other device (except speaker) is on
-            if (value & deviceWithoutSpeaker ) {
+            if (value & deviceWithoutSpeaker) {
                 params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
             }
 
@@ -4094,7 +4206,7 @@
                 }
                 mTracks[i]->mName = name;
             }
-            sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+            sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
         }
     }
 
@@ -4266,9 +4378,9 @@
         sp<Track> l = mLatestActiveTrack.promote();
         bool last = l.get() == track;
 
-        if (mHwSupportsPause && track->isPausing()) {
+        if (track->isPausing()) {
             track->setPaused();
-            if (last && !mHwPaused) {
+            if (mHwSupportsPause && last && !mHwPaused) {
                 doHwPause = true;
                 mHwPaused = true;
             }
@@ -4278,13 +4390,11 @@
             if (last) {
                 flushPending = true;
             }
-        } else if (mHwSupportsPause && track->isResumePending()){
+        } else if (track->isResumePending()) {
             track->resumeAck();
-            if (last) {
-                if (mHwPaused) {
-                    doHwResume = true;
-                    mHwPaused = false;
-                }
+            if (last && mHwPaused) {
+                doHwResume = true;
+                mHwPaused = false;
             }
         }
 
@@ -4546,7 +4656,7 @@
         }
         if (status == NO_ERROR && reconfig) {
             readOutputParameters_l();
-            sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+            sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
         }
     }
 
@@ -6592,7 +6702,7 @@
             }
             if (status == NO_ERROR) {
                 readInputParameters_l();
-                sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
+                sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
             }
         }
     }
@@ -6613,26 +6723,27 @@
     return out_s8;
 }
 
-void AudioFlinger::RecordThread::audioConfigChanged(int event, int param __unused) {
-    AudioSystem::OutputDescriptor desc;
-    const void *param2 = NULL;
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event) {
+    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+
+    desc->mIoHandle = mId;
 
     switch (event) {
-    case AudioSystem::INPUT_OPENED:
-    case AudioSystem::INPUT_CONFIG_CHANGED:
-        desc.channelMask = mChannelMask;
-        desc.samplingRate = mSampleRate;
-        desc.format = mFormat;
-        desc.frameCount = mFrameCount;
-        desc.latency = 0;
-        param2 = &desc;
+    case AUDIO_INPUT_OPENED:
+    case AUDIO_INPUT_CONFIG_CHANGED:
+        desc->mPatch = mPatch;
+        desc->mChannelMask = mChannelMask;
+        desc->mSamplingRate = mSampleRate;
+        desc->mFormat = mFormat;
+        desc->mFrameCount = mFrameCount;
+        desc->mLatency = 0;
         break;
 
-    case AudioSystem::INPUT_CLOSED:
+    case AUDIO_INPUT_CLOSED:
     default:
         break;
     }
-    mAudioFlinger->audioConfigChanged(event, mId, param2);
+    mAudioFlinger->ioConfigChanged(event, desc);
 }
 
 void AudioFlinger::RecordThread::readInputParameters_l()
@@ -6775,33 +6886,35 @@
                                                           audio_patch_handle_t *handle)
 {
     status_t status = NO_ERROR;
-    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        // store new device and send to effects
-        mInDevice = patch->sources[0].ext.device.type;
+
+    // store new device and send to effects
+    mInDevice = patch->sources[0].ext.device.type;
+    mPatch = *patch;
+    for (size_t i = 0; i < mEffectChains.size(); i++) {
+        mEffectChains[i]->setDevice_l(mInDevice);
+    }
+
+    // disable AEC and NS if the device is a BT SCO headset supporting those
+    // pre processings
+    if (mTracks.size() > 0) {
+        bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
+                            mAudioFlinger->btNrecIsOff();
+        for (size_t i = 0; i < mTracks.size(); i++) {
+            sp<RecordTrack> track = mTracks[i];
+            setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
+            setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
+        }
+    }
+
+    // store new source and send to effects
+    if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
+        mAudioSource = patch->sinks[0].ext.mix.usecase.source;
         for (size_t i = 0; i < mEffectChains.size(); i++) {
-            mEffectChains[i]->setDevice_l(mInDevice);
+            mEffectChains[i]->setAudioSource_l(mAudioSource);
         }
+    }
 
-        // disable AEC and NS if the device is a BT SCO headset supporting those
-        // pre processings
-        if (mTracks.size() > 0) {
-            bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
-                                mAudioFlinger->btNrecIsOff();
-            for (size_t i = 0; i < mTracks.size(); i++) {
-                sp<RecordTrack> track = mTracks[i];
-                setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
-                setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
-            }
-        }
-
-        // store new source and send to effects
-        if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
-            mAudioSource = patch->sinks[0].ext.mix.usecase.source;
-            for (size_t i = 0; i < mEffectChains.size(); i++) {
-                mEffectChains[i]->setAudioSource_l(mAudioSource);
-            }
-        }
-
+    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
         status = hwDevice->create_audio_patch(hwDevice,
                                                patch->num_sources,
@@ -6810,19 +6923,44 @@
                                                patch->sinks,
                                                handle);
     } else {
-        ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL");
+        char *address;
+        if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
+            address = audio_device_address_to_parameter(
+                                                patch->sources[0].ext.device.type,
+                                                patch->sources[0].ext.device.address);
+        } else {
+            address = (char *)calloc(1, 1);
+        }
+        AudioParameter param = AudioParameter(String8(address));
+        free(address);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
+                     (int)patch->sources[0].ext.device.type);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
+                                         (int)patch->sinks[0].ext.mix.usecase.source);
+        status = mInput->stream->common.set_parameters(&mInput->stream->common,
+                param.toString().string());
+        *handle = AUDIO_PATCH_HANDLE_NONE;
     }
+
+    sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
+
     return status;
 }
 
 status_t AudioFlinger::RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
 {
     status_t status = NO_ERROR;
+
+    mInDevice = AUDIO_DEVICE_NONE;
+
     if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
         status = hwDevice->release_audio_patch(hwDevice, handle);
     } else {
-        ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL");
+        AudioParameter param;
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
+        status = mInput->stream->common.set_parameters(&mInput->stream->common,
+                param.toString().string());
     }
     return status;
 }
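
Note: MixerThread::createAudioPatch_l() and releaseAudioPatch_l() both bracket the base-class call with the same FastMixer sequence: park the fast mixer in HOT_IDLE, apply the change, then restore the previous command. A sketch of that idiom written as a scope guard; FastMixerIdleGuard is hypothetical, the state-queue calls are the ones used in the hunks above:

    class FastMixerIdleGuard {
    public:
        explicit FastMixerIdleGuard(const sp<FastMixer>& fastMixer)
            : mSq(NULL), mPrevious(FastMixerState::HOT_IDLE) {
            if (fastMixer == 0) return;
            mSq = fastMixer->sq();
            FastMixerState *state = mSq->begin();
            if (!(state->mCommand & FastMixerState::IDLE)) {
                mPrevious = state->mCommand;          // remember what to restore
                state->mCommand = FastMixerState::HOT_IDLE;
                mSq->end();
                mSq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
            } else {
                mSq->end(false /*didModify*/);
                mSq = NULL;                           // already idle, nothing to restore
            }
        }
        ~FastMixerIdleGuard() {
            if (mSq == NULL) return;
            FastMixerState *state = mSq->begin();
            state->mCommand = mPrevious;              // resume the previous command
            mSq->end();
            mSq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
        }
    private:
        FastMixerStateQueue     *mSq;
        FastMixerState::Command  mPrevious;
    };
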
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b7c1ed1..0a5597f 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -100,22 +100,21 @@
 
     class IoConfigEventData : public ConfigEventData {
     public:
-        IoConfigEventData(int event, int param) :
-            mEvent(event), mParam(param) {}
+        IoConfigEventData(audio_io_config_event event) :
+            mEvent(event) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam);
+            snprintf(buffer, size, "IO event: event %d\n", mEvent);
         }
 
-        const int mEvent;
-        const int mParam;
+        const audio_io_config_event mEvent;
     };
 
     class IoConfigEvent : public ConfigEvent {
     public:
-        IoConfigEvent(int event, int param) :
+        IoConfigEvent(audio_io_config_event event) :
             ConfigEvent(CFG_EVENT_IO) {
-            mData = new IoConfigEventData(event, param);
+            mData = new IoConfigEventData(event);
         }
         virtual ~IoConfigEvent() {}
     };
@@ -250,13 +249,13 @@
                                                     status_t& status) = 0;
     virtual     status_t    setParameters(const String8& keyValuePairs);
     virtual     String8     getParameters(const String8& keys) = 0;
-    virtual     void        audioConfigChanged(int event, int param = 0) = 0;
+    virtual     void        ioConfigChanged(audio_io_config_event event) = 0;
                 // sendConfigEvent_l() must be called with ThreadBase::mLock held
                 // Can temporarily release the lock if waiting for a reply from
                 // processConfigEvents_l().
                 status_t    sendConfigEvent_l(sp<ConfigEvent>& event);
-                void        sendIoConfigEvent(int event, int param = 0);
-                void        sendIoConfigEvent_l(int event, int param = 0);
+                void        sendIoConfigEvent(audio_io_config_event event);
+                void        sendIoConfigEvent_l(audio_io_config_event event);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
                 status_t    sendSetParameterConfigEvent_l(const String8& keyValuePair);
                 status_t    sendCreateAudioPatchConfigEvent(const struct audio_patch *patch,
@@ -427,6 +426,7 @@
                 bool                    mStandby;     // Whether thread is currently in standby.
                 audio_devices_t         mOutDevice;   // output device
                 audio_devices_t         mInDevice;    // input device
+                struct audio_patch      mPatch;
                 audio_source_t          mAudioSource;
 
                 const audio_io_handle_t mId;
@@ -560,7 +560,7 @@
                                 { return android_atomic_acquire_load(&mSuspended) > 0; }
 
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        audioConfigChanged(int event, int param = 0);
+    virtual     void        ioConfigChanged(audio_io_config_event event);
                 status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
                 // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency.
                 // Consider also removing and passing an explicit mMainBuffer initialization
@@ -713,8 +713,9 @@
                                    audio_patch_handle_t *handle);
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
 
-                bool        usesHwAvSync() const { return (mType == DIRECT) && (mOutput != NULL) &&
-                                                (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
+                bool        usesHwAvSync() const { return (mType == DIRECT) && (mOutput != NULL)
+                                    && mHwSupportsPause
+                                    && (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
 
 private:
 
@@ -865,6 +866,10 @@
     virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
     virtual     uint32_t    correctLatency_l(uint32_t latency) const;
 
+    virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
+                                   audio_patch_handle_t *handle);
+    virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
+
                 AudioMixer* mAudioMixer;    // normal mixer
 private:
                 // one-time initialization, no locks required
@@ -1226,7 +1231,7 @@
                                                status_t& status);
     virtual void        cacheParameters_l() {}
     virtual String8     getParameters(const String8& keys);
-    virtual void        audioConfigChanged(int event, int param = 0);
+    virtual void        ioConfigChanged(audio_io_config_event event);
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
     virtual status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
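
Note: with mParam gone, an I/O config notification is now just the event itself queued to the thread, which rebuilds the full AudioIoDescriptor from its own state when the event is processed. A two-line sketch of the producer side, mirroring what sendIoConfigEvent() does; nothing here is new API:

    Mutex::Autolock _l(mLock);                        // ThreadBase::mLock
    sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED); // queues an IoConfigEvent;
    // processConfigEvents_l() later reaches CFG_EVENT_IO and calls
    // ioConfigChanged(event), which fills an AudioIoDescriptor from the thread's
    // current mPatch/mSampleRate/mFormat and forwards it to registered clients.
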
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c6e9745..1b03060 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -404,8 +404,7 @@
     mIsInvalid(false),
     mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
-    mFlushHwPending(false),
-    mPreviousTimestampValid(false)
+    mFlushHwPending(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -863,7 +862,6 @@
         if (mState == FLUSHED) {
             mState = IDLE;
         }
-        mPreviousTimestampValid = false;
     }
 }
 
@@ -885,12 +883,10 @@
 {
     // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
     if (isFastTrack()) {
-        // FIXME no lock held to set mPreviousTimestampValid = false
         return INVALID_OPERATION;
     }
     sp<ThreadBase> thread = mThread.promote();
     if (thread == 0) {
-        // FIXME no lock held to set mPreviousTimestampValid = false
         return INVALID_OPERATION;
     }
 
@@ -900,7 +896,6 @@
     status_t result = INVALID_OPERATION;
     if (!isOffloaded() && !isDirect()) {
         if (!playbackThread->mLatchQValid) {
-            mPreviousTimestampValid = false;
             return INVALID_OPERATION;
         }
         // FIXME Not accurate under dynamic changes of sample rate and speed.
@@ -919,10 +914,7 @@
         uint32_t framesWritten = i >= 0 ?
                 playbackThread->mLatchQ.mFramesReleased[i] :
                 mAudioTrackServerProxy->framesReleased();
-        if (framesWritten < unpresentedFrames) {
-            mPreviousTimestampValid = false;
-            // return invalid result
-        } else {
+        if (framesWritten >= unpresentedFrames) {
             timestamp.mPosition = framesWritten - unpresentedFrames;
             timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
             result = NO_ERROR;
@@ -931,41 +923,6 @@
         result = playbackThread->getTimestamp_l(timestamp);
     }
 
-    // Prevent retrograde motion in timestamp.
-    if (result == NO_ERROR) {
-        if (mPreviousTimestampValid) {
-            if (timestamp.mTime.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
-                    (timestamp.mTime.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
-                    timestamp.mTime.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
-                ALOGW("WARNING - retrograde timestamp time");
-                // FIXME Consider blocking this from propagating upwards.
-            }
-
-            // Looking at signed delta will work even when the timestamps
-            // are wrapping around.
-            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
-                    - mPreviousTimestamp.mPosition);
-            // position can bobble slightly as an artifact; this hides the bobble
-            static const int32_t MINIMUM_POSITION_DELTA = 8;
-            if (deltaPosition < 0) {
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
-                ALOGW("WARNING - retrograde timestamp position corrected,"
-                        " %d = %u - %u, (at %llu, %llu nanos)",
-                        deltaPosition,
-                        timestamp.mPosition,
-                        mPreviousTimestamp.mPosition,
-                        TIME_TO_NANOS(timestamp.mTime),
-                        TIME_TO_NANOS(mPreviousTimestamp.mTime));
-#undef TIME_TO_NANOS
-            }
-            if (deltaPosition < MINIMUM_POSITION_DELTA) {
-                // Current timestamp is bad. Use last valid timestamp.
-                timestamp = mPreviousTimestamp;
-            }
-        }
-        mPreviousTimestamp = timestamp;
-        mPreviousTimestampValid = true;
-    }
     return result;
 }
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 58c65fa..8523fc5 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -106,6 +106,7 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
@@ -129,10 +130,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
                                      audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId,
                                      input_type_t *inputType) = 0;
     // indicates to the audio policy manager that the input starts being used.
     virtual status_t startInput(audio_io_handle_t input,
@@ -208,7 +211,7 @@
                                       struct audio_patch *patches,
                                       unsigned int *generation) = 0;
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
-    virtual void clearAudioPatches(uid_t uid) = 0;
+    virtual void releaseResourcesForUid(uid_t uid) = 0;
 
     virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                            audio_io_handle_t *ioHandle,
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index a2327ee..e6a767f 100755
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -60,7 +60,7 @@
  *
  * @return true if the device is a virtual one, false otherwise.
  */
-static bool is_virtual_input_device(audio_devices_t device)
+static inline bool is_virtual_input_device(audio_devices_t device)
 {
     if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
         device &= ~AUDIO_DEVICE_BIT_IN;
@@ -78,7 +78,7 @@
  *
  * @return true if the device needs distinguish on address, false otherwise..
  */
-static bool device_distinguishes_on_address(audio_devices_t device)
+static inline bool device_distinguishes_on_address(audio_devices_t device)
 {
     return ((device & APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL & ~AUDIO_DEVICE_BIT_IN) != 0);
 }
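
Note: marking these header-defined helpers static inline avoids the unused-function warnings that a plain static definition triggers in every translation unit that includes policy.h without calling them, while keeping internal linkage. Illustration only, with a hypothetical helper name:

    // In a shared header: 'static inline' keeps internal linkage per TU
    // without warning in TUs that never call it.
    static inline bool has_input_bit(audio_devices_t device)
    {
        return (device & AUDIO_DEVICE_BIT_IN) != 0;
    }
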
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index aa37eec..d1a2f4f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -29,7 +29,7 @@
 class DeviceDescriptor : public AudioPort, public AudioPortConfig
 {
 public:
-    DeviceDescriptor(const String8& name, audio_devices_t type);
+    DeviceDescriptor(audio_devices_t type);
 
     virtual ~DeviceDescriptor() {}
 
@@ -50,10 +50,9 @@
     status_t dump(int fd, int spaces, int index) const;
     void log() const;
 
+    String8 mTag;
     String8 mAddress;
 
-    static String8  emptyNameStr;
-
 private:
     audio_devices_t     mDeviceType;
     audio_port_handle_t mId;
@@ -73,12 +72,12 @@
     audio_devices_t types() const { return mDeviceTypes; }
 
     void loadDevicesFromType(audio_devices_t types);
-    void loadDevicesFromName(char *name, const DeviceVector& declaredDevices);
+    void loadDevicesFromTag(char *tag, const DeviceVector& declaredDevices);
 
     sp<DeviceDescriptor> getDevice(audio_devices_t type, String8 address) const;
     DeviceVector getDevicesFromType(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
-    sp<DeviceDescriptor> getDeviceFromName(const String8& name) const;
+    sp<DeviceDescriptor> getDeviceFromTag(const String8& tag) const;
     DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address) const;
 
     audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 144d8ad..a278375 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -315,13 +315,15 @@
         mGlobalRefCount += delta;
     }
     if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mFlags & MIX_FLAG_NOTIFY_ACTIVITY) != 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
             mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
                     MIX_STATE_MIXING);
         }
 
     } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mFlags & MIX_FLAG_NOTIFY_ACTIVITY) != 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
             mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
                     MIX_STATE_IDLE);
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 77fc0b9..6f1998c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -176,14 +176,14 @@
 
     ssize_t index = indexOfKey(address);
     if (index < 0) {
-        ALOGW("getInputForAttr() no policy for address %s", address.string());
+        ALOGW("getInputMixForAttr() no policy for address %s", address.string());
         return BAD_VALUE;
     }
     sp<AudioPolicyMix> audioPolicyMix = valueAt(index);
     AudioMix *mix = audioPolicyMix->getMix();
 
     if (mix->mMixType != MIX_TYPE_PLAYERS) {
-        ALOGW("getInputForAttr() bad policy mix type for address %s", address.string());
+        ALOGW("getInputMixForAttr() bad policy mix type for address %s", address.string());
         return BAD_VALUE;
     }
     *policyMix = mix;
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index 9ab1d61..89ef045 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -218,7 +218,7 @@
     node = node->first_child;
     while (node) {
         if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            availableOutputDevices.loadDevicesFromName((char *)node->value,
+            availableOutputDevices.loadDevicesFromTag((char *)node->value,
                                                         declaredDevices);
             ALOGV("loadGlobalConfig() Attached Output Devices %08x",
                   availableOutputDevices.types());
@@ -228,13 +228,13 @@
                     ARRAY_SIZE(sDeviceTypeToEnumTable),
                     (char *)node->value);
             if (device != AUDIO_DEVICE_NONE) {
-                defaultOutputDevice = new DeviceDescriptor(String8("default-output"), device);
+                defaultOutputDevice = new DeviceDescriptor(device);
             } else {
                 ALOGW("loadGlobalConfig() default device not specified");
             }
             ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
         } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            availableInputDevices.loadDevicesFromName((char *)node->value,
+            availableInputDevices.loadDevicesFromTag((char *)node->value,
                                                        declaredDevices);
             ALOGV("loadGlobalConfig() Available InputDevices %08x", availableInputDevices.types());
         } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 0715eea..797077a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -24,13 +24,11 @@
 
 namespace android {
 
-String8 DeviceDescriptor::emptyNameStr = String8("");
-
-DeviceDescriptor::DeviceDescriptor(const String8& name, audio_devices_t type) :
-    AudioPort(name, AUDIO_PORT_TYPE_DEVICE,
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type) :
+    AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
               audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
                                              AUDIO_PORT_ROLE_SOURCE),
-    mAddress(""), mDeviceType(type), mId(0)
+    mTag(""), mAddress(""), mDeviceType(type), mId(0)
 {
 
 }
@@ -142,24 +140,21 @@
         uint32_t i = 31 - __builtin_clz(types);
         uint32_t type = 1 << i;
         types &= ~type;
-        add(new DeviceDescriptor(String8("device_type"), type | role_bit));
+        add(new DeviceDescriptor(type | role_bit));
     }
 }
 
-void DeviceVector::loadDevicesFromName(char *name,
+void DeviceVector::loadDevicesFromTag(char *tag,
                                        const DeviceVector& declaredDevices)
 {
-    char *devName = strtok(name, "|");
-    while (devName != NULL) {
-        if (strlen(devName) != 0) {
+    char *devTag = strtok(tag, "|");
+    while (devTag != NULL) {
+        if (strlen(devTag) != 0) {
             audio_devices_t type = ConfigParsingUtils::stringToEnum(sDeviceTypeToEnumTable,
                                  ARRAY_SIZE(sDeviceTypeToEnumTable),
-                                 devName);
+                                 devTag);
             if (type != AUDIO_DEVICE_NONE) {
-                devName = (char *)ConfigParsingUtils::enumToString(sDeviceNameToEnumTable,
-                                                           ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                           type);
-                sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(devName), type);
+                sp<DeviceDescriptor> dev = new DeviceDescriptor(type);
                 if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
                         type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
                     dev->mAddress = String8("0");
@@ -167,13 +162,13 @@
                 add(dev);
             } else {
                 sp<DeviceDescriptor> deviceDesc =
-                        declaredDevices.getDeviceFromName(String8(devName));
+                        declaredDevices.getDeviceFromTag(String8(devTag));
                 if (deviceDesc != 0) {
                     add(deviceDesc);
                 }
             }
          }
-         devName = strtok(NULL, "|");
+         devTag = strtok(NULL, "|");
      }
 }
 
@@ -239,11 +234,11 @@
     return devices;
 }
 
-sp<DeviceDescriptor> DeviceVector::getDeviceFromName(const String8& name) const
+sp<DeviceDescriptor> DeviceVector::getDeviceFromTag(const String8& tag) const
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mName == name) {
+        if (itemAt(i)->mTag == tag) {
             device = itemAt(i);
             break;
         }
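
The loadDevicesFromTag() rework above keeps the existing parsing model: the
attached_output_devices / attached_input_devices value from audio_policy.conf is a
"|"-separated list whose tokens are either AUDIO_DEVICE_* enum strings or the tags of
devices declared by the hw module. A minimal standalone sketch of that tokenization
(illustrative only, not part of the patch; the device names are just examples):

    #include <cstdio>
    #include <cstring>

    // Mirrors the strtok() loop in DeviceVector::loadDevicesFromTag(); the input must be
    // writable because strtok() modifies it in place.
    static void parseDeviceTags(char *line) {
        for (char *tag = strtok(line, "|"); tag != NULL; tag = strtok(NULL, "|")) {
            if (strlen(tag) == 0) {
                continue;
            }
            // The real code first matches the token against sDeviceTypeToEnumTable and,
            // failing that, looks it up by mTag among the module's declared devices.
            printf("device token: %s\n", tag);
        }
    }

    int main() {
        char line[] = "AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_WIRED_HEADSET";
        parseDeviceTags(line);
        return 0;
    }
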
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index e955447..7e2050b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -58,7 +58,7 @@
         } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
             profile->loadInChannels((char *)node->value);
         } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
+            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
                                                            mDeclaredDevices);
         } else if (strcmp(node->name, FLAGS_TAG) == 0) {
             profile->mFlags = ConfigParsingUtils::parseInputFlagNames((char *)node->value);
@@ -105,7 +105,7 @@
         } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
             profile->loadOutChannels((char *)node->value);
         } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
+            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
                                                            mDeclaredDevices);
         } else if (strcmp(node->name, FLAGS_TAG) == 0) {
             profile->mFlags = ConfigParsingUtils::parseOutputFlagNames((char *)node->value);
@@ -154,7 +154,8 @@
         ALOGW("loadDevice() bad type %08x", type);
         return BAD_VALUE;
     }
-    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(String8(root->name), type);
+    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(type);
+    deviceDesc->mTag = String8(root->name);
 
     node = root->first_child;
     while (node) {
@@ -172,8 +173,8 @@
         node = node->next;
     }
 
-    ALOGV("loadDevice() adding device name %s type %08x address %s",
-          deviceDesc->mName.string(), type, deviceDesc->mAddress.string());
+    ALOGV("loadDevice() adding device tag %s type %08x address %s",
+          deviceDesc->mTag.string(), type, deviceDesc->mAddress.string());
 
     mDeclaredDevices.add(deviceDesc);
 
@@ -189,7 +190,7 @@
     profile->mChannelMasks.add(config->channel_mask);
     profile->mFormats.add(config->format);
 
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(name, device);
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->mAddress = address;
     profile->mSupportedDevices.add(devDesc);
 
@@ -220,7 +221,7 @@
     profile->mChannelMasks.add(config->channel_mask);
     profile->mFormats.add(config->format);
 
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(name, device);
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->mAddress = address;
     profile->mSupportedDevices.add(devDesc);
 
@@ -350,7 +351,8 @@
     }
 
     sp<DeviceDescriptor> devDesc =
-            new DeviceDescriptor(String8(device_name != NULL ? device_name : ""), device);
+            new DeviceDescriptor(device);
+    devDesc->mName = device_name;
     devDesc->mAddress = address;
     return devDesc;
 }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ba3fcaf..0c02d93 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -620,6 +620,7 @@
                                               audio_io_handle_t *output,
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
+                                              uid_t uid,
                                               uint32_t samplingRate,
                                               audio_format_t format,
                                               audio_channel_mask_t channelMask,
@@ -659,8 +660,22 @@
         return BAD_VALUE;
     }
 
-    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
-          attributes.usage, attributes.content_type, attributes.tags, attributes.flags);
+    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
+            " session %d selectedDeviceId %d",
+            attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
+            session, selectedDeviceId);
+
+    *stream = streamTypefromAttributesInt(&attributes);
+
+    // Explicit routing?
+    sp<DeviceDescriptor> deviceDesc;
+    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+        if (mAvailableOutputDevices[i]->getId() == selectedDeviceId) {
+            deviceDesc = mAvailableOutputDevices[i];
+            break;
+        }
+    }
+    mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
 
     routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
@@ -672,24 +687,14 @@
     ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
           device, samplingRate, format, channelMask, flags);
 
-    *stream = streamTypefromAttributesInt(&attributes);
     *output = getOutputForDevice(device, session, *stream,
                                  samplingRate, format, channelMask,
                                  flags, offloadInfo);
     if (*output == AUDIO_IO_HANDLE_NONE) {
+        mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
     }
 
-    // Explicit routing?
-    sp<DeviceDescriptor> deviceDesc;
-
-    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
-        if (mAvailableOutputDevices[i]->getId() == selectedDeviceId) {
-            deviceDesc = mAvailableOutputDevices[i];
-            break;
-        }
-    }
-    mOutputRoutes.addRoute(session, *stream, deviceDesc);
     return NO_ERROR;
 }
 
@@ -966,24 +971,26 @@
 
     sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
+    // Routing?
+    mOutputRoutes.incRouteActivity(session);
+
     audio_devices_t newDevice;
     if (outputDesc->mPolicyMix != NULL) {
         newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
     } else if (mOutputRoutes.hasRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+        checkStrategyRoute(getStrategy(stream), output);
     } else {
         newDevice = AUDIO_DEVICE_NONE;
     }
 
     uint32_t delayMs = 0;
 
-    // Routing?
-    mOutputRoutes.incRouteActivity(session);
-
     status_t status = startSource(outputDesc, stream, newDevice, &delayMs);
 
     if (status != NO_ERROR) {
         mOutputRoutes.decRouteActivity(session);
+        return status;
     }
     // Automatically enable the remote submix input when output is started on a re routing mix
     // of type MIX_TYPE_RECORDERS
@@ -1112,15 +1119,22 @@
     }
 
     // Routing?
+    bool forceDeviceUpdate = false;
     if (outputDesc->mRefCount[stream] > 0) {
-        mOutputRoutes.decRouteActivity(session);
+        int activityCount = mOutputRoutes.decRouteActivity(session);
+        forceDeviceUpdate = (mOutputRoutes.hasRoute(session) && (activityCount == 0));
+
+        if (forceDeviceUpdate) {
+            checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
+        }
     }
 
-    return stopSource(outputDesc, stream);
+    return stopSource(outputDesc, stream, forceDeviceUpdate);
 }
 
 status_t AudioPolicyManager::stopSource(sp<AudioOutputDescriptor> outputDesc,
-                                            audio_stream_type_t stream)
+                                            audio_stream_type_t stream,
+                                            bool forceDeviceUpdate)
 {
     // always handle stream stop, check which stream type is stopping
     handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
@@ -1135,7 +1149,7 @@
         outputDesc->changeRefCount(stream, -1);
 
         // store time at which the stream was stopped - see isStreamActive()
-        if (outputDesc->mRefCount[stream] == 0) {
+        if (outputDesc->mRefCount[stream] == 0 || forceDeviceUpdate) {
             outputDesc->mStopTime[stream] = systemTime();
             audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
             // delay the device switch by twice the latency because stopOutput() is executed when
@@ -1222,10 +1236,12 @@
 status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             uid_t uid,
                                              uint32_t samplingRate,
                                              audio_format_t format,
                                              audio_channel_mask_t channelMask,
                                              audio_input_flags_t flags,
+                                             audio_port_handle_t selectedDeviceId,
                                              input_type_t *inputType)
 {
     ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
@@ -1247,6 +1263,16 @@
     }
     halInputSource = inputSource;
 
+    // Explicit routing?
+    sp<DeviceDescriptor> deviceDesc;
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        if (mAvailableInputDevices[i]->getId() == selectedDeviceId) {
+            deviceDesc = mAvailableInputDevices[i];
+            break;
+        }
+    }
+    mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
+
     if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
             strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
         status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
@@ -1378,6 +1404,7 @@
 
     addInput(*input, inputDesc);
     mpClientInterface->onAudioPortListUpdate();
+
     return NO_ERROR;
 }
 
@@ -1419,10 +1446,13 @@
         }
     }
 
-    if (inputDesc->mRefCount == 0) {
+    // Routing?
+    mInputRoutes.incRouteActivity(session);
+
+    if (inputDesc->mRefCount == 0 || mInputRoutes.hasRouteChanged(session)) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mFlags & MIX_FLAG_NOTIFY_ACTIVITY) != 0)) {
+                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
             mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
                     MIX_STATE_MIXING);
         }
@@ -1479,10 +1509,14 @@
     }
 
     inputDesc->mRefCount--;
+
+    // Routing?
+    mInputRoutes.decRouteActivity(session);
+
     if (inputDesc->mRefCount == 0) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mFlags & MIX_FLAG_NOTIFY_ACTIVITY) != 0)) {
+                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
             mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
                     MIX_STATE_IDLE);
         }
@@ -1521,6 +1555,10 @@
         ALOGW("releaseInput() releasing unknown input %d", input);
         return;
     }
+
+    // Routing
+    mInputRoutes.removeRoute(session);
+
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
     ALOG_ASSERT(inputDesc != 0);
 
@@ -2456,6 +2494,12 @@
     return status;
 }
 
+void AudioPolicyManager::releaseResourcesForUid(uid_t uid)
+{
+    clearAudioPatches(uid);
+    clearSessionRoutes(uid);
+}
+
 void AudioPolicyManager::clearAudioPatches(uid_t uid)
 {
     for (ssize_t i = (ssize_t)mAudioPatches.size() - 1; i >= 0; i--)  {
@@ -2466,6 +2510,82 @@
     }
 }
 
+
+void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
+                                            audio_io_handle_t outputToSkip)
+{
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+    for (size_t j = 0; j < mOutputs.size(); j++) {
+        if (mOutputs.keyAt(j) == outputToSkip) {
+            continue;
+        }
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(j);
+        if (!isStrategyActive(outputDesc, (routing_strategy)strategy)) {
+            continue;
+        }
+        // If the default device for this strategy is on another output mix,
+        // invalidate all tracks in this strategy to force reconnection.
+        // Otherwise select new device on the output mix.
+        if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
+            for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+                if (stream == AUDIO_STREAM_PATCH) {
+                    continue;
+                }
+                if (getStrategy((audio_stream_type_t)stream) == strategy) {
+                    mpClientInterface->invalidateStream((audio_stream_type_t)stream);
+                }
+            }
+        } else {
+            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+            setOutputDevice(outputDesc, newDevice, false);
+        }
+    }
+}
+
+void AudioPolicyManager::clearSessionRoutes(uid_t uid)
+{
+    // remove output routes associated with this uid
+    SortedVector<routing_strategy> affectedStrategies;
+    for (ssize_t i = (ssize_t)mOutputRoutes.size() - 1; i >= 0; i--)  {
+        sp<SessionRoute> route = mOutputRoutes.valueAt(i);
+        if (route->mUid == uid) {
+            mOutputRoutes.removeItemsAt(i);
+            if (route->mDeviceDescriptor != 0) {
+                affectedStrategies.add(getStrategy(route->mStreamType));
+            }
+        }
+    }
+    // reroute outputs if necessary
+    for (size_t i = 0; i < affectedStrategies.size(); i++) {
+        checkStrategyRoute(affectedStrategies[i], AUDIO_IO_HANDLE_NONE);
+    }
+
+    // remove input routes associated with this uid
+    SortedVector<audio_source_t> affectedSources;
+    for (ssize_t i = (ssize_t)mInputRoutes.size() - 1; i >= 0; i--)  {
+        sp<SessionRoute> route = mInputRoutes.valueAt(i);
+        if (route->mUid == uid) {
+            mInputRoutes.removeItemsAt(i);
+            if (route->mDeviceDescriptor != 0) {
+                affectedSources.add(route->mSource);
+            }
+        }
+    }
+    // reroute inputs if necessary
+    SortedVector<audio_io_handle_t> inputsToClose;
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
+        if (affectedSources.indexOf(inputDesc->mInputSource) >= 0) {
+            inputsToClose.add(inputDesc->mIoHandle);
+        }
+    }
+    for (size_t i = 0; i < inputsToClose.size(); i++) {
+        closeInput(inputsToClose[i]);
+    }
+}
+
+
 status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
                                        audio_io_handle_t *ioHandle,
                                        audio_devices_t *device)
@@ -2528,7 +2648,7 @@
     mUidCached = getuid();
     mpClientInterface = clientInterface;
 
-    mDefaultOutputDevice = new DeviceDescriptor(String8("Speaker"), AUDIO_DEVICE_OUT_SPEAKER);
+    mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
     if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE,
                  mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
                  mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
@@ -3540,7 +3660,8 @@
     ALOGVV("getOutputsForDevice() device %04x", device);
     for (size_t i = 0; i < openOutputs.size(); i++) {
         ALOGVV("output %d isDuplicated=%d device=%04x",
-                i, openOutputs.valueAt(i)->isDuplicated(), openOutputs.valueAt(i)->supportedDevices());
+                i, openOutputs.valueAt(i)->isDuplicated(),
+                openOutputs.valueAt(i)->supportedDevices());
         if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
             ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
             outputs.add(openOutputs.keyAt(i));
@@ -3771,7 +3892,6 @@
 
     audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->mInputSource);
 
-    ALOGV("getNewInputDevice() selected device %x", device);
     return device;
 }
 
@@ -3903,7 +4023,7 @@
     for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
         sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
         routing_strategy strat = getStrategy(route->mStreamType);
-        if (strat == strategy && route->mDeviceDescriptor != 0 /*&& route->mActivityCount != 0*/) {
+        if (strat == strategy && route->isActive()) {
             return route->mDeviceDescriptor->type();
         }
     }
@@ -4291,7 +4411,14 @@
 
 audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
 {
-    return mEngine->getDeviceForInputSource(inputSource);
+    for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
+         sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
+         if (inputSource == route->mSource && route->isActive()) {
+             return route->mDeviceDescriptor->type();
+         }
+     }
+
+     return mEngine->getDeviceForInputSource(inputSource);
 }
 
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
@@ -4514,8 +4641,8 @@
 
 // --- SessionRoute class implementation
 void AudioPolicyManager::SessionRoute::log(const char* prefix) {
-    ALOGI("%s[SessionRoute strm:0x%X, sess:0x%X, dev:0x%X refs:%d act:%d",
-          prefix, mStreamType, mSession,
+    ALOGI("%s[SessionRoute strm:0x%X, src:%d, sess:0x%X, dev:0x%X refs:%d act:%d",
+          prefix, mStreamType, mSource, mSession,
           mDeviceDescriptor != 0 ? mDeviceDescriptor->type() : AUDIO_DEVICE_NONE,
           mRefCount, mActivityCount);
 }
@@ -4537,28 +4664,6 @@
     return false;
 }
 
-void AudioPolicyManager::SessionRouteMap::addRoute(audio_session_t session,
-                                                   audio_stream_type_t streamType,
-                                                   sp<DeviceDescriptor> deviceDescriptor)
-{
-    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
-    if (route != NULL) {
-        if ((route->mDeviceDescriptor == 0 && deviceDescriptor != 0) ||
-                (!route->mDeviceDescriptor->equals(deviceDescriptor))) {
-            route->mChanged = true;
-        }
-        route->mRefCount++;
-        route->mDeviceDescriptor = deviceDescriptor;
-    } else {
-        route = new AudioPolicyManager::SessionRoute(session, streamType, deviceDescriptor);
-        route->mRefCount++;
-        add(session, route);
-        if (deviceDescriptor != 0) {
-            route->mChanged = true;
-        }
-    }
-}
-
 void AudioPolicyManager::SessionRouteMap::removeRoute(audio_session_t session)
 {
     sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
@@ -4594,12 +4699,46 @@
     }
 }
 
+void AudioPolicyManager::SessionRouteMap::addRoute(audio_session_t session,
+                                                   audio_stream_type_t streamType,
+                                                   audio_source_t source,
+                                                   sp<DeviceDescriptor> descriptor,
+                                                   uid_t uid)
+{
+    if (mMapType == MAPTYPE_INPUT && streamType != SessionRoute::STREAM_TYPE_NA) {
+        ALOGE("Adding Output Route to InputRouteMap");
+        return;
+    } else if (mMapType == MAPTYPE_OUTPUT && source != SessionRoute::SOURCE_TYPE_NA) {
+        ALOGE("Adding Input Route to OutputRouteMap");
+        return;
+    }
+
+    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
+
+    if (route != 0) {
+        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
+                ((route->mDeviceDescriptor != 0) &&
+                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
+            route->mChanged = true;
+        }
+        route->mRefCount++;
+        route->mDeviceDescriptor = descriptor;
+    } else {
+        route = new AudioPolicyManager::SessionRoute(session, streamType, source, descriptor, uid);
+        route->mRefCount++;
+        add(session, route);
+        if (descriptor != 0) {
+            route->mChanged = true;
+        }
+    }
+}
+
 void AudioPolicyManager::defaultAudioPolicyConfig(void)
 {
     sp<HwModule> module;
     sp<IOProfile> profile;
     sp<DeviceDescriptor> defaultInputDevice =
-                    new DeviceDescriptor(String8("builtin-mic"), AUDIO_DEVICE_IN_BUILTIN_MIC);
+                    new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
     mAvailableOutputDevices.add(mDefaultOutputDevice);
     mAvailableInputDevices.add(defaultInputDevice);
 
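
To make the new counting on SessionRoute easier to follow, here is a toy model (simplified
names, not the actual classes) of how the state set up in getOutputForAttr(), startOutput()
and stopOutput() above interacts:

    #include <cassert>

    struct ToySessionRoute {
        int  refCount = 0;       // +/- on getOutputForAttr() / releaseOutput()
        int  activityCount = 0;  // +/- on startOutput() / stopOutput()
        bool changed = false;    // set when a different explicit device is requested
        bool hasDevice = false;  // an explicit device descriptor was supplied

        bool isActive() const { return hasDevice && (changed || activityCount > 0); }
    };

    int main() {
        ToySessionRoute route;

        // getOutputForAttr() with a valid selectedDeviceId: addRoute()
        route.hasDevice = true;
        route.changed = true;
        route.refCount++;
        assert(route.isActive());   // explicit device now wins in getDeviceForStrategy()

        // startOutput(): incRouteActivity(); hasRouteChanged() then clears the flag
        route.activityCount++;
        route.changed = false;
        assert(route.isActive());   // still active while the stream is started

        // stopOutput(): decRouteActivity(); reaching 0 forces a device re-evaluation
        route.activityCount--;
        assert(!route.isActive());  // the engine's default device applies again
        return 0;
    }

While a route is active, getDeviceForStrategy() and getDeviceForInputSource() (see the
hunks above) return the explicitly selected device instead of the engine's choice.
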
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 521f6c4..ea16864 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -109,6 +109,7 @@
                                           audio_io_handle_t *output,
                                           audio_session_t session,
                                           audio_stream_type_t *stream,
+                                          uid_t uid,
                                           uint32_t samplingRate,
                                           audio_format_t format,
                                           audio_channel_mask_t channelMask,
@@ -127,10 +128,12 @@
         virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                          audio_io_handle_t *input,
                                          audio_session_t session,
+                                         uid_t uid,
                                          uint32_t samplingRate,
                                          audio_format_t format,
                                          audio_channel_mask_t channelMask,
                                          audio_input_flags_t flags,
+                                         audio_port_handle_t selectedDeviceId,
                                          input_type_t *inputType);
 
         // indicates to the audio policy manager that the input starts being used.
@@ -206,7 +209,6 @@
                                           struct audio_patch *patches,
                                           unsigned int *generation);
         virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-        virtual void clearAudioPatches(uid_t uid);
 
         virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                                audio_io_handle_t *ioHandle,
@@ -225,6 +227,8 @@
                                           audio_io_handle_t *handle);
         virtual status_t stopAudioSource(audio_io_handle_t handle);
 
+        virtual void     releaseResourcesForUid(uid_t uid);
+
         // Audio policy configuration file parsing (audio_policy.conf)
         // TODO candidates to be moved to ConfigParsingUtils
                 void defaultAudioPolicyConfig(void);
@@ -233,45 +237,91 @@
         routing_strategy getStrategy(audio_stream_type_t stream) const;
 
 protected:
-        class SessionRoute : public RefBase
-        {
+        class SessionRoute : public RefBase {
         public:
-            friend class SessionRouteMap;
+            // For Input (Source) routes, use STREAM_TYPE_NA ("NA" = "not applicable") for the
+            // streamType argument
+            static const audio_stream_type_t STREAM_TYPE_NA = AUDIO_STREAM_DEFAULT;
+
+            // For Output (Sink) routes, use SOURCE_TYPE_NA ("NA" = "not applicable") for the
+            // source argument
+
+            static const audio_source_t SOURCE_TYPE_NA = AUDIO_SOURCE_DEFAULT;
+
             SessionRoute(audio_session_t session,
                          audio_stream_type_t streamType,
-                         sp<DeviceDescriptor> deviceDescriptor)
-                : mSession(session),
-                  mStreamType(streamType),
-                  mDeviceDescriptor(deviceDescriptor),
-                  mRefCount(0),
-                  mActivityCount(0),
-                  mChanged(false) {}
+                         audio_source_t source,
+                         sp<DeviceDescriptor> deviceDescriptor,
+                         uid_t uid)
+               : mUid(uid),
+                 mSession(session),
+                 mDeviceDescriptor(deviceDescriptor),
+                 mRefCount(0),
+                 mActivityCount(0),
+                 mChanged(false),
+                 mStreamType(streamType),
+                 mSource(source)
+                  {}
 
             void log(const char* prefix);
 
-            audio_session_t         mSession;
-            audio_stream_type_t     mStreamType;
+            bool isActive() {
+                return (mDeviceDescriptor != 0) && (mChanged || (mActivityCount > 0));
+            }
 
-            sp<DeviceDescriptor>    mDeviceDescriptor;
+            uid_t                       mUid;
+            audio_session_t             mSession;
+            sp<DeviceDescriptor>        mDeviceDescriptor;
 
             // "reference" counting
-            int                     mRefCount;      // +/- on references
-            int                     mActivityCount; // +/- on start/stop
-            bool                    mChanged;
+            int                         mRefCount;      // +/- on references
+            int                         mActivityCount; // +/- on start/stop
+            bool                        mChanged;
+            // for outputs
+            const audio_stream_type_t   mStreamType;
+            // for inputs
+            const audio_source_t        mSource;
         };
 
-        class SessionRouteMap: public KeyedVector<audio_session_t, sp<SessionRoute>>
-        {
-         public:
+        class SessionRouteMap: public KeyedVector<audio_session_t, sp<SessionRoute>> {
+        public:
+            // These constants identify whether a SessionRouteMap holds input routes or
+            // output routes. An error is logged if an attempt is made to add a SessionRoute
+            // with mStreamType == STREAM_TYPE_NA (i.e. an input SessionRoute) to a
+            // SessionRouteMap that is marked for output (i.e. mMapType == MAPTYPE_OUTPUT),
+            // and similarly for output SessionRoutes and input SessionRouteMaps.
+            typedef enum {
+              MAPTYPE_INPUT = 0,
+              MAPTYPE_OUTPUT = 1
+            } session_route_map_type_t;
+
+            SessionRouteMap(session_route_map_type_t mapType) :
+                mMapType(mapType) {
+            }
+
             bool hasRoute(audio_session_t session);
-            void addRoute(audio_session_t session, audio_stream_type_t streamType,
-                          sp<DeviceDescriptor> deviceDescriptor);
+
             void removeRoute(audio_session_t session);
 
             int incRouteActivity(audio_session_t session);
             int decRouteActivity(audio_session_t session);
             bool hasRouteChanged(audio_session_t session); // also clears the changed flag
             void log(const char* caption);
+
+            // Specify an Output (Sink) route by passing SessionRoute::SOURCE_TYPE_NA in the
+            // source argument.
+            // Specify an Input (Source) route by passing SessionRoute::STREAM_TYPE_NA
+            // in the streamType argument.
+            void addRoute(audio_session_t session,
+                          audio_stream_type_t streamType,
+                          audio_source_t source,
+                          sp<DeviceDescriptor> deviceDescriptor,
+                          uid_t uid);
+
+        private:
+            // Marks this SessionRouteMap as holding either input routes (mMapType == MAPTYPE_INPUT)
+            // or output routes (mMapType == MAPTYPE_OUTPUT).
+            const session_route_map_type_t mMapType;
         };
 
         // From AudioPolicyManagerObserver
@@ -519,7 +569,12 @@
                              audio_devices_t device,
                              uint32_t *delayMs);
         status_t stopSource(sp<AudioOutputDescriptor> outputDesc,
-                            audio_stream_type_t stream);
+                            audio_stream_type_t stream,
+                            bool forceDeviceUpdate);
+
+        void clearAudioPatches(uid_t uid);
+        void clearSessionRoutes(uid_t uid);
+        void checkStrategyRoute(routing_strategy strategy, audio_io_handle_t outputToSkip);
 
         uid_t mUidCached;
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
@@ -535,8 +590,8 @@
         DeviceVector  mAvailableOutputDevices; // all available output devices
         DeviceVector  mAvailableInputDevices;  // all available input devices
 
-        SessionRouteMap mOutputRoutes;
-        SessionRouteMap mInputRoutes;
+        SessionRouteMap mOutputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_OUTPUT);
+        SessionRouteMap mInputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_INPUT);
 
         StreamDescriptorCollection mStreams; // stream descriptors for volume control
         bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
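
For reference, the call shapes of the new five-argument addRoute(), as used by
getOutputForAttr() and getInputForAttr() in AudioPolicyManager.cpp above; this only spells
out the NA sentinels described in the header comments, it is not new API surface:

    // Output (sink) route: the source argument is "not applicable".
    mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);

    // Input (source) route: the streamType argument is "not applicable".
    mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);

    // Mismatches are rejected: adding an input route to the MAPTYPE_OUTPUT map (or an
    // output route to the MAPTYPE_INPUT map) only logs an error and returns.
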
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index e6ace20..282ddeb 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -109,8 +109,8 @@
         Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
-            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0,
-                                                 audioSession, input);
+            sp<AudioEffect> fx = new AudioEffect(NULL, String16("android"), &effect->mUuid, -1, 0,
+                                                 0, audioSession, input);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGW("addInputEffects(): failed to create Fx %s on source %d",
@@ -254,7 +254,7 @@
         Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
-            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0,
+            sp<AudioEffect> fx = new AudioEffect(NULL, String16("android"), &effect->mUuid, 0, 0, 0,
                                                  audioSession, output);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index e764eda..65639c3 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -146,11 +146,12 @@
                                               audio_io_handle_t *output,
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
+                                              uid_t uid,
                                               uint32_t samplingRate,
                                               audio_format_t format,
                                               audio_channel_mask_t channelMask,
                                               audio_output_flags_t flags,
-                                              int mSelectedDeviceId,
+                                              audio_port_handle_t selectedDeviceId,
                                               const audio_offload_info_t *offloadInfo)
 {
     if (mAudioPolicyManager == NULL) {
@@ -158,8 +159,17 @@
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
-    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, samplingRate,
-                                    format, channelMask, flags, mSelectedDeviceId, offloadInfo);
+
+    // if the caller is us, trust the specified uid
+    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
+        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
+        if (uid != (uid_t)-1 && uid != newclientUid) {
+            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+        }
+        uid = newclientUid;
+    }
+    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
+                                    format, channelMask, flags, selectedDeviceId, offloadInfo);
 }
 
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -248,10 +258,12 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             uid_t uid,
                                              uint32_t samplingRate,
                                              audio_format_t format,
                                              audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags)
+                                             audio_input_flags_t flags,
+                                             audio_port_handle_t selectedDeviceId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -268,12 +280,22 @@
     sp<AudioPolicyEffects>audioPolicyEffects;
     status_t status;
     AudioPolicyInterface::input_type_t inputType;
+    // if the caller is us, trust the specified uid
+    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
+        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
+        if (uid != (uid_t)-1 && uid != newclientUid) {
+            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+        }
+        uid = newclientUid;
+    }
+
     {
         Mutex::Autolock _l(mLock);
         // the audio_in_acoustics_t parameter is ignored by get_input()
-        status = mAudioPolicyManager->getInputForAttr(attr, input, session,
+        status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
                                                      samplingRate, format, channelMask,
-                                                     flags, &inputType);
+                                                     flags, selectedDeviceId,
+                                                     &inputType);
         audioPolicyEffects = mAudioPolicyEffects;
 
         if (status == NO_ERROR) {
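
The uid sanitization above is duplicated verbatim in getOutputForAttr() and
getInputForAttr(). A hypothetical helper with the same behavior (not part of this patch;
the name resolveClientUid is invented here) would read:

    static uid_t resolveClientUid(uid_t requestedUid) {
        // Caller is the service's own process and passed a real uid: trust it.
        if (IPCThreadState::self()->getCallingPid() == getpid_cached && requestedUid != (uid_t)-1) {
            return requestedUid;
        }
        // Otherwise the binder-derived uid wins; warn if the caller tried to spoof one.
        uid_t callingUid = IPCThreadState::self()->getCallingUid();
        if (requestedUid != (uid_t)-1 && requestedUid != callingUid) {
            ALOGW("uid %d tried to pass itself off as %d", callingUid, requestedUid);
        }
        return callingUid;
    }
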
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index f783437..13af3ef 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -234,10 +234,12 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             uid_t uid __unused,
                                              uint32_t samplingRate,
                                              audio_format_t format,
                                              audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags __unused)
+                                             audio_input_flags_t flags __unused,
+                                             audio_port_handle_t selectedDeviceId __unused)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
@@ -564,11 +566,12 @@
                                               audio_io_handle_t *output,
                                               audio_session_t session __unused,
                                               audio_stream_type_t *stream,
+                                              uid_t uid __unused,
                                               uint32_t samplingRate,
                                               audio_format_t format,
                                               audio_channel_mask_t channelMask,
                                               audio_output_flags_t flags,
-                                              int selectedDeviceId __unused,
+                                              audio_port_handle_t selectedDeviceId __unused,
                                               const audio_offload_info_t *offloadInfo)
 {
     if (attr != NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index ccf9f9b..c5f4fb7 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -177,7 +177,7 @@
     {
         Mutex::Autolock _l(mLock);
         if (mAudioPolicyManager) {
-            mAudioPolicyManager->clearAudioPatches(uid);
+            mAudioPolicyManager->releaseResourcesForUid(uid);
         }
     }
 #endif
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 4e25d33..eb50cdd 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -80,11 +80,12 @@
                                       audio_io_handle_t *output,
                                       audio_session_t session,
                                       audio_stream_type_t *stream,
+                                      uid_t uid,
                                       uint32_t samplingRate = 0,
                                       audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                       audio_channel_mask_t channelMask = 0,
                                       audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                      int selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                      audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                       const audio_offload_info_t *offloadInfo = NULL);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
@@ -98,10 +99,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
-                                     audio_input_flags_t flags);
+                                     audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session);
     virtual status_t stopInput(audio_io_handle_t input,
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 8c5c43a..3f80faf 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -890,9 +890,12 @@
             if (current != nullptr) {
                 auto clientSp = current->getValue();
                 if (clientSp.get() != nullptr) { // should never be needed
-                    if (clientSp->getRemote() == remoteCallback) {
+                    if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
+                        ALOGW("CameraService connect called from same client, but with a different"
+                                " API level, evicting prior client...");
+                    } else if (clientSp->getRemote() == remoteCallback) {
                         ALOGI("CameraService::connect X (PID %d) (second call from same"
-                                "app binder, returning the same client)", clientPid);
+                                " app binder, returning the same client)", clientPid);
                         *client = clientSp;
                         return NO_ERROR;
                     }
@@ -1054,19 +1057,24 @@
 status_t CameraService::connect(
         const sp<ICameraClient>& cameraClient,
         int cameraId,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int clientUid,
         /*out*/
         sp<ICamera>& device) {
 
+    const status_t result = checkCameraAccess(opPackageName);
+    if (result != NO_ERROR) {
+        return result;
+    }
+
     status_t ret = NO_ERROR;
     String8 id = String8::format("%d", cameraId);
     sp<Client> client = nullptr;
     ret = connectHelper<ICameraClient,Client>(cameraClient, id, CAMERA_HAL_API_VERSION_UNSPECIFIED,
-            clientPackageName, clientUid, API_1, false, false, /*out*/client);
+            opPackageName, clientUid, API_1, false, false, /*out*/client);
 
     if(ret != NO_ERROR) {
-        logRejected(id, getCallingPid(), String8(clientPackageName),
+        logRejected(id, getCallingPid(), String8(opPackageName),
                 String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
@@ -1078,11 +1086,16 @@
 status_t CameraService::connectLegacy(
         const sp<ICameraClient>& cameraClient,
         int cameraId, int halVersion,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int clientUid,
         /*out*/
         sp<ICamera>& device) {
 
+    const status_t result = checkCameraAccess(opPackageName);
+    if (result != NO_ERROR) {
+        return result;
+    }
+
     String8 id = String8::format("%d", cameraId);
     int apiVersion = mModule->getModuleApiVersion();
     if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
@@ -1095,18 +1108,18 @@
          */
         ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
                 __FUNCTION__, apiVersion);
-        logRejected(id, getCallingPid(), String8(clientPackageName),
+        logRejected(id, getCallingPid(), String8(opPackageName),
                 String8("HAL module version doesn't support legacy HAL connections"));
         return INVALID_OPERATION;
     }
 
     status_t ret = NO_ERROR;
     sp<Client> client = nullptr;
-    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, opPackageName,
             clientUid, API_1, true, false, /*out*/client);
 
     if(ret != NO_ERROR) {
-        logRejected(id, getCallingPid(), String8(clientPackageName),
+        logRejected(id, getCallingPid(), String8(opPackageName),
                 String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
@@ -1118,20 +1131,25 @@
 status_t CameraService::connectDevice(
         const sp<ICameraDeviceCallbacks>& cameraCb,
         int cameraId,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int clientUid,
         /*out*/
         sp<ICameraDeviceUser>& device) {
 
+    const status_t result = checkCameraAccess(opPackageName);
+    if (result != NO_ERROR) {
+        return result;
+    }
+
     status_t ret = NO_ERROR;
     String8 id = String8::format("%d", cameraId);
     sp<CameraDeviceClient> client = nullptr;
     ret = connectHelper<ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
-            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, API_2, false, false,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, opPackageName, clientUid, API_2, false, false,
             /*out*/client);
 
     if(ret != NO_ERROR) {
-        logRejected(id, getCallingPid(), String8(clientPackageName),
+        logRejected(id, getCallingPid(), String8(opPackageName),
                 String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
@@ -1526,24 +1544,24 @@
 }
 
 void CameraService::logDisconnected(const char* cameraId, int clientPid,
-        const char* clientPackage) {
+        const char* opPackageName) {
     // Log the clients evicted
     logEvent(String8::format("DISCONNECT device %s client for package %s (PID %d)", cameraId,
-            clientPackage, clientPid));
+            opPackageName, clientPid));
 }
 
 void CameraService::logConnected(const char* cameraId, int clientPid,
-        const char* clientPackage) {
+        const char* opPackageName) {
     // Log the clients evicted
     logEvent(String8::format("CONNECT device %s client for package %s (PID %d)", cameraId,
-            clientPackage, clientPid));
+            opPackageName, clientPid));
 }
 
 void CameraService::logRejected(const char* cameraId, int clientPid,
-        const char* clientPackage, const char* reason) {
+        const char* opPackageName, const char* reason) {
     // Log the client rejected
     logEvent(String8::format("REJECT device %s client for package %s (PID %d), reason: (%s)",
-            cameraId, clientPackage, clientPid, reason));
+            cameraId, opPackageName, clientPid, reason));
 }
 
 void CameraService::logUserSwitch(int oldUserId, int newUserId) {
@@ -1580,21 +1598,6 @@
 
     // Permission checks
     switch (code) {
-        case BnCameraService::CONNECT:
-        case BnCameraService::CONNECT_DEVICE:
-        case BnCameraService::CONNECT_LEGACY: {
-            if (pid != selfPid) {
-                // we're called from a different process, do the real check
-                if (!checkCallingPermission(
-                        String16("android.permission.CAMERA"))) {
-                    const int uid = getCallingUid();
-                    ALOGE("Permission Denial: "
-                         "can't use the camera pid=%d, uid=%d", pid, uid);
-                    return PERMISSION_DENIED;
-                }
-            }
-            break;
-        }
         case BnCameraService::NOTIFY_SYSTEM_EVENT: {
             if (pid != selfPid) {
                 // Ensure we're being called by system_server, or similar process with
@@ -1614,6 +1617,38 @@
     return BnCameraService::onTransact(code, data, reply, flags);
 }
 
+status_t CameraService::checkCameraAccess(const String16& opPackageName) {
+    const int pid = getCallingPid();
+
+    if (pid == getpid()) {
+        return NO_ERROR;
+    }
+
+    const int uid = getCallingUid();
+
+    if (!checkCallingPermission(String16("android.permission.CAMERA"))) {
+        ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", pid, uid);
+        return PERMISSION_DENIED;
+    }
+
+    AppOpsManager appOps;
+    const int32_t result = appOps.noteOp(AppOpsManager::OP_CAMERA, uid, opPackageName);
+
+    switch (result) {
+        case AppOpsManager::MODE_ERRORED: {
+            ALOGE("App op OP_CAMERA errored: can't use the camera pid=%d, uid=%d", pid, uid);
+            return PERMISSION_DENIED;
+        } break;
+
+        case AppOpsManager::MODE_IGNORED: {
+             ALOGE("App op OP_CAMERA ignored: can't use the camera pid=%d, uid=%d", pid, uid);
+             return INVALID_OPERATION;
+        } break;
+    }
+
+    return NO_ERROR;
+}
+
 // We share the media players for shutter and recording sound for all clients.
 // A reference count is kept to determine when we will actually release the
 // media players.
@@ -1666,13 +1701,13 @@
 
 CameraService::Client::Client(const sp<CameraService>& cameraService,
         const sp<ICameraClient>& cameraClient,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int cameraId, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid) :
         CameraService::BasicClient(cameraService,
                 IInterface::asBinder(cameraClient),
-                clientPackageName,
+                opPackageName,
                 cameraId, cameraFacing,
                 clientPid, clientUid,
                 servicePid)
@@ -1699,11 +1734,11 @@
 
 CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
         const sp<IBinder>& remoteCallback,
-        const String16& clientPackageName,
+        const String16& opPackageName,
         int cameraId, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid):
-        mClientPackageName(clientPackageName), mDisconnected(false)
+        mOpPackageName(opPackageName), mDisconnected(false)
 {
     mCameraService = cameraService;
     mRemoteBinder = remoteCallback;
@@ -1731,7 +1766,7 @@
 
     mCameraService->removeByClient(this);
     mCameraService->logDisconnected(String8::format("%d", mCameraId), mClientPid,
-            String8(mClientPackageName));
+            String8(mOpPackageName));
 
     sp<IBinder> remote = getRemote();
     if (remote != nullptr) {
@@ -1746,7 +1781,7 @@
 }
 
 String16 CameraService::BasicClient::getPackageName() const {
-    return mClientPackageName;
+    return mOpPackageName;
 }
 
 
@@ -1754,6 +1789,11 @@
     return mClientPid;
 }
 
+bool CameraService::BasicClient::canCastToApiClient(apiLevel level) const {
+    // Defaults to API2.
+    return level == API_2;
+}
+
 status_t CameraService::BasicClient::startCameraOps() {
     int32_t res;
     // Notify app ops that the camera is not available
@@ -1761,17 +1801,17 @@
 
     {
         ALOGV("%s: Start camera ops, package name = %s, client UID = %d",
-              __FUNCTION__, String8(mClientPackageName).string(), mClientUid);
+              __FUNCTION__, String8(mOpPackageName).string(), mClientUid);
     }
 
     mAppOpsManager.startWatchingMode(AppOpsManager::OP_CAMERA,
-            mClientPackageName, mOpsCallback);
+            mOpPackageName, mOpsCallback);
     res = mAppOpsManager.startOp(AppOpsManager::OP_CAMERA,
-            mClientUid, mClientPackageName);
+            mClientUid, mOpPackageName);
 
     if (res != AppOpsManager::MODE_ALLOWED) {
         ALOGI("Camera %d: Access for \"%s\" has been revoked",
-                mCameraId, String8(mClientPackageName).string());
+                mCameraId, String8(mOpPackageName).string());
         return PERMISSION_DENIED;
     }
 
@@ -1789,7 +1829,7 @@
     if (mOpsActive) {
         // Notify app ops that the camera is available again
         mAppOpsManager.finishOp(AppOpsManager::OP_CAMERA, mClientUid,
-                mClientPackageName);
+                mOpPackageName);
         mOpsActive = false;
 
         auto rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
@@ -1814,7 +1854,7 @@
 
 void CameraService::BasicClient::opChanged(int32_t op, const String16& packageName) {
     String8 name(packageName);
-    String8 myName(mClientPackageName);
+    String8 myName(mOpPackageName);
 
     if (op != AppOpsManager::OP_CAMERA) {
         ALOGW("Unexpected app ops notification received: %d", op);
@@ -1823,7 +1863,7 @@
 
     int32_t res;
     res = mAppOpsManager.checkOp(AppOpsManager::OP_CAMERA,
-            mClientUid, mClientPackageName);
+            mClientUid, mOpPackageName);
     ALOGV("checkOp returns: %d, %s ", res,
             res == AppOpsManager::MODE_ALLOWED ? "ALLOWED" :
             res == AppOpsManager::MODE_IGNORED ? "IGNORED" :
@@ -1866,6 +1906,10 @@
     BasicClient::disconnect();
 }
 
+bool CameraService::Client::canCastToApiClient(apiLevel level) const {
+    return level == API_1;
+}
+
 CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client):
         mClient(client) {
 }
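
Review note: the new checkCameraAccess() folds the android.permission.CAMERA check and an
app-ops query (keyed by the caller-supplied opPackageName) into one helper, replacing the
CONNECT* cases removed from onTransact() above. Its result mapping can be restated as a small
standalone table; mapCameraOpResult is an illustrative name, not part of this patch, and the
sketch assumes <binder/AppOpsManager.h> and <utils/Errors.h>:

    // Sketch of the AppOps result mapping used by checkCameraAccess():
    //   MODE_ALLOWED -> NO_ERROR           (proceed with the connection)
    //   MODE_ERRORED -> PERMISSION_DENIED  (hard denial; treat like a missing permission)
    //   MODE_IGNORED -> INVALID_OPERATION  (denied, but the caller should fail quietly)
    static status_t mapCameraOpResult(int32_t opResult) {
        switch (opResult) {
            case AppOpsManager::MODE_ERRORED: return PERMISSION_DENIED;
            case AppOpsManager::MODE_IGNORED: return INVALID_OPERATION;
            default:                          return NO_ERROR;  // MODE_ALLOWED and anything else
        }
    }
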
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 84e61c5..502fcfa 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -65,6 +65,7 @@
     class Client;
     class BasicClient;
 
+    // The effective API level.  The Camera2 API running in LEGACY mode counts as API_1.
     enum apiLevel {
         API_1 = 1,
         API_2 = 2
@@ -125,19 +126,19 @@
     virtual status_t    getCameraVendorTagDescriptor(/*out*/ sp<VendorTagDescriptor>& desc);
 
     virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
-            const String16& clientPackageName, int clientUid,
+            const String16& opPackageName, int clientUid,
             /*out*/
             sp<ICamera>& device);
 
     virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
-            int halVersion, const String16& clientPackageName, int clientUid,
+            int halVersion, const String16& opPackageName, int clientUid,
             /*out*/
             sp<ICamera>& device);
 
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
-            const String16& clientPackageName,
+            const String16& opPackageName,
             int clientUid,
             /*out*/
             sp<ICameraDeviceUser>& device);
@@ -215,10 +216,14 @@
 
         // Get the PID of the application client using this
         virtual int getClientPid() const;
+
+        // Check what API level is used for this client. This is used to determine which
+        // API-specific client class this can safely be cast to.
+        virtual bool canCastToApiClient(apiLevel level) const;
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
-                const String16& clientPackageName,
+                const String16& opPackageName,
                 int cameraId,
                 int cameraFacing,
                 int clientPid,
@@ -237,7 +242,7 @@
         sp<CameraService>               mCameraService;  // immutable after constructor
         int                             mCameraId;       // immutable after constructor
         int                             mCameraFacing;   // immutable after constructor
-        const String16                  mClientPackageName;
+        const String16                  mOpPackageName;
         pid_t                           mClientPid;
         uid_t                           mClientUid;      // immutable after constructor
         pid_t                           mServicePid;     // immutable after constructor
@@ -304,7 +309,7 @@
         // Interface used by CameraService
         Client(const sp<CameraService>& cameraService,
                 const sp<ICameraClient>& cameraClient,
-                const String16& clientPackageName,
+                const String16& opPackageName,
                 int cameraId,
                 int cameraFacing,
                 int clientPid,
@@ -323,6 +328,10 @@
 
         virtual void         notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
                                          const CaptureResultExtras& resultExtras);
+
+        // Check what API level is used for this client. This is used to determine which
+        // API-specific client class this can safely be cast to.
+        virtual bool canCastToApiClient(apiLevel level) const;
     protected:
         // Convert client from cookie.
         static sp<CameraService::Client> getClientFromCookie(void* user);
@@ -471,7 +480,7 @@
     // Single implementation shared between the various connect calls
     template<class CALLBACK, class CLIENT>
     status_t connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId, int halVersion,
-            const String16& clientPackageName, int clientUid, apiLevel effectiveApiLevel,
+            const String16& opPackageName, int clientUid, apiLevel effectiveApiLevel,
             bool legacyMode, bool shimUpdateOnly, /*out*/sp<CLIENT>& device);
 
 
@@ -704,6 +713,8 @@
             int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
             int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
             /*out*/sp<BasicClient>* client);
+
+    status_t checkCameraAccess(const String16& opPackageName);
 };
 
 template<class Func>
@@ -752,11 +763,11 @@
 
 template<class CALLBACK, class CLIENT>
 status_t CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
-        int halVersion, const String16& clientPackageName, int clientUid,
+        int halVersion, const String16& opPackageName, int clientUid,
         apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
         /*out*/sp<CLIENT>& device) {
     status_t ret = NO_ERROR;
-    String8 clientName8(clientPackageName);
+    String8 clientName8(opPackageName);
     int clientPid = getCallingPid();
 
     ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
@@ -827,7 +838,7 @@
         int facing = -1;
         int deviceVersion = getDeviceVersion(id, /*out*/&facing);
         sp<BasicClient> tmp = nullptr;
-        if((ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+        if((ret = makeClient(this, cameraCb, opPackageName, cameraId, facing, clientPid,
                 clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
                 /*out*/&tmp)) != NO_ERROR) {
             return ret;
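
Review note: canCastToApiClient() gives callers that only hold a BasicClient a way to check the
client's API family before downcasting. A hypothetical usage sketch (asApi1Client is an
illustrative helper, not part of this patch):

    // Narrow a BasicClient to the API1 Client type only when the override says it is one.
    static sp<CameraService::Client> asApi1Client(
            const sp<CameraService::BasicClient>& basicClient) {
        if (basicClient != nullptr &&
                basicClient->canCastToApiClient(CameraService::API_1)) {
            // Only API1 clients report API_1, so the static_cast cannot land on an
            // API2 device client by mistake.
            return static_cast<CameraService::Client*>(basicClient.get());
        }
        return nullptr;  // API2 client: stay on the BasicClient interface
    }
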
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 5c8f750..88c5811 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -395,7 +395,7 @@
 
         heapIdx = mCallbackHeapHead;
 
-        mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
+        mCallbackHeapHead = (mCallbackHeapHead + 1) % kCallbackHeapCount;
         mCallbackHeapFree--;
 
         // TODO: Get rid of this copy by passing the gralloc queue all the way
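
Review note on the index-wrapping fix above: '&' only behaves as a cheap modulo when the mask is
(size - 1) of a power-of-two buffer; masking with kCallbackHeapCount itself skips slots and can
produce an out-of-range index. A standalone illustration (the value 6 is only assumed for the
example; the real constant is defined in the header):

    #include <cstdio>

    int main() {
        const int kCount = 6;  // stand-in for kCallbackHeapCount
        for (int head = 0; head < kCount; ++head) {
            int masked = (head + 1) & kCount;  // old code: bitwise AND with the count
            int modulo = (head + 1) % kCount;  // new code: true wrap-around
            std::printf("head=%d  &-> %d  %%-> %d\n", head, masked, modulo);
        }
        // With a count of 6 the AND form yields 0,2,2,4,4,6: index 6 is out of range and
        // slots 1, 3 and 5 are never produced, so the callback heap ring never cycles.
        return 0;
    }
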
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 6b0f8b5..c3a6842 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -2100,12 +2100,7 @@
 
     delete[] reqMeteringAreas;
 
-    /* don't include jpeg thumbnail size - it's valid for
-       it to be set to (0,0), meaning 'no thumbnail' */
-    CropRegion crop = calculateCropRegion( (CropRegion::Outputs)(
-            CropRegion::OUTPUT_PREVIEW     |
-            CropRegion::OUTPUT_VIDEO       |
-            CropRegion::OUTPUT_PICTURE    ));
+    CropRegion crop = calculateCropRegion(/*previewOnly*/ false);
     int32_t reqCropRegion[4] = {
         static_cast<int32_t>(crop.left),
         static_cast<int32_t>(crop.top),
@@ -2603,7 +2598,7 @@
     ALOG_ASSERT(x >= 0, "Crop-relative X coordinate = '%d' is out of bounds"
                          "(lower = 0)", x);
 
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' "
                     "is out of bounds (upper = %f)", x, previewCrop.width);
 
@@ -2619,7 +2614,7 @@
     ALOG_ASSERT(y >= 0, "Crop-relative Y coordinate = '%d' is out of bounds "
         "(lower = 0)", y);
 
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is "
                 "out of bounds (upper = %f)", y, previewCrop.height);
 
@@ -2634,12 +2629,12 @@
 }
 
 int Parameters::normalizedXToCrop(int x) const {
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     return (x + 1000) * (previewCrop.width - 1) / 2000;
 }
 
 int Parameters::normalizedYToCrop(int y) const {
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     return (y + 1000) * (previewCrop.height - 1) / 2000;
 }
 
@@ -2855,8 +2850,7 @@
     return jpegSizes;
 }
 
-Parameters::CropRegion Parameters::calculateCropRegion(
-                            Parameters::CropRegion::Outputs outputs) const {
+Parameters::CropRegion Parameters::calculateCropRegion(bool previewOnly) const {
 
     float zoomLeft, zoomTop, zoomWidth, zoomHeight;
 
@@ -2880,90 +2874,45 @@
           maxDigitalZoom.data.f[0], zoomIncrement, zoomRatio, previewWidth,
           previewHeight, fastInfo.arrayWidth, fastInfo.arrayHeight);
 
-    /*
-     * Assumption: On the HAL side each stream buffer calculates its crop
-     * rectangle as follows:
-     *   cropRect = (zoomLeft, zoomRight,
-     *               zoomWidth, zoomHeight * zoomWidth / outputWidth);
-     *
-     * Note that if zoomWidth > bufferWidth, the new cropHeight > zoomHeight
-     *      (we can then get into trouble if the cropHeight > arrayHeight).
-     * By selecting the zoomRatio based on the smallest outputRatio, we
-     * guarantee this will never happen.
-     */
+    if (previewOnly) {
+        // Calculate a tight crop region for the preview stream only
+        float previewRatio = static_cast<float>(previewWidth) / previewHeight;
 
-    // Enumerate all possible output sizes, select the one with the smallest
-    // aspect ratio
-    float minOutputWidth, minOutputHeight, minOutputRatio;
-    {
-        float outputSizes[][2] = {
-            { static_cast<float>(previewWidth),
-              static_cast<float>(previewHeight) },
-            { static_cast<float>(videoWidth),
-              static_cast<float>(videoHeight) },
-            { static_cast<float>(jpegThumbSize[0]),
-              static_cast<float>(jpegThumbSize[1]) },
-            { static_cast<float>(pictureWidth),
-              static_cast<float>(pictureHeight) },
-        };
+        /* Ensure that the width/height never go out of bounds
+         * by scaling across a different dimension if an out-of-bounds
+         * possibility exists.
+         *
+         * e.g. if previewRatio < arrayRatio and zoomRatio = 1.0, then deriving
+         * zoomHeight from zoomWidth would actually give a
+         * zoomHeight > arrayHeight
+         */
+        float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight;
+        if (previewRatio >= arrayRatio) {
+            // Adjust the height based on the width
+            zoomWidth =  fastInfo.arrayWidth / zoomRatio;
+            zoomHeight = zoomWidth *
+                    previewHeight / previewWidth;
 
-        minOutputWidth = outputSizes[0][0];
-        minOutputHeight = outputSizes[0][1];
-        minOutputRatio = minOutputWidth / minOutputHeight;
-        for (unsigned int i = 0;
-             i < sizeof(outputSizes) / sizeof(outputSizes[0]);
-             ++i) {
-
-            // skip over outputs we don't want to consider for the crop region
-            if ( !((1 << i) & outputs) ) {
-                continue;
-            }
-
-            float outputWidth = outputSizes[i][0];
-            float outputHeight = outputSizes[i][1];
-            float outputRatio = outputWidth / outputHeight;
-
-            if (minOutputRatio > outputRatio) {
-                minOutputRatio = outputRatio;
-                minOutputWidth = outputWidth;
-                minOutputHeight = outputHeight;
-            }
-
-            // and then use this output ratio instead of preview output ratio
-            ALOGV("Enumerating output ratio %f = %f / %f, min is %f",
-                  outputRatio, outputWidth, outputHeight, minOutputRatio);
+        } else {
+            // Adjust the width based on the height
+            zoomHeight = fastInfo.arrayHeight / zoomRatio;
+            zoomWidth = zoomHeight *
+                    previewWidth / previewHeight;
         }
-    }
-
-    /* Ensure that the width/height never go out of bounds
-     * by scaling across a diffent dimension if an out-of-bounds
-     * possibility exists.
-     *
-     * e.g. if the previewratio < arrayratio and e.g. zoomratio = 1.0, then by
-     * calculating the zoomWidth from zoomHeight we'll actually get a
-     * zoomheight > arrayheight
-     */
-    float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight;
-    if (minOutputRatio >= arrayRatio) {
-        // Adjust the height based on the width
-        zoomWidth =  fastInfo.arrayWidth / zoomRatio;
-        zoomHeight = zoomWidth *
-                minOutputHeight / minOutputWidth;
-
     } else {
-        // Adjust the width based on the height
+        // Calculate the global crop region with a shape matching the active
+        // array.
+        zoomWidth = fastInfo.arrayWidth / zoomRatio;
         zoomHeight = fastInfo.arrayHeight / zoomRatio;
-        zoomWidth = zoomHeight *
-                minOutputWidth / minOutputHeight;
     }
-    // centering the zoom area within the active area
+
+    // center the zoom area within the active area
     zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
     zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;
 
     ALOGV("Crop region calculated (x=%d,y=%d,w=%f,h=%f) for zoom=%d",
         (int32_t)zoomLeft, (int32_t)zoomTop, zoomWidth, zoomHeight, this->zoom);
 
-
     CropRegion crop = { zoomLeft, zoomTop, zoomWidth, zoomHeight };
     return crop;
 }
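
Review note: after this rewrite, previewOnly == true yields a crop with the preview's aspect
ratio and previewOnly == false yields one with the active array's aspect ratio; both are scaled
by 1/zoomRatio and centered on the array. A worked example under assumed numbers (a 4032x3024
array, 1920x1080 preview and zoomRatio = 2.0; none of these values come from this patch):

    #include <cstdio>

    int main() {
        const float arrayW = 4032, arrayH = 3024;  // assumed active array
        const float prevW  = 1920, prevH  = 1080;  // assumed preview size
        const float zoomRatio = 2.0f;

        // previewOnly == true: previewRatio (1.78) >= arrayRatio (1.33), so the width
        // drives the math and the height follows the preview aspect ratio.
        float tightW = arrayW / zoomRatio;       // 2016
        float tightH = tightW * prevH / prevW;   // 1134

        // previewOnly == false: keep the array aspect ratio.
        float globalW = arrayW / zoomRatio;      // 2016
        float globalH = arrayH / zoomRatio;      // 1512

        std::printf("tight : %.0fx%.0f at (%.0f,%.0f)\n",
                tightW, tightH, (arrayW - tightW) / 2, (arrayH - tightH) / 2);     // (1008,945)
        std::printf("global: %.0fx%.0f at (%.0f,%.0f)\n",
                globalW, globalH, (arrayW - globalW) / 2, (arrayH - globalH) / 2); // (1008,756)
        return 0;
    }
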
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e628a7e..46d48bc 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -271,21 +271,16 @@
     // if video snapshot size is currently overridden
     bool isJpegSizeOverridden();
 
-    // Calculate the crop region rectangle based on current stream sizes
+    // Calculate the crop region rectangle, either tightly around the preview
+    // resolution, or matching the aspect ratio of the full active array; both
+    // take the current zoom level into account.
     struct CropRegion {
         float left;
         float top;
         float width;
         float height;
-
-        enum Outputs {
-            OUTPUT_PREVIEW         = 0x01,
-            OUTPUT_VIDEO           = 0x02,
-            OUTPUT_JPEG_THUMBNAIL  = 0x04,
-            OUTPUT_PICTURE         = 0x08,
-        };
     };
-    CropRegion calculateCropRegion(CropRegion::Outputs outputs) const;
+    CropRegion calculateCropRegion(bool previewOnly) const;
 
     // Calculate the field of view of the high-resolution JPEG capture
     status_t calculatePictureFovs(float *horizFov, float *vertFov) const;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bf1692d..9c4f9cd 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -128,7 +128,6 @@
     List<const CameraMetadata> metadataRequestList;
     int32_t requestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
-    bool isReprocess = false;
 
     for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
         sp<CaptureRequest> request = *it;
@@ -136,18 +135,15 @@
             ALOGE("%s: Camera %d: Sent null request.",
                     __FUNCTION__, mCameraId);
             return BAD_VALUE;
-        } else if (it == requests.begin()) {
-            isReprocess = request->mIsReprocess;
-            if (isReprocess && !mInputStream.configured) {
-                ALOGE("%s: Camera %d: no input stream is configured.");
+        } else if (request->mIsReprocess) {
+            if (!mInputStream.configured) {
+                ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
                 return BAD_VALUE;
-            } else if (isReprocess && streaming) {
-                ALOGE("%s: Camera %d: streaming reprocess requests not supported.");
+            } else if (streaming) {
+                ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
+                        mCameraId);
                 return BAD_VALUE;
             }
-        } else if (isReprocess != request->mIsReprocess) {
-            ALOGE("%s: Camera %d: Sent regular and reprocess requests.");
-            return BAD_VALUE;
         }
 
         CameraMetadata metadata(request->mMetadata);
@@ -196,7 +192,7 @@
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
                         outputStreamIds.size());
 
-        if (isReprocess) {
+        if (request->mIsReprocess) {
             metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
         }
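
Review note: the reprocess handling above now validates every request in the batch instead of
only the first one, which also allows regular and reprocess requests to be mixed in a single
submission. The per-request rule can be distilled as follows (validateReprocess is an
illustrative helper, not part of this patch; assumes <utils/Errors.h>):

    // Per-request validation mirroring the loop above: a reprocess request needs a
    // configured input stream and must not be part of a repeating (streaming) submission.
    static android::status_t validateReprocess(bool isReprocess, bool inputConfigured,
            bool streaming) {
        if (!isReprocess) return android::OK;          // regular requests always pass this check
        if (!inputConfigured) return android::BAD_VALUE;
        if (streaming) return android::BAD_VALUE;
        return android::OK;                            // mixing with regular requests is allowed
    }
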
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ba0b264..9b2e143 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -118,7 +118,7 @@
 
     ALOGI("Closed Camera %d. Client was: %s (PID %d, UID %u)",
             TClientBase::mCameraId,
-            String8(TClientBase::mClientPackageName).string(),
+            String8(TClientBase::mOpPackageName).string(),
             mInitialClientPid, TClientBase::mClientUid);
 }
 
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index ac4d9a6..064ff71 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -36,12 +36,47 @@
         chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
         data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
         chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/1);
-        controlModes.push(ANDROID_CONTROL_MODE_OFF);
         controlModes.push(ANDROID_CONTROL_MODE_AUTO);
         camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
         if (entry.count > 1 || entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED) {
             controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
         }
+
+        // Only advertise CONTROL_OFF mode if 3A manual controls are supported.
+        bool isManualAeSupported = false;
+        bool isManualAfSupported = false;
+        bool isManualAwbSupported = false;
+        entry = chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AE_MODE_OFF) {
+                    isManualAeSupported = true;
+                    break;
+                }
+            }
+        }
+        entry = chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AF_MODE_OFF) {
+                    isManualAfSupported = true;
+                    break;
+                }
+            }
+        }
+        entry = chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AWB_MODE_OFF) {
+                    isManualAwbSupported = true;
+                    break;
+                }
+            }
+        }
+        if (isManualAeSupported && isManualAfSupported && isManualAwbSupported) {
+            controlModes.push(ANDROID_CONTROL_MODE_OFF);
+        }
+
         chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
     }
     return;
@@ -86,7 +121,7 @@
         if (ret != 0) {
             return ret;
         }
-        int deviceVersion = cameraInfo.device_version;
+        int deviceVersion = rawInfo.device_version;
         if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
             // static_camera_characteristics is invalid
             *info = rawInfo;
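
Review note: the three scans over the AE/AF/AWB available-mode lists above are identical apart
from the tag and the *_MODE_OFF value; they could be folded into one helper. A sketch of such a
refactoring (hasU8Value is an illustrative name, not part of this patch):

    // True if a u8 metadata entry (as returned by CameraMetadata::find) contains 'value'.
    static bool hasU8Value(const camera_metadata_entry& entry, uint8_t value) {
        for (size_t i = 0; i < entry.count; i++) {
            if (entry.data.u8[i] == value) {
                return true;
            }
        }
        return false;
    }

    // The block above would then reduce to:
    //   if (hasU8Value(chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES), ANDROID_CONTROL_AE_MODE_OFF) &&
    //       hasU8Value(chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES), ANDROID_CONTROL_AF_MODE_OFF) &&
    //       hasU8Value(chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES), ANDROID_CONTROL_AWB_MODE_OFF)) {
    //       controlModes.push(ANDROID_CONTROL_MODE_OFF);
    //   }
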
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 84c5754..2504bfd 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -187,6 +187,8 @@
 
     assert(mBuffersInFlight.size() == 0);
 
+    mConsumer->abandon();
+
     /**
      *  no-op since we can't disconnect the producer from the consumer-side
      */
diff --git a/soundtrigger/ISoundTriggerHwService.cpp b/soundtrigger/ISoundTriggerHwService.cpp
index 75f68b8..e14a771 100644
--- a/soundtrigger/ISoundTriggerHwService.cpp
+++ b/soundtrigger/ISoundTriggerHwService.cpp
@@ -40,6 +40,8 @@
     SET_CAPTURE_STATE,
 };
 
+#define MAX_ITEMS_PER_LIST 1024
+
 class BpSoundTriggerHwService: public BpInterface<ISoundTriggerHwService>
 {
 public:
@@ -116,10 +118,18 @@
         case LIST_MODULES: {
             CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
             unsigned int numModulesReq = data.readInt32();
+            if (numModulesReq > MAX_ITEMS_PER_LIST) {
+                numModulesReq = MAX_ITEMS_PER_LIST;
+            }
             unsigned int numModules = numModulesReq;
             struct sound_trigger_module_descriptor *modules =
                     (struct sound_trigger_module_descriptor *)calloc(numModulesReq,
                                                    sizeof(struct sound_trigger_module_descriptor));
+            if (modules == NULL) {
+                reply->writeInt32(NO_MEMORY);
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
             status_t status = listModules(modules, &numModules);
             reply->writeInt32(status);
             reply->writeInt32(numModules);
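
Review note: the LIST_MODULES server path now clamps the caller-supplied element count to
MAX_ITEMS_PER_LIST and checks the calloc() result before using it, the usual hardening for
size-controlled binder transactions. The same pattern in isolation (allocBoundedList is an
illustrative helper, not part of this patch):

    #include <cstdint>
    #include <cstdlib>

    // Clamp an untrusted element count, then allocate; *granted is the count the caller may
    // actually fill in (0 when the allocation fails or nothing was requested).
    template <typename T>
    static T* allocBoundedList(uint32_t requested, size_t maxItems, size_t* granted) {
        size_t count = (requested > maxItems) ? maxItems : requested;
        T* items = (count > 0) ? static_cast<T*>(std::calloc(count, sizeof(T))) : nullptr;
        *granted = (items != nullptr) ? count : 0;
        return items;  // the binder handler replies NO_MEMORY when it needed items but got nullptr
    }
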