merge in jb-mr2-release history after reset to master
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index a59186a..d583e65 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -165,3 +165,26 @@
 
 include $(BUILD_EXECUTABLE)
 
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:=               \
+        muxer.cpp            \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright liblog libutils libbinder libstagefright_foundation \
+        libmedia libgui libcutils libui libc
+
+LOCAL_C_INCLUDES:= \
+	frameworks/av/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE_TAGS := debug
+
+LOCAL_MODULE:= muxer
+
+include $(BUILD_EXECUTABLE)
+
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 93de112..5d2d721 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -20,7 +20,7 @@
 
 #include "SimplePlayer.h"
 
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <media/AudioTrack.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -67,13 +67,13 @@
 status_t SimplePlayer::setSurface(const sp<IGraphicBufferProducer> &bufferProducer) {
     sp<AMessage> msg = new AMessage(kWhatSetSurface, id());
 
-    sp<SurfaceTextureClient> surfaceTextureClient;
+    sp<Surface> surface;
     if (bufferProducer != NULL) {
-        surfaceTextureClient = new SurfaceTextureClient(bufferProducer);
+        surface = new Surface(bufferProducer);
     }
 
     msg->setObject(
-            "native-window", new NativeWindowWrapper(surfaceTextureClient));
+            "native-window", new NativeWindowWrapper(surface));
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index 723a6e5..fdfefdf 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -36,6 +36,7 @@
 #include <media/stagefright/NuMediaExtractor.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
 #include <ui/DisplayInfo.h>
 
 static void usage(const char *me) {
@@ -413,7 +414,7 @@
         looper->registerHandler(player);
 
         player->setDataSource(argv[0]);
-        player->setSurface(surface->getSurfaceTexture());
+        player->setSurface(surface->getIGraphicBufferProducer());
         player->start();
         sleep(60);
         player->stop();
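
The SimplePlayer and codec changes above follow the same handoff pattern used throughout this patch: the caller pulls the IGraphicBufferProducer out of a Surface, ships it across the message/binder boundary, and the receiving side wraps it back into the renamed Surface class to obtain an ANativeWindow. A minimal sketch of that round trip, assuming only the gui/Surface.h API exercised in this patch:

#include <gui/Surface.h>
#include <gui/IGraphicBufferProducer.h>

using namespace android;

// Producer side: hand out the binder-friendly interface of an existing Surface.
static sp<IGraphicBufferProducer> exportSurface(const sp<Surface> &surface) {
    return surface->getIGraphicBufferProducer();
}

// Consumer side: recover an ANativeWindow that a codec or player can render into.
static sp<ANativeWindow> importSurface(const sp<IGraphicBufferProducer> &producer) {
    if (producer == NULL) {
        return NULL;
    }
    return new Surface(producer);  // Surface is-a ANativeWindow
}
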
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
new file mode 100644
index 0000000..1b127c7
--- /dev/null
+++ b/cmds/stagefright/muxer.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "muxer"
+#include <utils/Log.h>
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/NuMediaExtractor.h>
+
+static void usage(const char *me) {
+    fprintf(stderr, "usage: %s [-a] [-v] [-s <trim start time>]"
+                    " [-e <trim end time>] [-o <output file>]"
+                    " <input video file>\n", me);
+    fprintf(stderr, "       -h help\n");
+    fprintf(stderr, "       -a use audio\n");
+    fprintf(stderr, "       -v use video\n");
+    fprintf(stderr, "       -s Time in milli-seconds when the trim should start\n");
+    fprintf(stderr, "       -e Time in milli-seconds when the trim should end\n");
+    fprintf(stderr, "       -o output file name. Default is /sdcard/muxeroutput.mp4\n");
+
+    exit(1);
+}
+
+using namespace android;
+
+static int muxing(
+        const android::sp<android::ALooper> &looper,
+        const char *path,
+        bool useAudio,
+        bool useVideo,
+        const char *outputFileName,
+        bool enableTrim,
+        int trimStartTimeMs,
+        int trimEndTimeMs) {
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    if (extractor->setDataSource(path) != OK) {
+        fprintf(stderr, "unable to instantiate extractor. %s\n", path);
+        return 1;
+    }
+
+    if (outputFileName == NULL) {
+        outputFileName = "/sdcard/muxeroutput.mp4";
+    }
+
+    ALOGV("input file %s, output file %s", path, outputFileName);
+    ALOGV("useAudio %d, useVideo %d", useAudio, useVideo);
+
+    sp<MediaMuxer> muxer = new MediaMuxer(outputFileName);
+
+    size_t trackCount = extractor->countTracks();
+    // Map the extractor's track index to the muxer's track index.
+    KeyedVector<size_t, ssize_t> trackIndexMap;
+    size_t bufferSize = 1 * 1024 * 1024;  // default buffer size is 1MB.
+
+    bool haveAudio = false;
+    bool haveVideo = false;
+
+    int64_t trimStartTimeUs = trimStartTimeMs * 1000;
+    int64_t trimEndTimeUs = trimEndTimeMs * 1000;
+    bool trimStarted = false;
+    int64_t trimOffsetTimeUs = 0;
+
+    for (size_t i = 0; i < trackCount; ++i) {
+        sp<AMessage> format;
+        status_t err = extractor->getTrackFormat(i, &format);
+        CHECK_EQ(err, (status_t)OK);
+        ALOGV("extractor getTrackFormat: %s", format->debugString().c_str());
+
+        AString mime;
+        CHECK(format->findString("mime", &mime));
+
+        bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
+        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+
+        if (useAudio && !haveAudio && isAudio) {
+            haveAudio = true;
+        } else if (useVideo && !haveVideo && isVideo) {
+            haveVideo = true;
+        } else {
+            continue;
+        }
+
+        if (isVideo) {
+            int width, height;
+            CHECK(format->findInt32("width", &width));
+            CHECK(format->findInt32("height", &height));
+            bufferSize = width * height * 4;  // Assuming it is maximally 4BPP
+        }
+
+        int64_t duration;
+        CHECK(format->findInt64("durationUs", &duration));
+
+        // Since we got the duration now, correct the start time.
+        if (enableTrim) {
+            if (trimStartTimeUs > duration) {
+                fprintf(stderr, "Warning: trimStartTimeUs > duration,"
+                                " reset to 0\n");
+                trimStartTimeUs = 0;
+            }
+        }
+
+        ALOGV("selecting track %d", i);
+
+        err = extractor->selectTrack(i);
+        CHECK_EQ(err, (status_t)OK);
+
+        ssize_t newTrackIndex = muxer->addTrack(format);
+        CHECK_GE(newTrackIndex, 0);
+        trackIndexMap.add(i, newTrackIndex);
+    }
+
+    int64_t muxerStartTimeUs = ALooper::GetNowUs();
+
+    bool sawInputEOS = false;
+
+    size_t trackIndex = -1;
+    sp<ABuffer> newBuffer = new ABuffer(bufferSize);
+
+    muxer->start();
+
+    while (!sawInputEOS) {
+        status_t err = extractor->getSampleTrackIndex(&trackIndex);
+        if (err != OK) {
+            ALOGV("saw input eos, err %d", err);
+            sawInputEOS = true;
+            break;
+        } else {
+            err = extractor->readSampleData(newBuffer);
+            CHECK_EQ(err, (status_t)OK);
+
+            int64_t timeUs;
+            err = extractor->getSampleTime(&timeUs);
+            CHECK_EQ(err, (status_t)OK);
+
+            sp<MetaData> meta;
+            err = extractor->getSampleMeta(&meta);
+            CHECK_EQ(err, (status_t)OK);
+
+            uint32_t sampleFlags = 0;
+            int32_t val;
+            if (meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+                // We only support BUFFER_FLAG_SYNCFRAME in the flag for now.
+                sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+
+                // We turn on trimming at the sync frame.
+                if (enableTrim && timeUs > trimStartTimeUs &&
+                    timeUs <= trimEndTimeUs) {
+                    if (trimStarted == false) {
+                        trimOffsetTimeUs = timeUs;
+                    }
+                    trimStarted = true;
+                }
+            }
+            // Trim can end at any non-sync frame.
+            if (enableTrim && timeUs > trimEndTimeUs) {
+                trimStarted = false;
+            }
+
+            if (!enableTrim || (enableTrim && trimStarted)) {
+                err = muxer->writeSampleData(newBuffer,
+                                             trackIndexMap.valueFor(trackIndex),
+                                             timeUs - trimOffsetTimeUs, sampleFlags);
+            }
+
+            extractor->advance();
+        }
+    }
+
+    muxer->stop();
+    newBuffer.clear();
+    trackIndexMap.clear();
+
+    int64_t elapsedTimeUs = ALooper::GetNowUs() - muxerStartTimeUs;
+    fprintf(stderr, "SUCCESS: muxer generate the video in %lld ms\n",
+            elapsedTimeUs / 1000);
+
+    return 0;
+}
+
+int main(int argc, char **argv) {
+    const char *me = argv[0];
+
+    bool useAudio = false;
+    bool useVideo = false;
+    char *outputFileName = NULL;
+    int trimStartTimeMs = -1;
+    int trimEndTimeMs = -1;
+    // When trimStartTimeMs and trimEndTimeMs seem valid, we turn this switch
+    // to true.
+    bool enableTrim = false;
+
+    int res;
+    while ((res = getopt(argc, argv, "h?avo:s:e:")) >= 0) {
+        switch (res) {
+            case 'a':
+            {
+                useAudio = true;
+                break;
+            }
+
+            case 'v':
+            {
+                useVideo = true;
+                break;
+            }
+
+            case 'o':
+            {
+                outputFileName = optarg;
+                break;
+            }
+
+            case 's':
+            {
+                trimStartTimeMs = atoi(optarg);
+                break;
+            }
+
+            case 'e':
+            {
+                trimEndTimeMs = atoi(optarg);
+                break;
+            }
+
+            case '?':
+            case 'h':
+            default:
+            {
+                usage(me);
+            }
+        }
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    if (argc != 1) {
+        usage(me);
+    }
+
+    if (trimStartTimeMs < 0 || trimEndTimeMs < 0) {
+        // If either -s or -e was not given, or the values are obviously wrong,
+        // then turn off trimming.
+        ALOGV("Trimming is disabled, copying the full-length video.");
+        enableTrim = false;
+    } else if (trimStartTimeMs > trimEndTimeMs) {
+        fprintf(stderr, "ERROR: start time is bigger\n");
+        return 1;
+    } else {
+        enableTrim = true;
+    }
+
+    if (!useAudio && !useVideo) {
+        fprintf(stderr, "ERROR: Missing both -a and -v, no track to mux then.\n");
+        return 1;
+    }
+    ProcessState::self()->startThreadPool();
+
+    // Make sure setDataSource() works.
+    DataSource::RegisterDefaultSniffers();
+
+    sp<ALooper> looper = new ALooper;
+    looper->start();
+
+    int result = muxing(looper, argv[0], useAudio, useVideo, outputFileName,
+                        enableTrim, trimStartTimeMs, trimEndTimeMs);
+
+    looper->stop();
+
+    return result;
+}
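
Stripped of option parsing and the trim window, the new tool reduces to the MediaMuxer call sequence sketched below: tracks are added from the extractor's formats before start(), each sample is written with its extractor timestamp, and stop() finalizes the output file. This is a condensed sketch with error handling omitted, assuming a single selected track:

#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

static void remux(const char *in, const char *out) {
    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
    extractor->setDataSource(in);

    sp<MediaMuxer> muxer = new MediaMuxer(out);

    sp<AMessage> format;
    extractor->getTrackFormat(0, &format);
    ssize_t dst = muxer->addTrack(format);   // 1. declare tracks before start()
    extractor->selectTrack(0);

    muxer->start();                          // 2. then start the muxer

    sp<ABuffer> buffer = new ABuffer(1024 * 1024);
    int64_t timeUs;
    while (extractor->readSampleData(buffer) == OK) {
        extractor->getSampleTime(&timeUs);
        muxer->writeSampleData(buffer, dst, timeUs, 0);  // 3. feed samples
        extractor->advance();
    }

    muxer->stop();                           // 4. finalize the output file
}

Per the usage() text above, a typical invocation would look something like "muxer -v -s 1000 -e 5000 -o /sdcard/trimmed.mp4 /sdcard/input.mp4" (file names illustrative).
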
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 2aae64d..5bdbfbb 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -53,7 +53,7 @@
 
 #include <fcntl.h>
 
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
 
 using namespace android;
@@ -941,7 +941,7 @@
             CHECK(useSurfaceTexAlloc);
 
             sp<GLConsumer> texture = new GLConsumer(0 /* tex */);
-            gSurface = new SurfaceTextureClient(texture->getBufferQueue());
+            gSurface = new Surface(texture->getBufferQueue());
         }
 
         CHECK_EQ((status_t)OK,
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index af6afe0..d49ab4a 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -35,6 +35,7 @@
 #include <media/IMediaPlayerService.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
 
 #include <fcntl.h>
 #include <ui/DisplayInfo.h>
@@ -373,7 +374,7 @@
         service->create(client, 0);
 
     if (player != NULL && player->setDataSource(source) == NO_ERROR) {
-        player->setVideoSurfaceTexture(surface->getSurfaceTexture());
+        player->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
         player->start();
 
         client->waitForEOS();
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 2882c41..da6b507 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -32,7 +32,7 @@
 class ICamera;
 class ICameraRecordingProxy;
 class IGraphicBufferProducer;
-class SurfaceTextureClient;
+class Surface;
 
 typedef void (*media_completion_f)(status_t status, void *cookie);
 
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index 107ba66..8fc417f 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -115,7 +115,7 @@
     virtual ~Writer() { }
 
     virtual void    log(const char *string);
-    virtual void    logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+    virtual void    logf(const char *fmt, ...);
     virtual void    logvf(const char *fmt, va_list ap);
     virtual void    logTimestamp();
     virtual void    logTimestamp(const struct timespec& ts);
@@ -149,7 +149,7 @@
     LockedWriter(size_t size, void *shared);
 
     virtual void    log(const char *string);
-    virtual void    logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+    virtual void    logf(const char *fmt, ...);
     virtual void    logvf(const char *fmt, va_list ap);
     virtual void    logTimestamp();
     virtual void    logTimestamp(const struct timespec& ts);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 00d583e..b0c1b34 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -54,6 +54,8 @@
 
     // Convenience methods:
     bool getUInt16(off64_t offset, uint16_t *x);
+    bool getUInt32(off64_t offset, uint32_t *x);
+    bool getUInt64(off64_t offset, uint64_t *x);
 
     // May return ERROR_UNSUPPORTED.
     virtual status_t getSize(off64_t *size);
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 3f0d3b3..1002663 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -31,7 +31,7 @@
 struct AString;
 struct ICrypto;
 struct SoftwareRenderer;
-struct SurfaceTextureClient;
+struct Surface;
 
 struct MediaCodec : public AHandler {
     enum ConfigureFlags {
@@ -52,7 +52,7 @@
 
     status_t configure(
             const sp<AMessage> &format,
-            const sp<SurfaceTextureClient> &nativeWindow,
+            const sp<Surface> &nativeWindow,
             const sp<ICrypto> &crypto,
             uint32_t flags);
 
@@ -187,7 +187,7 @@
     AString mComponentName;
     uint32_t mReplyID;
     uint32_t mFlags;
-    sp<SurfaceTextureClient> mNativeWindow;
+    sp<Surface> mNativeWindow;
     SoftwareRenderer *mSoftRenderer;
     sp<AMessage> mOutputFormat;
 
@@ -229,7 +229,7 @@
     status_t queueCSDInputBuffer(size_t bufferIndex);
 
     status_t setNativeWindow(
-            const sp<SurfaceTextureClient> &surfaceTextureClient);
+            const sp<Surface> &surface);
 
     void postActivityNotificationIfPossible();
 
diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h
index 97cc0ce..cfeec22 100644
--- a/include/media/stagefright/NativeWindowWrapper.h
+++ b/include/media/stagefright/NativeWindowWrapper.h
@@ -18,29 +18,29 @@
 
 #define NATIVE_WINDOW_WRAPPER_H_
 
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 
 namespace android {
 
-// SurfaceTextureClient derives from ANativeWindow which derives from multiple
+// Surface derives from ANativeWindow which derives from multiple
 // base classes, in order to carry it in AMessages, we'll temporarily wrap it
 // into a NativeWindowWrapper.
 
 struct NativeWindowWrapper : RefBase {
     NativeWindowWrapper(
-            const sp<SurfaceTextureClient> &surfaceTextureClient) :
+            const sp<Surface> &surfaceTextureClient) :
         mSurfaceTextureClient(surfaceTextureClient) { }
 
     sp<ANativeWindow> getNativeWindow() const {
         return mSurfaceTextureClient;
     }
 
-    sp<SurfaceTextureClient> getSurfaceTextureClient() const {
+    sp<Surface> getSurfaceTextureClient() const {
         return mSurfaceTextureClient;
     }
 
 private:
-    const sp<SurfaceTextureClient> mSurfaceTextureClient;
+    const sp<Surface> mSurfaceTextureClient;
 
     DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper);
 };
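
NativeWindowWrapper exists purely so a Surface can ride in an AMessage, which is how SimplePlayer and NuPlayer pass it around elsewhere in this patch. A small sketch of both halves of that trip, assuming the AMessage setObject()/findObject() calls those callers use:

#include <media/stagefright/NativeWindowWrapper.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

static void sendSurface(const sp<AMessage> &msg, const sp<Surface> &surface) {
    // RefBase-derived wrapper, so it fits in an AMessage object slot.
    msg->setObject("native-window", new NativeWindowWrapper(surface));
}

static sp<ANativeWindow> receiveSurface(const sp<AMessage> &msg) {
    sp<RefBase> obj;
    if (!msg->findObject("native-window", &obj)) {
        return NULL;
    }
    // Unwrap back to something a decoder can use as its output window.
    return static_cast<NativeWindowWrapper *>(obj.get())->getNativeWindow();
}
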
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 609d84f..5f21da9 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -35,7 +35,7 @@
 // ASSUMPTIONS
 // 1. SurfaceMediaSource is initialized with width*height which
 // can never change.  However, deqeueue buffer does not currently
-// enforce this as in BufferQueue, dequeue can be used by SurfaceTextureClient
+// enforce this as in BufferQueue, dequeue can be used by Surface
 // which can modify the default width and heght.  Also neither the width
 // nor height can be 0.
 // 2. setSynchronousMode is never used (basically no one should call
@@ -122,7 +122,7 @@
 protected:
 
     // Implementation of the BufferQueue::ConsumerListener interface.  These
-    // calls are used to notify the SurfaceTextureClient of asynchronous events in the
+    // calls are used to notify the Surface of asynchronous events in the
     // BufferQueue.
     virtual void onFrameAvailable();
 
@@ -157,7 +157,7 @@
     // mCurrentSlot is the buffer slot index of the buffer that is currently
     // being used by buffer consumer
     // (e.g. StageFrightRecorder in the case of SurfaceMediaSource or GLTexture
-    // in the case of SurfaceTextureClient).
+    // in the case of Surface).
     // It is initialized to INVALID_BUFFER_SLOT,
     // indicating that no buffer slot is currently bound to the texture. Note,
     // however, that a value of INVALID_BUFFER_SLOT does not necessarily mean
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
index 114f0f6..702900b 100755
--- a/libvideoeditor/lvpp/NativeWindowRenderer.cpp
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
@@ -21,7 +21,7 @@
 #include <GLES2/gl2ext.h>
 #include <cutils/log.h>
 #include <gui/GLConsumer.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -316,7 +316,7 @@
 
 void NativeWindowRenderer::render(RenderInput* input) {
     sp<GLConsumer> ST = input->mST;
-    sp<SurfaceTextureClient> STC = input->mSTC;
+    sp<Surface> STC = input->mSTC;
 
     if (input->mIsExternalBuffer) {
         queueExternalBuffer(STC.get(), input->mBuffer,
@@ -569,7 +569,7 @@
     : mRenderer(renderer)
     , mTextureId(textureId) {
     mST = new GLConsumer(mTextureId);
-    mSTC = new SurfaceTextureClient(mST->getBufferQueue());
+    mSTC = new Surface(mST->getBufferQueue());
     native_window_connect(mSTC.get(), NATIVE_WINDOW_API_MEDIA);
 }
 
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.h b/libvideoeditor/lvpp/NativeWindowRenderer.h
index b0623ba..26b4cba 100755
--- a/libvideoeditor/lvpp/NativeWindowRenderer.h
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.h
@@ -37,16 +37,16 @@
 // we only expect that happens briefly when one clip is about to finish
 // and the next clip is about to start.
 //
-// We allocate a SurfaceTextureClient for each RenderInput and the user can use
+// We allocate a Surface for each RenderInput and the user can use
 // the getTargetWindow() function to get the corresponding ANativeWindow
-// for that SurfaceTextureClient. The intention is that the user can pass that
+// for that Surface. The intention is that the user can pass that
 // ANativeWindow to OMXCodec::Create() so the codec can decode directly
 // to buffers provided by the texture.
 
 namespace android {
 
 class GLConsumer;
-class SurfaceTextureClient;
+class Surface;
 class RenderInput;
 
 class NativeWindowRenderer {
@@ -110,7 +110,7 @@
     // destination aspect ratio.
     GLfloat mPositionCoordinates[8];
 
-    // We use a different GL id for each SurfaceTextureClient.
+    // We use a different GL id for each Surface.
     GLuint mNextTextureId;
 
     // Number of existing RenderInputs, just for debugging.
@@ -146,7 +146,7 @@
 
 class RenderInput {
 public:
-    // Returns the ANativeWindow corresponds to the SurfaceTextureClient.
+    // Returns the ANativeWindow corresponding to the Surface.
     ANativeWindow* getTargetWindow();
 
     // Updates video frame size from the MediaSource's metadata. Specifically
@@ -156,7 +156,7 @@
     // Renders the buffer with the given video effect and rending mode.
     // The video effets are defined in VideoEditorTools.h
     // Set isExternalBuffer to true only when the buffer given is not
-    // provided by the SurfaceTextureClient.
+    // provided by the Surface.
     void render(MediaBuffer *buffer, uint32_t videoEffect,
         M4xVSS_MediaRendering renderingMode, bool isExternalBuffer);
 private:
@@ -165,7 +165,7 @@
     NativeWindowRenderer* mRenderer;
     GLuint mTextureId;
     sp<GLConsumer> mST;
-    sp<SurfaceTextureClient> mSTC;
+    sp<Surface> mSTC;
     int mWidth, mHeight;
 
     // These are only valid during render() calls
diff --git a/libvideoeditor/lvpp/PreviewPlayer.cpp b/libvideoeditor/lvpp/PreviewPlayer.cpp
index 754c5a9..2bd9f84 100755
--- a/libvideoeditor/lvpp/PreviewPlayer.cpp
+++ b/libvideoeditor/lvpp/PreviewPlayer.cpp
@@ -32,7 +32,7 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <gui/Surface.h>
 #include <gui/IGraphicBufferProducer.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 
 #include "VideoEditorPreviewController.h"
 #include "DummyAudioSource.h"
@@ -1780,7 +1780,7 @@
 
     mSurface.clear();
     if (bufferProducer != NULL) {
-        setNativeWindow_l(new SurfaceTextureClient(bufferProducer));
+        setNativeWindow_l(new Surface(bufferProducer));
     }
 }
 
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 14602bf..3defec3 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -27,7 +27,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/IPCThreadState.h>
 
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 
 #include <media/mediaplayer.h>
 #include <media/AudioSystem.h>
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f932131..16f1317 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -38,7 +38,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryHeapBase.h>
 #include <binder/MemoryBase.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <utils/Errors.h>  // for status_t
 #include <utils/String8.h>
 #include <utils/SystemClock.h>
@@ -731,7 +731,7 @@
 
     sp<ANativeWindow> anw;
     if (bufferProducer != NULL) {
-        anw = new SurfaceTextureClient(bufferProducer);
+        anw = new Surface(bufferProducer);
         status_t err = native_window_api_connect(anw.get(),
                 NATIVE_WINDOW_API_MEDIA);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 30eb4b9..2ba6c22 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -224,7 +224,7 @@
         msg->setObject(
                 "native-window",
                 new NativeWindowWrapper(
-                    new SurfaceTextureClient(bufferProducer)));
+                    new Surface(bufferProducer)));
     }
 
     msg->post();
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 6934e59..acc3abf 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -19,7 +19,6 @@
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
-        FragmentedMP4Extractor.cpp        \
         HTTPBase.cpp                      \
         JPEGSource.cpp                    \
         MP3Extractor.cpp                  \
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 0f4d866..bd28118 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -49,7 +49,7 @@
 #include <media/stagefright/OMXCodec.h>
 
 #include <gui/IGraphicBufferProducer.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -1183,7 +1183,7 @@
 
     status_t err;
     if (bufferProducer != NULL) {
-        err = setNativeWindow_l(new SurfaceTextureClient(bufferProducer));
+        err = setNativeWindow_l(new Surface(bufferProducer));
     } else {
         err = setNativeWindow_l(NULL);
     }
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 9d0eea2..19b38ee 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -23,7 +23,6 @@
 #include "include/AACExtractor.h"
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
-#include "include/FragmentedMP4Extractor.h"
 #include "include/HTTPBase.h"
 #include "include/MP3Extractor.h"
 #include "include/MPEG2PSExtractor.h"
@@ -59,6 +58,32 @@
     return true;
 }
 
+bool DataSource::getUInt32(off64_t offset, uint32_t *x) {
+    *x = 0;
+
+    uint32_t tmp;
+    if (readAt(offset, &tmp, 4) != 4) {
+        return false;
+    }
+
+    *x = ntohl(tmp);
+
+    return true;
+}
+
+bool DataSource::getUInt64(off64_t offset, uint64_t *x) {
+    *x = 0;
+
+    uint64_t tmp;
+    if (readAt(offset, &tmp, 8) != 8) {
+        return false;
+    }
+
+    *x = ntoh64(tmp);
+
+    return true;
+}
+
 status_t DataSource::getSize(off64_t *size) {
     *size = 0;
 
@@ -111,7 +136,6 @@
 // static
 void DataSource::RegisterDefaultSniffers() {
     RegisterSniffer(SniffMPEG4);
-    RegisterSniffer(SniffFragmentedMP4);
     RegisterSniffer(SniffMatroska);
     RegisterSniffer(SniffOgg);
     RegisterSniffer(SniffWAV);
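
The new getUInt32()/getUInt64() helpers mirror the existing getUInt16(): they read a big-endian field at the given offset and return it in host order, which is what MPEG4Extractor::parseSegmentIndex() further down relies on. A hypothetical caller reading the version/flags word of a full MP4 box, in the same style:

#include <media/stagefright/DataSource.h>

using namespace android;

static bool readFullBoxHeader(
        const sp<DataSource> &source, off64_t offset,
        uint32_t *version, uint32_t *flags) {
    uint32_t versionAndFlags;
    if (!source->getUInt32(offset, &versionAndFlags)) {  // big-endian -> host order
        return false;
    }
    *version = versionAndFlags >> 24;  // top byte is the box version
    *flags = versionAndFlags & 0xffffff;
    return true;
}
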
diff --git a/media/libstagefright/FragmentedMP4Extractor.cpp b/media/libstagefright/FragmentedMP4Extractor.cpp
deleted file mode 100644
index 496828d..0000000
--- a/media/libstagefright/FragmentedMP4Extractor.cpp
+++ /dev/null
@@ -1,464 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FragmentedMP4Extractor"
-#include <utils/Log.h>
-
-#include "include/FragmentedMP4Extractor.h"
-#include "include/SampleTable.h"
-#include "include/ESDS.h"
-
-#include <arpa/inet.h>
-
-#include <ctype.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <cutils/properties.h> // for property_get
-
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class FragmentedMPEG4Source : public MediaSource {
-public:
-    // Caller retains ownership of the Parser
-    FragmentedMPEG4Source(bool audio,
-                const sp<MetaData> &format,
-                const sp<FragmentedMP4Parser> &parser,
-                const sp<FragmentedMP4Extractor> &extractor);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
-    virtual ~FragmentedMPEG4Source();
-
-private:
-    Mutex mLock;
-
-    sp<MetaData> mFormat;
-    sp<FragmentedMP4Parser> mParser;
-    sp<FragmentedMP4Extractor> mExtractor;
-    bool mIsAudioTrack;
-    uint32_t mCurrentSampleIndex;
-
-    bool mIsAVC;
-    size_t mNALLengthSize;
-
-    bool mStarted;
-
-    MediaBufferGroup *mGroup;
-
-    bool mWantsNALFragments;
-
-    uint8_t *mSrcBuffer;
-
-    FragmentedMPEG4Source(const FragmentedMPEG4Source &);
-    FragmentedMPEG4Source &operator=(const FragmentedMPEG4Source &);
-};
-
-
-FragmentedMP4Extractor::FragmentedMP4Extractor(const sp<DataSource> &source)
-    : mLooper(new ALooper),
-      mParser(new FragmentedMP4Parser()),
-      mDataSource(source),
-      mInitCheck(NO_INIT),
-      mFileMetaData(new MetaData) {
-    ALOGV("FragmentedMP4Extractor");
-    mLooper->registerHandler(mParser);
-    mLooper->start(false /* runOnCallingThread */);
-    mParser->start(mDataSource);
-
-    bool hasVideo = mParser->getFormat(false /* audio */, true /* synchronous */) != NULL;
-    bool hasAudio = mParser->getFormat(true /* audio */, true /* synchronous */) != NULL;
-
-    ALOGV("number of tracks: %d", countTracks());
-
-    if (hasVideo) {
-        mFileMetaData->setCString(
-                kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
-    } else if (hasAudio) {
-        mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
-    } else {
-        ALOGE("no audio and no video, no idea what file type this is");
-    }
-    // tracks are numbered such that video track is first, audio track is second
-    if (hasAudio && hasVideo) {
-        mTrackCount = 2;
-        mAudioTrackIndex = 1;
-    } else if (hasAudio) {
-        mTrackCount = 1;
-        mAudioTrackIndex = 0;
-    } else if (hasVideo) {
-        mTrackCount = 1;
-        mAudioTrackIndex = -1;
-    } else {
-        mTrackCount = 0;
-        mAudioTrackIndex = -1;
-    }
-}
-
-FragmentedMP4Extractor::~FragmentedMP4Extractor() {
-    ALOGV("~FragmentedMP4Extractor");
-    mLooper->stop();
-}
-
-uint32_t FragmentedMP4Extractor::flags() const {
-    return CAN_PAUSE |
-            (mParser->isSeekable() ? (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
-}
-
-sp<MetaData> FragmentedMP4Extractor::getMetaData() {
-    return mFileMetaData;
-}
-
-size_t FragmentedMP4Extractor::countTracks() {
-    return mTrackCount;
-}
-
-
-sp<MetaData> FragmentedMP4Extractor::getTrackMetaData(
-        size_t index, uint32_t flags) {
-    if (index >= countTracks()) {
-        return NULL;
-    }
-
-    sp<AMessage> msg = mParser->getFormat(index == mAudioTrackIndex, true /* synchronous */);
-
-    if (msg == NULL) {
-        ALOGV("got null format for track %d", index);
-        return NULL;
-    }
-
-    sp<MetaData> meta = new MetaData();
-    convertMessageToMetaData(msg, meta);
-    return meta;
-}
-
-static void MakeFourCCString(uint32_t x, char *s) {
-    s[0] = x >> 24;
-    s[1] = (x >> 16) & 0xff;
-    s[2] = (x >> 8) & 0xff;
-    s[3] = x & 0xff;
-    s[4] = '\0';
-}
-
-sp<MediaSource> FragmentedMP4Extractor::getTrack(size_t index) {
-    if (index >= countTracks()) {
-        return NULL;
-    }
-    return new FragmentedMPEG4Source(index == mAudioTrackIndex, getTrackMetaData(index, 0), mParser, this);
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-
-FragmentedMPEG4Source::FragmentedMPEG4Source(
-        bool audio,
-        const sp<MetaData> &format,
-        const sp<FragmentedMP4Parser> &parser,
-        const sp<FragmentedMP4Extractor> &extractor)
-    : mFormat(format),
-      mParser(parser),
-      mExtractor(extractor),
-      mIsAudioTrack(audio),
-      mStarted(false),
-      mGroup(NULL),
-      mWantsNALFragments(false),
-      mSrcBuffer(NULL) {
-}
-
-FragmentedMPEG4Source::~FragmentedMPEG4Source() {
-    if (mStarted) {
-        stop();
-    }
-}
-
-status_t FragmentedMPEG4Source::start(MetaData *params) {
-    Mutex::Autolock autoLock(mLock);
-
-    CHECK(!mStarted);
-
-    int32_t val;
-    if (params && params->findInt32(kKeyWantsNALFragments, &val)
-        && val != 0) {
-        mWantsNALFragments = true;
-    } else {
-        mWantsNALFragments = false;
-    }
-    ALOGV("caller wants NAL fragments: %s", mWantsNALFragments ? "yes" : "no");
-
-    mGroup = new MediaBufferGroup;
-
-    // for video, make the buffer big enough for an extremely poorly compressed 1080p frame.
-    int32_t max_size = mIsAudioTrack ? 65536 : 3110400;
-
-    mGroup->add_buffer(new MediaBuffer(max_size));
-
-    mSrcBuffer = new uint8_t[max_size];
-
-    mStarted = true;
-
-    return OK;
-}
-
-status_t FragmentedMPEG4Source::stop() {
-    Mutex::Autolock autoLock(mLock);
-
-    CHECK(mStarted);
-
-    delete[] mSrcBuffer;
-    mSrcBuffer = NULL;
-
-    delete mGroup;
-    mGroup = NULL;
-
-    mStarted = false;
-    mCurrentSampleIndex = 0;
-
-    return OK;
-}
-
-sp<MetaData> FragmentedMPEG4Source::getFormat() {
-    Mutex::Autolock autoLock(mLock);
-
-    return mFormat;
-}
-
-
-status_t FragmentedMPEG4Source::read(
-        MediaBuffer **out, const ReadOptions *options) {
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode mode;
-    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-        mParser->seekTo(mIsAudioTrack, seekTimeUs);
-    }
-    MediaBuffer *buffer = NULL;
-    mGroup->acquire_buffer(&buffer);
-    sp<ABuffer> parseBuffer;
-
-    status_t ret = mParser->dequeueAccessUnit(mIsAudioTrack, &parseBuffer, true /* synchronous */);
-    if (ret != OK) {
-        buffer->release();
-        ALOGV("returning %d", ret);
-        return ret;
-    }
-    sp<AMessage> meta = parseBuffer->meta();
-    int64_t timeUs;
-    CHECK(meta->findInt64("timeUs", &timeUs));
-    int32_t isSync;
-    if (meta->findInt32("is-sync-frame", &isSync) && isSync != 0) {
-        buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-    }
-    buffer->meta_data()->setInt64(kKeyTime, timeUs);
-    buffer->set_range(0, parseBuffer->size());
-    memcpy(buffer->data(), parseBuffer->data(), parseBuffer->size());
-    *out = buffer;
-    return OK;
-}
-
-
-static bool isCompatibleBrand(uint32_t fourcc) {
-    static const uint32_t kCompatibleBrands[] = {
-        FOURCC('i', 's', 'o', 'm'),
-        FOURCC('i', 's', 'o', '2'),
-        FOURCC('a', 'v', 'c', '1'),
-        FOURCC('3', 'g', 'p', '4'),
-        FOURCC('m', 'p', '4', '1'),
-        FOURCC('m', 'p', '4', '2'),
-
-        // Won't promise that the following file types can be played.
-        // Just give these file types a chance.
-        FOURCC('q', 't', ' ', ' '),  // Apple's QuickTime
-        FOURCC('M', 'S', 'N', 'V'),  // Sony's PSP
-
-        FOURCC('3', 'g', '2', 'a'),  // 3GPP2
-        FOURCC('3', 'g', '2', 'b'),
-    };
-
-    for (size_t i = 0;
-         i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
-         ++i) {
-        if (kCompatibleBrands[i] == fourcc) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-// Attempt to actually parse the 'ftyp' atom and determine if a suitable
-// compatible brand is present.
-// Also try to identify where this file's metadata ends
-// (end of the 'moov' atom) and report it to the caller as part of
-// the metadata.
-static bool Sniff(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *meta) {
-    // We scan up to 128k bytes to identify this file as an MP4.
-    static const off64_t kMaxScanOffset = 128ll * 1024ll;
-
-    off64_t offset = 0ll;
-    bool foundGoodFileType = false;
-    bool isFragmented = false;
-    off64_t moovAtomEndOffset = -1ll;
-    bool done = false;
-
-    while (!done && offset < kMaxScanOffset) {
-        uint32_t hdr[2];
-        if (source->readAt(offset, hdr, 8) < 8) {
-            return false;
-        }
-
-        uint64_t chunkSize = ntohl(hdr[0]);
-        uint32_t chunkType = ntohl(hdr[1]);
-        off64_t chunkDataOffset = offset + 8;
-
-        if (chunkSize == 1) {
-            if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
-                return false;
-            }
-
-            chunkSize = ntoh64(chunkSize);
-            chunkDataOffset += 8;
-
-            if (chunkSize < 16) {
-                // The smallest valid chunk is 16 bytes long in this case.
-                return false;
-            }
-        } else if (chunkSize < 8) {
-            // The smallest valid chunk is 8 bytes long.
-            return false;
-        }
-
-        off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
-
-        char chunkstring[5];
-        MakeFourCCString(chunkType, chunkstring);
-        ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
-        switch (chunkType) {
-            case FOURCC('f', 't', 'y', 'p'):
-            {
-                if (chunkDataSize < 8) {
-                    return false;
-                }
-
-                uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
-                for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
-                    if (i == 1) {
-                        // Skip this index, it refers to the minorVersion,
-                        // not a brand.
-                        continue;
-                    }
-
-                    uint32_t brand;
-                    if (source->readAt(
-                                chunkDataOffset + 4 * i, &brand, 4) < 4) {
-                        return false;
-                    }
-
-                    brand = ntohl(brand);
-                    char brandstring[5];
-                    MakeFourCCString(brand, brandstring);
-                    ALOGV("Brand: %s", brandstring);
-
-                    if (isCompatibleBrand(brand)) {
-                        foundGoodFileType = true;
-                        break;
-                    }
-                }
-
-                if (!foundGoodFileType) {
-                    return false;
-                }
-
-                break;
-            }
-
-            case FOURCC('m', 'o', 'o', 'v'):
-            {
-                moovAtomEndOffset = offset + chunkSize;
-                break;
-            }
-
-            case FOURCC('m', 'o', 'o', 'f'):
-            {
-                // this is kind of broken, since we might not actually find a
-                // moof box in the first 128k.
-                isFragmented = true;
-                done = true;
-                break;
-            }
-
-            default:
-                break;
-        }
-
-        offset += chunkSize;
-    }
-
-    if (!foundGoodFileType || !isFragmented) {
-        return false;
-    }
-
-    *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
-    *confidence = 0.5f; // slightly more than MPEG4Extractor
-
-    if (moovAtomEndOffset >= 0) {
-        *meta = new AMessage;
-        (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
-        (*meta)->setInt32("fragmented", 1); // tell MediaExtractor what to instantiate
-
-        ALOGV("found metadata size: %lld", moovAtomEndOffset);
-    }
-
-    return true;
-}
-
-// used by DataSource::RegisterDefaultSniffers
-bool SniffFragmentedMP4(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *meta) {
-    ALOGV("SniffFragmentedMP4");
-    char prop[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.use-fragmp4", prop, NULL)
-            && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) {
-        return Sniff(source, mimeType, confidence, meta);
-    }
-
-    return false;
-}
-
-}  // namespace android
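
Both the deleted Sniff() above and the new MPEG4Source::parseChunk() below walk MP4 boxes the same way: an 8-byte header (32-bit size plus fourcc), where size == 1 means a 64-bit "largesize" follows, so such a box can never be smaller than 16 bytes (8-byte header plus 8-byte largesize). A stand-alone sketch of that header walk, assuming the DataSource::readAt() semantics and the ntoh64() helper from media/stagefright/Utils.h that the deleted extractor used:

#include <arpa/inet.h>                // ntohl
#include <media/stagefright/DataSource.h>
#include <media/stagefright/Utils.h>  // ntoh64

using namespace android;

// Fills in the box size, its fourcc type and the offset where the payload
// starts; returns false on I/O error or on a size too small to be valid.
static bool readBoxHeader(
        const sp<DataSource> &source, off64_t offset,
        uint64_t *size, uint32_t *type, off64_t *dataOffset) {
    uint32_t hdr[2];
    if (source->readAt(offset, hdr, 8) < 8) {
        return false;
    }
    *size = ntohl(hdr[0]);
    *type = ntohl(hdr[1]);
    *dataOffset = offset + 8;

    if (*size == 1) {
        // 64-bit "largesize" follows the compact header.
        if (source->readAt(offset + 8, size, 8) < 8) {
            return false;
        }
        *size = ntoh64(*size);
        *dataOffset += 8;
        return *size >= 16;  // 8-byte header + 8-byte largesize
    }
    return *size >= 8;       // compact header alone is 8 bytes
}
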
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 1a62f9d..b2e60be 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -22,8 +22,6 @@
 #include "include/SampleTable.h"
 #include "include/ESDS.h"
 
-#include <arpa/inet.h>
-
 #include <ctype.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -33,13 +31,11 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -50,15 +46,17 @@
     MPEG4Source(const sp<MetaData> &format,
                 const sp<DataSource> &dataSource,
                 int32_t timeScale,
-                const sp<SampleTable> &sampleTable);
+                const sp<SampleTable> &sampleTable,
+                Vector<SidxEntry> &sidx,
+                off64_t firstMoofOffset);
 
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop();
 
     virtual sp<MetaData> getFormat();
 
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
+    virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
+    virtual status_t fragmentedRead(MediaBuffer **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~MPEG4Source();
@@ -71,6 +69,14 @@
     int32_t mTimescale;
     sp<SampleTable> mSampleTable;
     uint32_t mCurrentSampleIndex;
+    uint32_t mCurrentFragmentIndex;
+    Vector<SidxEntry> &mSegments;
+    off64_t mFirstMoofOffset;
+    off64_t mCurrentMoofOffset;
+    off64_t mNextMoofOffset;
+    uint32_t mCurrentTime;
+    int32_t mLastParsedTrackId;
+    int32_t mTrackId;
 
     bool mIsAVC;
     size_t mNALLengthSize;
@@ -86,6 +92,38 @@
     uint8_t *mSrcBuffer;
 
     size_t parseNALSize(const uint8_t *data) const;
+    status_t parseChunk(off64_t *offset);
+    status_t parseTrackFragmentHeader(off64_t offset, off64_t size);
+    status_t parseTrackFragmentRun(off64_t offset, off64_t size);
+
+    struct TrackFragmentHeaderInfo {
+        enum Flags {
+            kBaseDataOffsetPresent         = 0x01,
+            kSampleDescriptionIndexPresent = 0x02,
+            kDefaultSampleDurationPresent  = 0x08,
+            kDefaultSampleSizePresent      = 0x10,
+            kDefaultSampleFlagsPresent     = 0x20,
+            kDurationIsEmpty               = 0x10000,
+        };
+
+        uint32_t mTrackID;
+        uint32_t mFlags;
+        uint64_t mBaseDataOffset;
+        uint32_t mSampleDescriptionIndex;
+        uint32_t mDefaultSampleDuration;
+        uint32_t mDefaultSampleSize;
+        uint32_t mDefaultSampleFlags;
+
+        uint64_t mDataOffset;
+    };
+    TrackFragmentHeaderInfo mTrackFragmentHeaderInfo;
+
+    struct Sample {
+        off64_t offset;
+        size_t size;
+        uint32_t duration;
+    };
+    Vector<Sample> mCurrentSamples;
 
     MPEG4Source(const MPEG4Source &);
     MPEG4Source &operator=(const MPEG4Source &);
@@ -265,7 +303,9 @@
 }
 
 MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
-    : mDataSource(source),
+    : mSidxDuration(0),
+      mMoofOffset(0),
+      mDataSource(source),
       mInitCheck(NO_INIT),
       mHasVideo(false),
       mFirstTrack(NULL),
@@ -295,6 +335,12 @@
     mFirstSINF = NULL;
 }
 
+uint32_t MPEG4Extractor::flags() const {
+    return CAN_PAUSE |
+            ((mMoofOffset == 0 || mSidxEntries.size() != 0) ?
+                    (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
+}
+
 sp<MetaData> MPEG4Extractor::getMetaData() {
     status_t err;
     if ((err = readMetaData()) != OK) {
@@ -348,15 +394,24 @@
         const char *mime;
         CHECK(track->meta->findCString(kKeyMIMEType, &mime));
         if (!strncasecmp("video/", mime, 6)) {
-            uint32_t sampleIndex;
-            uint32_t sampleTime;
-            if (track->sampleTable->findThumbnailSample(&sampleIndex) == OK
-                    && track->sampleTable->getMetaDataForSample(
-                        sampleIndex, NULL /* offset */, NULL /* size */,
-                        &sampleTime) == OK) {
-                track->meta->setInt64(
-                        kKeyThumbnailTime,
-                        ((int64_t)sampleTime * 1000000) / track->timescale);
+            if (mMoofOffset > 0) {
+                int64_t duration;
+                if (track->meta->findInt64(kKeyDuration, &duration)) {
+                    // nothing fancy, just pick a frame near 1/4th of the duration
+                    track->meta->setInt64(
+                            kKeyThumbnailTime, duration / 4);
+                }
+            } else {
+                uint32_t sampleIndex;
+                uint32_t sampleTime;
+                if (track->sampleTable->findThumbnailSample(&sampleIndex) == OK
+                        && track->sampleTable->getMetaDataForSample(
+                            sampleIndex, NULL /* offset */, NULL /* size */,
+                            &sampleTime) == OK) {
+                    track->meta->setInt64(
+                            kKeyThumbnailTime,
+                            ((int64_t)sampleTime * 1000000) / track->timescale);
+                }
             }
         }
     }
@@ -371,7 +426,25 @@
 
     off64_t offset = 0;
     status_t err;
-    while ((err = parseChunk(&offset, 0)) == OK) {
+    while (true) {
+        err = parseChunk(&offset, 0);
+        if (err == OK) {
+            continue;
+        }
+
+        uint32_t hdr[2];
+        if (mDataSource->readAt(offset, hdr, 8) < 8) {
+            break;
+        }
+        uint32_t chunk_type = ntohl(hdr[1]);
+        if (chunk_type == FOURCC('s', 'i', 'd', 'x')) {
+            // parse the sidx box too
+            continue;
+        } else if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+            // store the offset of the first segment
+            mMoofOffset = offset;
+        }
+        break;
     }
 
     if (mInitCheck == OK) {
@@ -630,7 +703,7 @@
 
     char chunk[5];
     MakeFourCCString(chunk_type, chunk);
-    ALOGV("chunk: %s @ %lld", chunk, *offset);
+    ALOGV("chunk: %s @ %lld, %d", chunk, *offset, depth);
 
 #if 0
     static const char kWhitespace[] = "                                        ";
@@ -816,7 +889,7 @@
 
             mLastTrack->timescale = ntohl(timescale);
 
-            int64_t duration;
+            int64_t duration = 0;
             if (version == 1) {
                 if (mDataSource->readAt(
                             timescale_offset + 4, &duration, sizeof(duration))
@@ -825,13 +898,16 @@
                 }
                 duration = ntoh64(duration);
             } else {
-                int32_t duration32;
+                uint32_t duration32;
                 if (mDataSource->readAt(
                             timescale_offset + 4, &duration32, sizeof(duration32))
                         < (ssize_t)sizeof(duration32)) {
                     return ERROR_IO;
                 }
-                duration = ntohl(duration32);
+                // ffmpeg sets duration to -1, which is incorrect.
+                if (duration32 != 0xffffffff) {
+                    duration = ntohl(duration32);
+                }
             }
             mLastTrack->meta->setInt64(
                     kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
@@ -1075,11 +1151,23 @@
                 return err;
             }
 
-            // Assume that a given buffer only contains at most 10 fragments,
-            // each fragment originally prefixed with a 2 byte length will
-            // have a 4 byte header (0x00 0x00 0x00 0x01) after conversion,
-            // and thus will grow by 2 bytes per fragment.
-            mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size + 10 * 2);
+            if (max_size != 0) {
+                // Assume that a given buffer only contains at most 10 chunks,
+                // each chunk originally prefixed with a 2 byte length will
+                // have a 4 byte header (0x00 0x00 0x00 0x01) after conversion,
+                // and thus will grow by 2 bytes per chunk.
+                mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size + 10 * 2);
+            } else {
+                // No size was specified. Pick a conservatively large size.
+                int32_t width, height;
+                if (mLastTrack->meta->findInt32(kKeyWidth, &width) &&
+                        mLastTrack->meta->findInt32(kKeyHeight, &height)) {
+                    mLastTrack->meta->setInt32(kKeyMaxInputSize, width * height * 3 / 2);
+                } else {
+                    ALOGE("No width or height, assuming worst case 1080p");
+                    mLastTrack->meta->setInt32(kKeyMaxInputSize, 3110400);
+                }
+            }
             *offset += chunk_size;
 
             // Calculate average frame rate.
@@ -1448,6 +1536,13 @@
             break;
         }
 
+        case FOURCC('s', 'i', 'd', 'x'):
+        {
+            parseSegmentIndex(data_offset, chunk_data_size);
+            *offset += chunk_size;
+            return UNKNOWN_ERROR; // stop parsing after sidx
+        }
+
         default:
         {
             *offset += chunk_size;
@@ -1458,6 +1553,125 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
+  ALOGV("MPEG4Extractor::parseSegmentIndex");
+
+    if (size < 12) {
+      return -EINVAL;
+    }
+
+    uint32_t flags;
+    if (!mDataSource->getUInt32(offset, &flags)) {
+        return ERROR_MALFORMED;
+    }
+
+    uint32_t version = flags >> 24;
+    flags &= 0xffffff;
+
+    ALOGV("sidx version %d", version);
+
+    uint32_t referenceId;
+    if (!mDataSource->getUInt32(offset + 4, &referenceId)) {
+        return ERROR_MALFORMED;
+    }
+
+    uint32_t timeScale;
+    if (!mDataSource->getUInt32(offset + 8, &timeScale)) {
+        return ERROR_MALFORMED;
+    }
+    ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+
+    uint64_t earliestPresentationTime;
+    uint64_t firstOffset;
+
+    offset += 12;
+    size -= 12;
+
+    if (version == 0) {
+        if (size < 8) {
+            return -EINVAL;
+        }
+        uint32_t tmp;
+        if (!mDataSource->getUInt32(offset, &tmp)) {
+            return ERROR_MALFORMED;
+        }
+        earliestPresentationTime = tmp;
+        if (!mDataSource->getUInt32(offset + 4, &tmp)) {
+            return ERROR_MALFORMED;
+        }
+        firstOffset = tmp;
+        offset += 8;
+        size -= 8;
+    } else {
+        if (size < 16) {
+            return -EINVAL;
+        }
+        if (!mDataSource->getUInt64(offset, &earliestPresentationTime)) {
+            return ERROR_MALFORMED;
+        }
+        if (!mDataSource->getUInt64(offset + 8, &firstOffset)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 16;
+        size -= 16;
+    }
+    ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+
+    if (size < 4) {
+        return -EINVAL;
+    }
+
+    uint16_t referenceCount;
+    if (!mDataSource->getUInt16(offset + 2, &referenceCount)) {
+        return ERROR_MALFORMED;
+    }
+    offset += 4;
+    size -= 4;
+    ALOGV("refcount: %d", referenceCount);
+
+    if (size < referenceCount * 12) {
+        return -EINVAL;
+    }
+
+    uint64_t total_duration = 0;
+    for (unsigned int i = 0; i < referenceCount; i++) {
+        uint32_t d1, d2, d3;
+
+        if (!mDataSource->getUInt32(offset, &d1) ||     // size
+            !mDataSource->getUInt32(offset + 4, &d2) || // duration
+            !mDataSource->getUInt32(offset + 8, &d3)) { // flags
+            return ERROR_MALFORMED;
+        }
+
+        if (d1 & 0x80000000) {
+            ALOGW("sub-sidx boxes not supported yet");
+        }
+        bool sap = d3 & 0x80000000;
+        uint32_t saptype = (d3 >> 28) & 7;
+        if (!sap || saptype > 2) {
+            ALOGW("not a stream access point, or unsupported type");
+        }
+        total_duration += d2;
+        offset += 12;
+        ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
+        SidxEntry se;
+        se.mSize = d1 & 0x7fffffff;
+        se.mDurationUs = 1000000LL * d2 / timeScale;
+        mSidxEntries.add(se);
+    }
+
+    mSidxDuration = total_duration * 1000000 / timeScale;
+    ALOGV("duration: %lld", mSidxDuration);
+
+    int64_t metaDuration;
+    if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
+        mLastTrack->meta->setInt64(kKeyDuration, mSidxDuration);
+    }
+    return OK;
+}
+
+
+
 status_t MPEG4Extractor::parseTrackHeader(
         off64_t data_offset, off64_t data_size) {
     if (data_size < 4) {
@@ -1755,7 +1969,8 @@
     }
 
     return new MPEG4Source(
-            track->meta, mDataSource, track->timescale, track->sampleTable);
+            track->meta, mDataSource, track->timescale, track->sampleTable,
+            mSidxEntries, mMoofOffset);
 }
 
 // static
@@ -1898,12 +2113,19 @@
         const sp<MetaData> &format,
         const sp<DataSource> &dataSource,
         int32_t timeScale,
-        const sp<SampleTable> &sampleTable)
+        const sp<SampleTable> &sampleTable,
+        Vector<SidxEntry> &sidx,
+        off64_t firstMoofOffset)
     : mFormat(format),
       mDataSource(dataSource),
       mTimescale(timeScale),
       mSampleTable(sampleTable),
       mCurrentSampleIndex(0),
+      mCurrentFragmentIndex(0),
+      mSegments(sidx),
+      mFirstMoofOffset(firstMoofOffset),
+      mCurrentMoofOffset(firstMoofOffset),
+      mCurrentTime(0),
       mIsAVC(false),
       mNALLengthSize(0),
       mStarted(false),
@@ -1931,6 +2153,13 @@
         // The number of bytes used to encode the length of a NAL unit.
         mNALLengthSize = 1 + (ptr[4] & 3);
     }
+
+    CHECK(format->findInt32(kKeyTrackID, &mTrackId));
+
+    if (mFirstMoofOffset != 0) {
+        off64_t offset = mFirstMoofOffset;
+        parseChunk(&offset);
+    }
 }
 
 MPEG4Source::~MPEG4Source() {
@@ -1988,6 +2217,344 @@
     return OK;
 }
 
+status_t MPEG4Source::parseChunk(off64_t *offset) {
+    uint32_t hdr[2];
+    if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+        return ERROR_IO;
+    }
+    uint64_t chunk_size = ntohl(hdr[0]);
+    uint32_t chunk_type = ntohl(hdr[1]);
+    off64_t data_offset = *offset + 8;
+
+    if (chunk_size == 1) {
+        if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+            return ERROR_IO;
+        }
+        chunk_size = ntoh64(chunk_size);
+        data_offset += 8;
+
+        if (chunk_size < 16) {
+            // The smallest valid chunk is 16 bytes long in this case.
+            return ERROR_MALFORMED;
+        }
+    } else if (chunk_size < 8) {
+        // The smallest valid chunk is 8 bytes long.
+        return ERROR_MALFORMED;
+    }
+
+    char chunk[5];
+    MakeFourCCString(chunk_type, chunk);
+    ALOGV("MPEG4Source chunk %s @ %llx", chunk, *offset);
+
+    off64_t chunk_data_size = *offset + chunk_size - data_offset;
+
+    switch(chunk_type) {
+
+        case FOURCC('t', 'r', 'a', 'f'):
+        case FOURCC('m', 'o', 'o', 'f'): {
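+            // Container boxes: recursively parse each child until the end of this box.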
+            off64_t stop_offset = *offset + chunk_size;
+            *offset = data_offset;
+            while (*offset < stop_offset) {
+                status_t err = parseChunk(offset);
+                if (err != OK) {
+                    return err;
+                }
+            }
+            if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+                // *offset points to the mdat box following this moof
+                parseChunk(offset); // doesn't parse the mdat, just advances *offset past it
+                mNextMoofOffset = *offset;
+            }
+            break;
+        }
+
+        case FOURCC('t', 'f', 'h', 'd'): {
+                status_t err;
+                if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) {
+                    return err;
+                }
+                *offset += chunk_size;
+                break;
+        }
+
+        case FOURCC('t', 'r', 'u', 'n'): {
+                status_t err;
+                if (mLastParsedTrackId == mTrackId) {
+                    if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) {
+                        return err;
+                    }
+                }
+
+                *offset += chunk_size;
+                break;
+        }
+
+        default: {
+            *offset += chunk_size;
+            break;
+        }
+    }
+    return OK;
+}
+
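+// Parse a 'tfhd' (track fragment header) box: read the version/flags word and the
+// track ID, skip fragments that belong to other tracks, then pick up whichever
+// optional defaults (base data offset, sample description index, default sample
+// duration/size/flags) the flags say are present.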
+status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
+
+    if (size < 8) {
+        return -EINVAL;
+    }
+
+    uint32_t flags;
+    if (!mDataSource->getUInt32(offset, &flags)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (flags & 0xff000000) {
+        return -EINVAL;
+    }
+
+    if (!mDataSource->getUInt32(offset + 4, (uint32_t*)&mLastParsedTrackId)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (mLastParsedTrackId != mTrackId) {
+        // this is not the right track, skip it
+        return OK;
+    }
+
+    mTrackFragmentHeaderInfo.mFlags = flags;
+    mTrackFragmentHeaderInfo.mTrackID = mLastParsedTrackId;
+    offset += 8;
+    size -= 8;
+
+    ALOGV("fragment header: %08x %08x", flags, mTrackFragmentHeaderInfo.mTrackID);
+
+    if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
+        if (size < 8) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt64(offset, &mTrackFragmentHeaderInfo.mBaseDataOffset)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 8;
+        size -= 8;
+    }
+
+    if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mSampleDescriptionIndex)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 4;
+        size -= 4;
+    }
+
+    if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleDuration)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 4;
+        size -= 4;
+    }
+
+    if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleSize)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 4;
+        size -= 4;
+    }
+
+    if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleFlags)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 4;
+        size -= 4;
+    }
+
+    if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
+        mTrackFragmentHeaderInfo.mBaseDataOffset = mCurrentMoofOffset;
+    }
+
+    mTrackFragmentHeaderInfo.mDataOffset = 0;
+    return OK;
+}
+
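+// Parse a 'trun' (track fragment run) box: work out the data offset for the run,
+// then for each sample read whichever of duration/size/flags/cts-offset are carried
+// per sample (falling back to the 'tfhd' defaults otherwise) and append a
+// Sample (offset, size, duration) entry to mCurrentSamples.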
+status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
+
+    ALOGV("MPEG4Extractor::parseTrackFragmentRun");
+    if (size < 8) {
+        return -EINVAL;
+    }
+
+    enum {
+        kDataOffsetPresent                  = 0x01,
+        kFirstSampleFlagsPresent            = 0x04,
+        kSampleDurationPresent              = 0x100,
+        kSampleSizePresent                  = 0x200,
+        kSampleFlagsPresent                 = 0x400,
+        kSampleCompositionTimeOffsetPresent = 0x800,
+    };
+
+    uint32_t flags;
+    if (!mDataSource->getUInt32(offset, &flags)) {
+        return ERROR_MALFORMED;
+    }
+    ALOGV("fragment run flags: %08x", flags);
+
+    if (flags & 0xff000000) {
+        return -EINVAL;
+    }
+
+    if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
+        // These two shall not be used together.
+        return -EINVAL;
+    }
+
+    uint32_t sampleCount;
+    if (!mDataSource->getUInt32(offset + 4, &sampleCount)) {
+        return ERROR_MALFORMED;
+    }
+    offset += 8;
+    size -= 8;
+
+    uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset;
+
+    uint32_t firstSampleFlags = 0;
+
+    if (flags & kDataOffsetPresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        int32_t dataOffsetDelta;
+        if (!mDataSource->getUInt32(offset, (uint32_t*)&dataOffsetDelta)) {
+            return ERROR_MALFORMED;
+        }
+
+        dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
+
+        offset += 4;
+        size -= 4;
+    }
+
+    if (flags & kFirstSampleFlagsPresent) {
+        if (size < 4) {
+            return -EINVAL;
+        }
+
+        if (!mDataSource->getUInt32(offset, &firstSampleFlags)) {
+            return ERROR_MALFORMED;
+        }
+        offset += 4;
+        size -= 4;
+    }
+
+    uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0,
+             sampleCtsOffset = 0;
+
+    size_t bytesPerSample = 0;
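+    // Count how many bytes each per-sample table entry occupies, and seed
+    // duration/size/flags from the 'tfhd' defaults when they are not carried
+    // per sample (the cts offset simply defaults to 0).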
+    if (flags & kSampleDurationPresent) {
+        bytesPerSample += 4;
+    } else if (mTrackFragmentHeaderInfo.mFlags
+            & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
+        sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
+    } else {
+        sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
+    }
+
+    if (flags & kSampleSizePresent) {
+        bytesPerSample += 4;
+    } else if (mTrackFragmentHeaderInfo.mFlags
+            & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
+        sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
+    } else {
+        sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
+    }
+
+    if (flags & kSampleFlagsPresent) {
+        bytesPerSample += 4;
+    } else if (mTrackFragmentHeaderInfo.mFlags
+            & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
+        sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
+    } else {
+        sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
+    }
+
+    if (flags & kSampleCompositionTimeOffsetPresent) {
+        bytesPerSample += 4;
+    } else {
+        sampleCtsOffset = 0;
+    }
+
+    if (size < sampleCount * bytesPerSample) {
+        return -EINVAL;
+    }
+
+    Sample tmp;
+    for (uint32_t i = 0; i < sampleCount; ++i) {
+        if (flags & kSampleDurationPresent) {
+            if (!mDataSource->getUInt32(offset, &sampleDuration)) {
+                return ERROR_MALFORMED;
+            }
+            offset += 4;
+        }
+
+        if (flags & kSampleSizePresent) {
+            if (!mDataSource->getUInt32(offset, &sampleSize)) {
+                return ERROR_MALFORMED;
+            }
+            offset += 4;
+        }
+
+        if (flags & kSampleFlagsPresent) {
+            if (!mDataSource->getUInt32(offset, &sampleFlags)) {
+                return ERROR_MALFORMED;
+            }
+            offset += 4;
+        }
+
+        if (flags & kSampleCompositionTimeOffsetPresent) {
+            if (!mDataSource->getUInt32(offset, &sampleCtsOffset)) {
+                return ERROR_MALFORMED;
+            }
+            offset += 4;
+        }
+
+        ALOGV("adding sample at offset 0x%08llx, size %u, duration %u, "
+              " flags 0x%08x",
+                dataOffset, sampleSize, sampleDuration,
+                (flags & kFirstSampleFlagsPresent) && i == 0
+                    ? firstSampleFlags : sampleFlags);
+        tmp.offset = dataOffset;
+        tmp.size = sampleSize;
+        tmp.duration = sampleDuration;
+        mCurrentSamples.add(tmp);
+
+        dataOffset += sampleSize;
+    }
+
+    mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
+
+    return OK;
+}
+
 sp<MetaData> MPEG4Source::getFormat() {
     Mutex::Autolock autoLock(mLock);
 
@@ -2019,6 +2586,10 @@
 
     CHECK(mStarted);
 
+    if (mFirstMoofOffset > 0) {
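+        // Fragmented MP4s are served from the fragment runs parsed out of 'moof'
+        // boxes rather than from the sample table.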
+        return fragmentedRead(out, options);
+    }
+
     *out = NULL;
 
     int64_t targetSampleTimeUs = -1;
@@ -2076,6 +2647,7 @@
                 // we had seeked to the end of stream, ending normally.
                 err = ERROR_END_OF_STREAM;
             }
+            ALOGV("end of stream");
             return err;
         }
 
@@ -2286,6 +2858,255 @@
     }
 }
 
+status_t MPEG4Source::fragmentedRead(
+        MediaBuffer **out, const ReadOptions *options) {
+
+    ALOGV("MPEG4Source::fragmentedRead");
+
+    CHECK(mStarted);
+
+    *out = NULL;
+
+    int64_t targetSampleTimeUs = -1;
+
+    int64_t seekTimeUs;
+    ReadOptions::SeekMode mode;
+    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+
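+        // Use the sidx entries to find the media segment containing the seek time,
+        // then re-parse from that segment's moof and continue reading from there.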
+        int numSidxEntries = mSegments.size();
+        if (numSidxEntries != 0) {
+            int64_t totalTime = 0;
+            off64_t totalOffset = mFirstMoofOffset;
+            for (int i = 0; i < numSidxEntries; i++) {
+                const SidxEntry *se = &mSegments[i];
+                if (totalTime + se->mDurationUs > seekTimeUs) {
+                    // The requested time is somewhere in this segment
+                    if ((mode == ReadOptions::SEEK_NEXT_SYNC) ||
+                        (mode == ReadOptions::SEEK_CLOSEST_SYNC &&
+                        (seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
+                        // requested next sync, or closest sync and it was closer to the end of
+                        // this segment
+                        totalTime += se->mDurationUs;
+                        totalOffset += se->mSize;
+                    }
+                    break;
+                }
+                totalTime += se->mDurationUs;
+                totalOffset += se->mSize;
+            }
+            mCurrentMoofOffset = totalOffset;
+            mCurrentSamples.clear();
+            mCurrentSampleIndex = 0;
+            parseChunk(&totalOffset);
+            mCurrentTime = totalTime * mTimescale / 1000000ll;
+        }
+
+        if (mBuffer != NULL) {
+            mBuffer->release();
+            mBuffer = NULL;
+        }
+
+        // fall through
+    }
+
+    off64_t offset = 0;
+    size_t size;
+    uint32_t cts = 0;
+    bool isSyncSample = false;
+    bool newBuffer = false;
+    if (mBuffer == NULL) {
+        newBuffer = true;
+
+        if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+            // move to next fragment
+            Sample lastSample = mCurrentSamples[mCurrentSamples.size() - 1];
+            off64_t nextMoof = mNextMoofOffset; // lastSample.offset + lastSample.size;
+            mCurrentMoofOffset = nextMoof;
+            mCurrentSamples.clear();
+            mCurrentSampleIndex = 0;
+            parseChunk(&nextMoof);
+            if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+                return ERROR_END_OF_STREAM;
+            }
+        }
+
+        const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
+        offset = smpl->offset;
+        size = smpl->size;
+        cts = mCurrentTime;
+        mCurrentTime += smpl->duration;
+        isSyncSample = (mCurrentSampleIndex == 0); // XXX
+
+        status_t err = mGroup->acquire_buffer(&mBuffer);
+
+        if (err != OK) {
+            CHECK(mBuffer == NULL);
+            ALOGV("acquire_buffer returned %d", err);
+            return err;
+        }
+    }
+
+    if (!mIsAVC || mWantsNALFragments) {
+        if (newBuffer) {
+            ssize_t num_bytes_read =
+                mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
+
+            if (num_bytes_read < (ssize_t)size) {
+                mBuffer->release();
+                mBuffer = NULL;
+
+                ALOGV("i/o error");
+                return ERROR_IO;
+            }
+
+            CHECK(mBuffer != NULL);
+            mBuffer->set_range(0, size);
+            mBuffer->meta_data()->clear();
+            mBuffer->meta_data()->setInt64(
+                    kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+
+            if (targetSampleTimeUs >= 0) {
+                mBuffer->meta_data()->setInt64(
+                        kKeyTargetTime, targetSampleTimeUs);
+            }
+
+            if (isSyncSample) {
+                mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
+            }
+
+            ++mCurrentSampleIndex;
+        }
+
+        if (!mIsAVC) {
+            *out = mBuffer;
+            mBuffer = NULL;
+
+            return OK;
+        }
+
+        // Each NAL unit is split up into its constituent fragments and
+        // each one of them is returned in its own buffer.
+
+        CHECK(mBuffer->range_length() >= mNALLengthSize);
+
+        const uint8_t *src =
+            (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
+
+        size_t nal_size = parseNALSize(src);
+        if (mBuffer->range_length() < mNALLengthSize + nal_size) {
+            ALOGE("incomplete NAL unit.");
+
+            mBuffer->release();
+            mBuffer = NULL;
+
+            return ERROR_MALFORMED;
+        }
+
+        MediaBuffer *clone = mBuffer->clone();
+        CHECK(clone != NULL);
+        clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
+
+        CHECK(mBuffer != NULL);
+        mBuffer->set_range(
+                mBuffer->range_offset() + mNALLengthSize + nal_size,
+                mBuffer->range_length() - mNALLengthSize - nal_size);
+
+        if (mBuffer->range_length() == 0) {
+            mBuffer->release();
+            mBuffer = NULL;
+        }
+
+        *out = clone;
+
+        return OK;
+    } else {
+        ALOGV("whole NAL");
+        // Whole NAL units are returned but each fragment is prefixed by
+        // the start code (0x00 00 00 01).
+        ssize_t num_bytes_read = 0;
+        int32_t drm = 0;
+        bool usesDRM = (mFormat->findInt32(kKeyIsDRM, &drm) && drm != 0);
+        if (usesDRM) {
+            num_bytes_read =
+                mDataSource->readAt(offset, (uint8_t*)mBuffer->data(), size);
+        } else {
+            num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
+        }
+
+        if (num_bytes_read < (ssize_t)size) {
+            mBuffer->release();
+            mBuffer = NULL;
+
+            ALOGV("i/o error");
+            return ERROR_IO;
+        }
+
+        if (usesDRM) {
+            CHECK(mBuffer != NULL);
+            mBuffer->set_range(0, size);
+
+        } else {
+            uint8_t *dstData = (uint8_t *)mBuffer->data();
+            size_t srcOffset = 0;
+            size_t dstOffset = 0;
+
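+            // Rewrite the length-prefixed NAL units from mSrcBuffer into the output
+            // buffer with 0x00000001 start codes (Annex B framing).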
+            while (srcOffset < size) {
+                bool isMalFormed = (srcOffset + mNALLengthSize > size);
+                size_t nalLength = 0;
+                if (!isMalFormed) {
+                    nalLength = parseNALSize(&mSrcBuffer[srcOffset]);
+                    srcOffset += mNALLengthSize;
+                    isMalFormed = srcOffset + nalLength > size;
+                }
+
+                if (isMalFormed) {
+                    ALOGE("Video is malformed");
+                    mBuffer->release();
+                    mBuffer = NULL;
+                    return ERROR_MALFORMED;
+                }
+
+                if (nalLength == 0) {
+                    continue;
+                }
+
+                CHECK(dstOffset + 4 <= mBuffer->size());
+
+                dstData[dstOffset++] = 0;
+                dstData[dstOffset++] = 0;
+                dstData[dstOffset++] = 0;
+                dstData[dstOffset++] = 1;
+                memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength);
+                srcOffset += nalLength;
+                dstOffset += nalLength;
+            }
+            CHECK_EQ(srcOffset, size);
+            CHECK(mBuffer != NULL);
+            mBuffer->set_range(0, dstOffset);
+        }
+
+        mBuffer->meta_data()->clear();
+        mBuffer->meta_data()->setInt64(
+                kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+
+        if (targetSampleTimeUs >= 0) {
+            mBuffer->meta_data()->setInt64(
+                    kKeyTargetTime, targetSampleTimeUs);
+        }
+
+        if (isSyncSample) {
+            mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
+        }
+
+        ++mCurrentSampleIndex;
+
+        *out = mBuffer;
+        mBuffer = NULL;
+
+        return OK;
+    }
+}
+
 MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
         const char *mimePrefix) {
     for (Track *track = mFirstTrack; track != NULL; track = track->next) {
@@ -2398,6 +3219,9 @@
 
         off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
 
+        char chunkstring[5];
+        MakeFourCCString(chunkType, chunkstring);
+        ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
         switch (chunkType) {
             case FOURCC('f', 't', 'y', 'p'):
             {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 77aceb7..83be0fd 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -22,7 +22,7 @@
 
 #include "include/SoftwareRenderer.h"
 
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -132,7 +132,7 @@
 
 status_t MediaCodec::configure(
         const sp<AMessage> &format,
-        const sp<SurfaceTextureClient> &nativeWindow,
+        const sp<Surface> &nativeWindow,
         const sp<ICrypto> &crypto,
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, id());
@@ -1526,7 +1526,7 @@
 }
 
 status_t MediaCodec::setNativeWindow(
-        const sp<SurfaceTextureClient> &surfaceTextureClient) {
+        const sp<Surface> &surfaceTextureClient) {
     status_t err;
 
     if (mNativeWindow != NULL) {
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index b18c916..9ab6611 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -21,7 +21,6 @@
 #include "include/AMRExtractor.h"
 #include "include/MP3Extractor.h"
 #include "include/MPEG4Extractor.h"
-#include "include/FragmentedMP4Extractor.h"
 #include "include/WAVExtractor.h"
 #include "include/OggExtractor.h"
 #include "include/MPEG2PSExtractor.h"
@@ -94,12 +93,7 @@
     MediaExtractor *ret = NULL;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
             || !strcasecmp(mime, "audio/mp4")) {
-        int fragmented = 0;
-        if (meta != NULL && meta->findInt32("fragmented", &fragmented) && fragmented) {
-            ret = new FragmentedMP4Extractor(source);
-        } else {
-            ret = new MPEG4Extractor(source);
-        }
+        ret = new MPEG4Extractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
         ret = new MP3Extractor(source, meta);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
diff --git a/media/libstagefright/include/FragmentedMP4Extractor.h b/media/libstagefright/include/FragmentedMP4Extractor.h
deleted file mode 100644
index 763cd3a..0000000
--- a/media/libstagefright/include/FragmentedMP4Extractor.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FRAGMENTED_MP4_EXTRACTOR_H_
-
-#define FRAGMENTED_MP4_EXTRACTOR_H_
-
-#include "include/FragmentedMP4Parser.h"
-
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/Vector.h>
-#include <utils/String8.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-
-class FragmentedMP4Extractor : public MediaExtractor {
-public:
-    // Extractor assumes ownership of "source".
-    FragmentedMP4Extractor(const sp<DataSource> &source);
-
-    virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
-    virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-    virtual sp<MetaData> getMetaData();
-    virtual uint32_t flags() const;
-
-protected:
-    virtual ~FragmentedMP4Extractor();
-
-private:
-    sp<ALooper> mLooper;
-    sp<FragmentedMP4Parser> mParser;
-    sp<DataSource> mDataSource;
-    status_t mInitCheck;
-    size_t mAudioTrackIndex;
-    size_t mTrackCount;
-
-    sp<MetaData> mFileMetaData;
-
-    Vector<uint32_t> mPath;
-
-    FragmentedMP4Extractor(const FragmentedMP4Extractor &);
-    FragmentedMP4Extractor &operator=(const FragmentedMP4Extractor &);
-};
-
-bool SniffFragmentedMP4(
-        const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *);
-
-}  // namespace android
-
-#endif  // MPEG4_EXTRACTOR_H_
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 5c549e0..c68623a 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -18,7 +18,12 @@
 
 #define MPEG4_EXTRACTOR_H_
 
+#include <arpa/inet.h>
+
+#include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/Utils.h>
+#include <utils/List.h>
 #include <utils/Vector.h>
 #include <utils/String8.h>
 
@@ -29,6 +34,11 @@
 class SampleTable;
 class String8;
 
+struct SidxEntry {
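+    // One 'sidx' (segment index) reference: the referenced segment's size in bytes
+    // and its duration in microseconds.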
+    size_t mSize;
+    uint32_t mDurationUs;
+};
+
 class MPEG4Extractor : public MediaExtractor {
 public:
     // Extractor assumes ownership of "source".
@@ -39,6 +49,7 @@
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual uint32_t flags() const;
 
     // for DRM
     virtual char* getDrmTrackInfo(size_t trackID, int *len);
@@ -47,6 +58,7 @@
     virtual ~MPEG4Extractor();
 
 private:
+
     struct Track {
         Track *next;
         sp<MetaData> meta;
@@ -56,6 +68,10 @@
         bool skipTrack;
     };
 
+    Vector<SidxEntry> mSidxEntries;
+    uint64_t mSidxDuration;
+    off64_t mMoofOffset;
+
     sp<DataSource> mDataSource;
     status_t mInitCheck;
     bool mHasVideo;
@@ -93,6 +109,8 @@
 
     status_t parseTrackHeader(off64_t data_offset, off64_t data_size);
 
+    status_t parseSegmentIndex(off64_t data_offset, size_t data_size);
+
     Track *findTrackByMimePrefix(const char *mimePrefix);
 
     MPEG4Extractor(const MPEG4Extractor &);
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 6a98509..a5459fe 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -27,7 +27,7 @@
 #include <media/mediarecorder.h>
 
 #include <ui/GraphicBuffer.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
@@ -109,7 +109,7 @@
             ALOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
             sp<IGraphicBufferProducer> sms = (new SurfaceMediaSource(
                     getSurfaceWidth(), getSurfaceHeight()))->getBufferQueue();
-            sp<SurfaceTextureClient> stc = new SurfaceTextureClient(sms);
+            sp<Surface> stc = new Surface(sms);
             sp<ANativeWindow> window = stc;
 
             mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
@@ -361,7 +361,7 @@
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
 
         // Manual cast is required to avoid constructor ambiguity
-        mSTC = new SurfaceTextureClient(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+        mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
         mANW = mSTC;
     }
 
@@ -375,7 +375,7 @@
     const int mYuvTexHeight;
 
     sp<SurfaceMediaSource> mSMS;
-    sp<SurfaceTextureClient> mSTC;
+    sp<Surface> mSTC;
     sp<ANativeWindow> mANW;
 };
 
@@ -396,7 +396,7 @@
         ALOGV("SMS-GLTest::SetUp()");
         android::ProcessState::self()->startThreadPool();
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-        mSTC = new SurfaceTextureClient(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+        mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
         mANW = mSTC;
 
         // Doing the setup related to the GL Side
@@ -416,7 +416,7 @@
     const int mYuvTexHeight;
 
     sp<SurfaceMediaSource> mSMS;
-    sp<SurfaceTextureClient> mSTC;
+    sp<Surface> mSTC;
     sp<ANativeWindow> mANW;
 };
 
@@ -483,7 +483,7 @@
 // query the mediarecorder for a surfacemediasource and create an egl surface with that
 void SurfaceMediaSourceGLTest::setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr) {
     sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
-    mSTC = new SurfaceTextureClient(iST);
+    mSTC = new Surface(iST);
     mANW = mSTC;
 
     if (mEglSurface != EGL_NO_SURFACE) {
@@ -750,7 +750,7 @@
     // get the reference to the surfacemediasource living in
     // mediaserver that is created by stagefrightrecorder
     sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
-    mSTC = new SurfaceTextureClient(iST);
+    mSTC = new Surface(iST);
     mANW = mSTC;
     ASSERT_EQ(NO_ERROR, native_window_api_connect(mANW.get(), NATIVE_WINDOW_API_CPU));
     ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
@@ -781,7 +781,7 @@
     ALOGV("Verify creating a surface w/ right config + dummy writer*********");
 
     mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-    mSTC = new SurfaceTextureClient(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+    mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
     mANW = mSTC;
 
     DummyRecorder writer(mSMS);
diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
index 04dbd7b..75f9d73 100644
--- a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
+++ b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
@@ -376,7 +376,7 @@
     CHECK_EQ(mPlayer->setDataSource(mStreamSource), (status_t)OK);
 
     mPlayer->setVideoSurfaceTexture(
-            mSurfaceTex != NULL ? mSurfaceTex : mSurface->getSurfaceTexture());
+            mSurfaceTex != NULL ? mSurfaceTex : mSurface->getIGraphicBufferProducer());
 
     mPlayer->start();
 }
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 376b0df..2861aa9 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -23,7 +23,7 @@
 #include "MediaPuller.h"
 
 #include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index 21d661e..3f4216a 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -321,7 +321,7 @@
     sp<ALooper> looper = new ALooper;
 
     sp<WifiDisplaySink> sink = new WifiDisplaySink(
-            session, surface->getSurfaceTexture());
+            session, surface->getIGraphicBufferProducer());
     looper->registerHandler(sink);
 
     if (connectToPort >= 0) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index e8852fb..c3f08f6 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -227,7 +227,7 @@
     sp<NBLog::Writer>   newWriter_l(size_t size, const char *name);
     void                unregisterWriter(const sp<NBLog::Writer>& writer);
 private:
-    static const size_t kLogMemorySize = 30 * 1024;
+    static const size_t kLogMemorySize = 10 * 1024;
     sp<MemoryDealer>    mLogMemoryDealer;   // == 0 when NBLog is disabled
 public:
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 529d1b5..08325ad 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -98,7 +98,7 @@
 
 AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTracks)
     :   mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
-        mSampleRate(sampleRate), mLog(&mDummyLog)
+        mSampleRate(sampleRate)
 {
     // AudioMixer is not yet capable of multi-channel beyond stereo
     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(2 == MAX_NUM_CHANNELS);
@@ -122,7 +122,6 @@
     mState.hook         = process__nop;
     mState.outputTemp   = NULL;
     mState.resampleTemp = NULL;
-    mState.mLog         = &mDummyLog;
     // mState.reserved
 
     // FIXME Most of the following initialization is probably redundant since
@@ -132,7 +131,6 @@
     for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
-        t->magic = track_t::kMagic;
         t++;
     }
 
@@ -171,12 +169,6 @@
     delete [] mState.resampleTemp;
 }
 
-void AudioMixer::setLog(NBLog::Writer *log)
-{
-    mLog = log;
-    mState.mLog = log;
-}
-
 int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId)
 {
     uint32_t names = (~mTrackNames) & mConfiguredNames;
@@ -217,12 +209,9 @@
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
         t->downmixerBufferProvider = NULL;
-        t->fastIndex = -1;
-        // t->magic unchanged
 
         status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask);
         if (status == OK) {
-            mLog->logf("getTrackName %d", n);
             return TRACK0 + n;
         }
         ALOGE("AudioMixer::getTrackName(0x%x) failed, error preparing track for downmix",
@@ -377,11 +366,9 @@
 {
     ALOGV("AudioMixer::deleteTrackName(%d)", name);
     name -= TRACK0;
-    mLog->logf("deleteTrackName %d", name);
     ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
     ALOGV("deleteTrackName(%d)", name);
     track_t& track(mState.tracks[ name ]);
-    track.checkMagic();
     if (track.enabled) {
         track.enabled = false;
         invalidateState(1<<name);
@@ -400,10 +387,8 @@
     name -= TRACK0;
     ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
     track_t& track = mState.tracks[name];
-    track.checkMagic();
 
     if (!track.enabled) {
-        mLog->logf("enable %d", name);
         track.enabled = true;
         ALOGV("enable(%d)", name);
         invalidateState(1 << name);
@@ -415,32 +400,19 @@
     name -= TRACK0;
     ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
     track_t& track = mState.tracks[name];
-    track.checkMagic();
 
     if (track.enabled) {
-        mLog->logf("disable %d", name);
         track.enabled = false;
         ALOGV("disable(%d)", name);
         invalidateState(1 << name);
     }
 }
 
-bool AudioMixer::enabled(int name)
-{
-    name -= TRACK0;
-    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
-    track_t& track = mState.tracks[name];
-    track.checkMagic();
-
-    return track.enabled;
-}
-
 void AudioMixer::setParameter(int name, int target, int param, void *value)
 {
     name -= TRACK0;
     ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
     track_t& track = mState.tracks[name];
-    track.checkMagic();
 
     int valueInt = (int)value;
     int32_t *valueBuf = (int32_t *)value;
@@ -483,9 +455,6 @@
         //         for a specific track? or per mixer?
         /* case DOWNMIX_TYPE:
             break          */
-        case FAST_INDEX:
-            track.fastIndex = valueInt;
-            break;
         default:
             LOG_FATAL("bad param");
         }
@@ -571,7 +540,6 @@
 
 bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
 {
-    checkMagic();
     if (value != devSampleRate || resampler != NULL) {
         if (sampleRate != value) {
             sampleRate = value;
@@ -604,7 +572,6 @@
 inline
 void AudioMixer::track_t::adjustVolumeRamp(bool aux)
 {
-    checkMagic();
     for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
         if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
             ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
@@ -633,10 +600,8 @@
 void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
 {
     name -= TRACK0;
-    mLog->logf("bp %d-%p", name, bufferProvider);
     ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
 
-    mState.tracks[name].checkMagic();
     if (mState.tracks[name].downmixerBufferProvider != NULL) {
         // update required?
         if (mState.tracks[name].downmixerBufferProvider->mTrackBufferProvider != bufferProvider) {
@@ -658,9 +623,6 @@
 
 void AudioMixer::process(int64_t pts)
 {
-    if (mState.needsChanged) {
-        mLog->logf("process needs=%#x", mState.needsChanged);
-    }
     mState.hook(&mState, pts);
 }
 
@@ -685,7 +647,6 @@
     }
     state->enabledTracks &= ~disabled;
     state->enabledTracks |=  enabled;
-    state->mLog->logf("process_validate ena=%#x", state->enabledTracks);
 
     // compute everything we need...
     int countActiveTracks = 0;
@@ -1142,7 +1103,6 @@
 
     // acquire each track's buffer
     uint32_t enabledTracks = state->enabledTracks;
-    state->mLog->logf("process_gNR ena=%#x", enabledTracks);
     uint32_t e0 = enabledTracks;
     while (e0) {
         const int i = 31 - __builtin_clz(e0);
@@ -1151,8 +1111,8 @@
         t.buffer.frameCount = state->frameCount;
         int valid = t.bufferProvider->getValid();
         if (valid != AudioBufferProvider::kValid) {
-            ALOGE("invalid bufferProvider=%p name=%d fastIndex=%d frameCount=%d valid=%#x enabledTracks=%#x",
-                    t.bufferProvider, i, t.fastIndex, t.buffer.frameCount, valid, enabledTracks);
+            ALOGE("invalid bufferProvider=%p name=%d frameCount=%d valid=%#x enabledTracks=%#x",
+                    t.bufferProvider, i, t.buffer.frameCount, valid, enabledTracks);
             // expect to crash
         }
         t.bufferProvider->getNextBuffer(&t.buffer, pts);
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index f0ccd8e..fd21fda 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -28,7 +28,6 @@
 
 #include <audio_effects/effect_downmix.h>
 #include <system/audio.h>
-#include <media/nbaio/NBLog.h>
 
 namespace android {
 
@@ -77,7 +76,6 @@
         MAIN_BUFFER     = 0x4002,
         AUX_BUFFER      = 0x4003,
         DOWNMIX_TYPE    = 0X4004,
-        FAST_INDEX      = 0x4005, // for debugging only
         // for target RESAMPLE
         SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
                                   // parameter 'value' is the new sample rate in Hz.
@@ -108,7 +106,6 @@
     // Enable or disable an allocated track by name
     void        enable(int name);
     void        disable(int name);
-    bool        enabled(int name);
 
     void        setParameter(int name, int target, int param, void *value);
 
@@ -203,10 +200,7 @@
 
         int32_t     sessionId;
 
-        int32_t     fastIndex;
-        int32_t     magic;
-        static const int kMagic = 0x54637281;
-        //int32_t     padding[1];
+        int32_t     padding[2];
 
         // 16-byte boundary
 
@@ -216,12 +210,6 @@
         void        adjustVolumeRamp(bool aux);
         size_t      getUnreleasedFrames() const { return resampler != NULL ?
                                                     resampler->getUnreleasedFrames() : 0; };
-        void        checkMagic() {
-            if (magic != kMagic) {
-                ALOGE("magic=%#x fastIndex=%d", magic, fastIndex);
-            }
-        }
-
     };
 
     // pad to 32-bytes to fill cache line
@@ -232,8 +220,7 @@
         void            (*hook)(state_t* state, int64_t pts);   // one of process__*, never NULL
         int32_t         *outputTemp;
         int32_t         *resampleTemp;
-        NBLog::Writer*  mLog;
-        int32_t         reserved[1];
+        int32_t         reserved[2];
         // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
         track_t         tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32)));
     };
@@ -260,11 +247,6 @@
 
     const uint32_t  mSampleRate;
 
-    NBLog::Writer*  mLog;
-    NBLog::Writer   mDummyLog;
-public:
-    void            setLog(NBLog::Writer* log);
-private:
     state_t         mState __attribute__((aligned(32)));
 
     // effect descriptor for the downmixer used by the mixer
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 5811771..80e37ca 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -120,16 +120,12 @@
         FastMixerState::Command command = next->mCommand;
         if (next != current) {
 
-            logWriter->logTimestamp();
             logWriter->log("next != current");
 
             // As soon as possible of learning of a new dump area, start using it
             dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState;
             teeSink = next->mTeeSink;
             logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
-            if (mixer != NULL) {
-                mixer->setLog(logWriter);
-            }
 
             // We want to always have a valid reference to the previous (non-idle) state.
             // However, the state queue only guarantees access to current and previous states.
@@ -304,21 +300,13 @@
                     addedTracks &= ~(1 << i);
                     const FastTrack* fastTrack = &current->mFastTracks[i];
                     AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                    logWriter->logf("bp %d i=%d %p", __LINE__, i, bufferProvider);
                     ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
-                    if (bufferProvider == NULL ||
-                            bufferProvider->getValid() != AudioBufferProvider::kValid) {
-                        logWriter->logTimestamp();
-                        logWriter->logf("added invalid %#x", i);
-                    }
                     if (mixer != NULL) {
                         // calling getTrackName with default channel mask and a random invalid
                         //   sessionId (no effects here)
                         name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO, -555);
                         ALOG_ASSERT(name >= 0);
                         fastTrackNames[i] = name;
-                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FAST_INDEX,
-                                (void *) i);
                         mixer->setBufferProvider(name, bufferProvider);
                         mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                                 (void *) mixBuffer);
@@ -329,40 +317,27 @@
                         }
                         mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                                 (void *) fastTrack->mChannelMask);
-                        if (!mixer->enabled(name)) {
-                            logWriter->logf("enable %d i=%d name=%d", __LINE__, i, name);
-                        }
                         mixer->enable(name);
                     }
                     generations[i] = fastTrack->mGeneration;
                 }
 
-                // finally process (potentially) modified tracks; these use the same slot
+                // finally process modified tracks; these use the same slot
                 // but may have a different buffer provider or volume provider
                 unsigned modifiedTracks = currentTrackMask & previousTrackMask;
                 if (modifiedTracks) {
-                    logWriter->logf("pot. mod. %#x", modifiedTracks);
+                    logWriter->logf("modified %#x", modifiedTracks);
                 }
-                unsigned actuallyModifiedTracks = 0;
                 while (modifiedTracks != 0) {
                     i = __builtin_ctz(modifiedTracks);
                     modifiedTracks &= ~(1 << i);
                     const FastTrack* fastTrack = &current->mFastTracks[i];
                     if (fastTrack->mGeneration != generations[i]) {
-                        actuallyModifiedTracks |= 1 << i;
                         AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                        logWriter->logf("bp %d i=%d %p", __LINE__, i, bufferProvider);
                         ALOG_ASSERT(bufferProvider != NULL);
-                        if (bufferProvider == NULL ||
-                                bufferProvider->getValid() != AudioBufferProvider::kValid) {
-                            logWriter->logTimestamp();
-                            logWriter->logf("modified invalid %#x", i);
-                        }
                         if (mixer != NULL) {
                             name = fastTrackNames[i];
                             ALOG_ASSERT(name >= 0);
-                            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FAST_INDEX,
-                                    (void *) i);
                             mixer->setBufferProvider(name, bufferProvider);
                             if (fastTrack->mVolumeProvider == NULL) {
                                 mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
@@ -385,9 +360,6 @@
                         generations[i] = fastTrack->mGeneration;
                     }
                 }
-                if (actuallyModifiedTracks) {
-                    logWriter->logf("act. mod. %#x", actuallyModifiedTracks);
-                }
 
                 fastTracksGen = current->mFastTracksGen;
 
@@ -405,7 +377,6 @@
             ALOG_ASSERT(mixBuffer != NULL);
             // for each track, update volume and check for underrun
             unsigned currentTrackMask = current->mTrackMask;
-            logWriter->logf("ctm %#x", currentTrackMask);
             while (currentTrackMask != 0) {
                 i = __builtin_ctz(currentTrackMask);
                 currentTrackMask &= ~(1 << i);
@@ -443,27 +414,15 @@
                         // allow mixing partial buffer
                         underruns.mBitFields.mPartial++;
                         underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
-                        if (!mixer->enabled(name)) {
-                            logWriter->logf("enable %d i=%d name=%d", __LINE__, i, name);
-                        }
                         mixer->enable(name);
                     }
                 } else {
                     underruns.mBitFields.mFull++;
                     underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
-                    if (!mixer->enabled(name)) {
-                        logWriter->logf("enable %d i=%d name=%d", __LINE__, i, name);
-                    }
                     mixer->enable(name);
                 }
                 ftDump->mUnderruns = underruns;
                 ftDump->mFramesReady = framesReady;
-                AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                if (bufferProvider == NULL ||
-                        bufferProvider->getValid() != AudioBufferProvider::kValid) {
-                    logWriter->logTimestamp();
-                    logWriter->logf("mixing invalid %#x", i);
-                }
             }
 
             int64_t pts;
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index 313330f..e33b3c6 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -174,7 +174,7 @@
 #endif
 
 private:
-    static const unsigned kN = 8;       // values < 4 are not supported by this code
+    static const unsigned kN = 4;       // values < 4 are not supported by this code
     T                 mStates[kN];      // written by mutator, read by observer
 
     // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a19c5f4..ba848d7 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1196,8 +1196,6 @@
 
     { // scope for mLock
         Mutex::Autolock _l(mLock);
-        mNBLogWriter->logf("createTrack_l isFast=%d caller=%d",
-                (*flags & IAudioFlinger::TRACK_FAST) != 0, IPCThreadState::self()->getCallingPid());
 
         // all tracks in same audio session must share the same routing strategy otherwise
         // conflicts will happen when tracks are moved from one output to another by audio policy
@@ -1251,6 +1249,7 @@
     if (status) {
         *status = lStatus;
     }
+    mNBLogWriter->logf("createTrack_l");
     return track;
 }
 
@@ -1318,8 +1317,7 @@
 // addTrack_l() must be called with ThreadBase::mLock held
 status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
 {
-    mNBLogWriter->logf("addTrack_l mName=%d mFastIndex=%d caller=%d", track->mName,
-            track->mFastIndex, IPCThreadState::self()->getCallingPid());
+    mNBLogWriter->logf("addTrack_l mName=%d", track->mName);
     status_t status = ALREADY_EXISTS;
 
     // set retry count for buffer fill
@@ -1353,9 +1351,7 @@
 // destroyTrack_l() must be called with ThreadBase::mLock held
 void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
 {
-    mNBLogWriter->logTimestamp();
-    mNBLogWriter->logf("destroyTrack_l mName=%d mFastIndex=%d mClientPid=%d", track->mName,
-            track->mFastIndex, track->mClient != 0 ? track->mClient->pid() : -1);
+    mNBLogWriter->logf("destroyTrack_l mName=%d", track->mName);
     track->mState = TrackBase::TERMINATED;
     // active tracks are removed by threadLoop()
     if (mActiveTracks.indexOf(track) < 0) {
@@ -1365,9 +1361,7 @@
 
 void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
 {
-    mNBLogWriter->logTimestamp();
-    mNBLogWriter->logf("removeTrack_l mName=%d mFastIndex=%d clientPid=%d", track->mName,
-            track->mFastIndex, track->mClient != 0 ? track->mClient->pid() : -1);
+    mNBLogWriter->logf("removeTrack_l mName=%d", track->mName);
     track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
     mTracks.remove(track);
     deleteTrackName_l(track->name());
@@ -2155,7 +2149,6 @@
         FastTrack *fastTrack = &state->mFastTracks[0];
         // wrap the source side of the MonoPipe to make it an AudioBufferProvider
         fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
-        mNBLogWriter->logf("ft0 bp %p", fastTrack->mBufferProvider);
         fastTrack->mVolumeProvider = NULL;
         fastTrack->mGeneration++;
         state->mFastTracksGen++;
@@ -2560,7 +2553,6 @@
                 // was it previously inactive?
                 if (!(state->mTrackMask & (1 << j))) {
                     ExtendedAudioBufferProvider *eabp = track;
-                    mNBLogWriter->logf("j=%d bp %p", j, eabp);
                     VolumeProvider *vp = track;
                     fastTrack->mBufferProvider = eabp;
                     fastTrack->mVolumeProvider = vp;
@@ -2847,19 +2839,11 @@
             block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
             pauseAudioWatchdog = true;
         }
+        sq->end();
     }
     if (sq != NULL) {
-        unsigned trackMask = state->mTrackMask;
         sq->end(didModify);
-        if (didModify) {
-            mNBLogWriter->logTimestamp();
-            mNBLogWriter->logf("push trackMask=%#x block=%d", trackMask, block);
-        }
         sq->push(block);
-        if (didModify) {
-            mNBLogWriter->logTimestamp();
-            mNBLogWriter->log("pushed");
-        }
     }
 #ifdef AUDIO_WATCHDOG
     if (pauseAudioWatchdog && mAudioWatchdog != 0) {
@@ -2886,9 +2870,7 @@
     if (CC_UNLIKELY(count)) {
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove->itemAt(i);
-            mNBLogWriter->logTimestamp();
-            mNBLogWriter->logf("prepareTracks_l remove name=%u mFastIndex=%d", track->name(),
-                    track->mFastIndex);
+            mNBLogWriter->logf("prepareTracks_l remove name=%u", track->name());
             mActiveTracks.remove(track);
             if (track->mainBuffer() != mMixBuffer) {
                 chain = getEffectChain_l(track->sessionId());
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 3cd5c2c..fa1e336 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -315,7 +315,7 @@
                 // keyed by session ID, the second by type UUID timeLow field
                 KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >
                                         mSuspendedSessions;
-                static const size_t     kLogSize = 4096;
+                static const size_t     kLogSize = 512;
                 sp<NBLog::Writer>       mNBLogWriter;
 };
 
@@ -546,7 +546,7 @@
     sp<NBAIO_Sink>          mTeeSink;
     sp<NBAIO_Source>        mTeeSource;
     uint32_t                mScreenState;   // cached copy of gScreenState
-    static const size_t     kFastMixerLogSize = 16 * 1024;
+    static const size_t     kFastMixerLogSize = 8 * 1024;
     sp<NBLog::Writer>       mFastMixerNBLogWriter;
 public:
     virtual     bool        hasFastMixer() const = 0;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f679751..315cbbc 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -351,7 +351,6 @@
             // Read the initial underruns because this field is never cleared by the fast mixer
             mObservedUnderruns = thread->getFastTrackUnderruns(i);
             thread->mFastTrackAvailMask &= ~(1 << i);
-            thread->mNBLogWriter->logf("new Track mName=%d mFastIndex=%d", mName, mFastIndex);
         }
     }
     ALOGV("Track constructor name %d, calling pid %d", mName,
@@ -361,7 +360,6 @@
 AudioFlinger::PlaybackThread::Track::~Track()
 {
     ALOGV("PlaybackThread::Track destructor");
-    // FIXME not sure if safe to log here, would need a lock on thread to do it
 }
 
 void AudioFlinger::PlaybackThread::Track::destroy()
@@ -571,8 +569,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        thread->mNBLogWriter->logf("start mName=%d mFastIndex=%d caller=%d", mName, mFastIndex,
-                IPCThreadState::self()->getCallingPid());
+        thread->mNBLogWriter->logf("start mName=%d", mName);
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
@@ -615,8 +612,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        thread->mNBLogWriter->logf("stop mName=%d mFastIndex=%d caller=%d", mName, mFastIndex,
-                IPCThreadState::self()->getCallingPid());
+        thread->mNBLogWriter->logf("stop mName=%d", mName);
         track_state state = mState;
         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
             // If the track is not active (PAUSED and buffers full), flush buffers
@@ -653,8 +649,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        thread->mNBLogWriter->logf("pause mName=%d mFastIndex=%d caller=%d", mName, mFastIndex,
-                IPCThreadState::self()->getCallingPid());
+        thread->mNBLogWriter->logf("pause mName=%d", mName);
         if (mState == ACTIVE || mState == RESUMING) {
             mState = PAUSING;
             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
@@ -678,8 +673,7 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        thread->mNBLogWriter->logf("flush mName=%d mFastIndex=%d caller=%d", mName, mFastIndex,
-                IPCThreadState::self()->getCallingPid());
+        thread->mNBLogWriter->logf("flush mName=%d", mName);
         if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
                 mState != PAUSING && mState != IDLE && mState != FLUSHED) {
             return;
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index 70bf0ac..b9feaf8 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -22,7 +22,6 @@
 #include <utils/Trace.h>
 
 #include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
 #include <gui/Surface.h>
 #include "camera2/Parameters.h"
 #include "Camera2Client.h"
@@ -489,7 +488,7 @@
     sp<IBinder> binder;
     sp<ANativeWindow> window;
     if (surface != 0) {
-        binder = surface->asBinder();
+        binder = surface->getISurfaceTexture()->asBinder();
         window = surface;
     }
 
@@ -508,7 +507,7 @@
     sp<ANativeWindow> window;
     if (bufferProducer != 0) {
         binder = bufferProducer->asBinder();
-        window = new SurfaceTextureClient(bufferProducer);
+        window = new Surface(bufferProducer);
     }
     return setPreviewWindowL(binder, window);
 }
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index f9cee0d..5f03a1c 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -18,7 +18,6 @@
 //#define LOG_NDEBUG 0
 
 #include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
 #include <gui/Surface.h>
 
 #include "CameraClient.h"
@@ -302,7 +301,7 @@
 status_t CameraClient::setPreviewDisplay(const sp<Surface>& surface) {
     LOG1("setPreviewDisplay(%p) (pid %d)", surface.get(), getCallingPid());
 
-    sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
+    sp<IBinder> binder(surface != 0 ? surface->getISurfaceTexture()->asBinder() : 0);
     sp<ANativeWindow> window(surface);
     return setPreviewWindow(binder, window);
 }
@@ -317,7 +316,7 @@
     sp<ANativeWindow> window;
     if (bufferProducer != 0) {
         binder = bufferProducer->asBinder();
-        window = new SurfaceTextureClient(bufferProducer);
+        window = new Surface(bufferProducer);
     }
     return setPreviewWindow(binder, window);
 }
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/CameraClient.h
index 7da3da7..74829ce 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/CameraClient.h
@@ -124,7 +124,7 @@
 
     // Ensures atomicity among the public methods
     mutable Mutex                   mLock;
-    // This is a binder of Surface or SurfaceTextureClient.
+    // This is a binder of Surface or IGraphicBufferProducer.
     sp<IBinder>                     mSurface;
     sp<ANativeWindow>               mPreviewWindow;
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 124d24d..717e159 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,7 +28,6 @@
 #include <binder/MemoryHeapBase.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
-#include <gui/SurfaceTextureClient.h>
 #include <gui/Surface.h>
 #include <hardware/hardware.h>
 #include <media/AudioSystem.h>
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 307cfab..c4055e0 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -22,7 +22,7 @@
 #include <utils/Trace.h>
 
 #include "CallbackProcessor.h"
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include "../Camera2Device.h"
 #include "../Camera2Client.h"
 
@@ -65,7 +65,7 @@
         mCallbackConsumer = new CpuConsumer(kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
         mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
-        mCallbackWindow = new SurfaceTextureClient(
+        mCallbackWindow = new Surface(
             mCallbackConsumer->getProducerInterface());
     }
 
diff --git a/services/camera/libcameraservice/camera2/JpegProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
index 6280f83..1ec5694 100644
--- a/services/camera/libcameraservice/camera2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -26,7 +26,7 @@
 #include <utils/Trace.h>
 
 #include "JpegProcessor.h"
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include "../Camera2Device.h"
 #include "../Camera2Client.h"
 
@@ -82,7 +82,7 @@
         mCaptureConsumer = new CpuConsumer(1);
         mCaptureConsumer->setFrameAvailableListener(this);
         mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
-        mCaptureWindow = new SurfaceTextureClient(
+        mCaptureWindow = new Surface(
             mCaptureConsumer->getProducerInterface());
         // Create memory for API consumption
         mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0,
diff --git a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
index 6ea27b2..a0d1093 100644
--- a/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/StreamingProcessor.cpp
@@ -20,7 +20,7 @@
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <media/hardware/MetadataBufferType.h>
 
 #include "StreamingProcessor.h"
@@ -284,7 +284,7 @@
                 true);
         mRecordingConsumer->setFrameAvailableListener(this);
         mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
-        mRecordingWindow = new SurfaceTextureClient(
+        mRecordingWindow = new Surface(
             mRecordingConsumer->getProducerInterface());
         // Allocate memory later, since we don't know buffer size until receipt
     }
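The CallbackProcessor, JpegProcessor, and StreamingProcessor hunks above, like the ZslProcessor hunk below, all pair a buffer-queue consumer with the new Surface class by wrapping the consumer's producer interface. A hedged sketch of that pairing using a CpuConsumer, as in the callback and JPEG paths (the helper name, the name string, and kBufferCount are illustrative):

#include <gui/CpuConsumer.h>
#include <gui/Surface.h>
#include <utils/String8.h>

using namespace android;

// Illustrative only: each processor picks its own buffer depth
// (kCallbackHeapCount for callbacks, 1 for JPEG capture, and so on).
static sp<ANativeWindow> makeCpuConsumerWindow(sp<CpuConsumer>* outConsumer) {
    const size_t kBufferCount = 2;               // assumed depth

    sp<CpuConsumer> consumer = new CpuConsumer(kBufferCount);
    consumer->setName(String8("Example::CpuConsumer"));
    // The real processors also register themselves here via
    // consumer->setFrameAvailableListener(this); omitted because it needs the
    // surrounding listener object.

    // Formerly: new SurfaceTextureClient(consumer->getProducerInterface())
    sp<ANativeWindow> window = new Surface(consumer->getProducerInterface());

    *outConsumer = consumer;
    return window;
}
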
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
index 9584028..900c099 100644
--- a/services/camera/libcameraservice/camera2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -29,7 +29,7 @@
 #include <utils/Trace.h>
 
 #include "ZslProcessor.h"
-#include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include "../Camera2Device.h"
 #include "../Camera2Client.h"
 
@@ -124,7 +124,7 @@
             true);
         mZslConsumer->setFrameAvailableListener(this);
         mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
-        mZslWindow = new SurfaceTextureClient(
+        mZslWindow = new Surface(
             mZslConsumer->getProducerInterface());
     }