Test GL/CPU encoding with Gralloc buffers

This adds tests for Gralloc-buffer-based encoding. It
contains two main changes:

1. GL-based encoding tests added to SurfaceMediaSource_test

2. SurfaceMediaSource::read() color format handling

SurfaceMediaSource::getFormat() reports the color format as
OMX_COLOR_FormatAndroidOpaque. The OMX encoder needs to interpret that
color format and read the actual format from the Gralloc buffers directly.
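
For reference, a minimal sketch of the metadata-only buffer that read()
hands to the encoder (this mirrors passMetadataBufferLocked() in this
change; the names come from that code): a 4-byte buffer-type tag
followed by the Gralloc buffer handle.

    MediaBuffer *tempBuffer = new MediaBuffer(4 + sizeof(buffer_handle_t));
    char *data = (char *)tempBuffer->data();
    OMX_U32 type = kMetadataBufferTypeGrallocSource;  // data lives in a Gralloc buffer
    memcpy(data, &type, 4);                           // first 4 bytes: metadata buffer type
    memcpy(data + 4, &(mCurrentBuf->handle), sizeof(buffer_handle_t));  // then the handle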

Change-Id: Iee2fe8901384109a4952e1d6c528c59eb01eb5b1
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 1affb8a..74d54d1 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -182,9 +182,9 @@
 
 protected:
 
-    // freeAllBuffers frees the resources (both GraphicBuffer and EGLImage) for
+    // freeAllBuffersLocked frees the resources (both GraphicBuffer and EGLImage) for
     // all slots.
-    void freeAllBuffers();
+    void freeAllBuffersLocked();
     static bool isExternalFormat(uint32_t format);
 
 private:
@@ -337,8 +337,15 @@
     // Set to a default of 30 fps if not specified by the client side
     int32_t mFrameRate;
 
-    // mStarted is a flag to check if the recording has started
-    bool mStarted;
+    // mStopped is a flag to check if the recording has stopped
+    bool mStopped;
+
+    // mNumFramesReceived indicates the number of frames received from
+    // the client side
+    int mNumFramesReceived;
+    // mNumFramesEncoded indicates the number of frames passed on to the
+    // encoder
+    int mNumFramesEncoded;
 
     // mFrameAvailableCondition condition used to indicate whether there
     // is a frame available for dequeuing
diff --git a/include/media/stagefright/openmax/OMX_IVCommon.h b/include/media/stagefright/openmax/OMX_IVCommon.h
index 97170d7..65b6339 100644
--- a/include/media/stagefright/openmax/OMX_IVCommon.h
+++ b/include/media/stagefright/openmax/OMX_IVCommon.h
@@ -154,7 +154,8 @@
      * Gralloc Buffers.
      * FIXME: In the process of reserving some enum values for
      * Android-specific OMX IL colorformats. Change this enum to
-     * an acceptable range once that is done.*/
+     * an acceptable range once that is done.
+     */
     OMX_COLOR_FormatAndroidOpaque = 0x7F000001,
     OMX_TI_COLOR_FormatYUV420PackedSemiPlanar = 0x7F000100,
     OMX_QCOM_COLOR_FormatYVU420SemiPlanar = 0x7FA30C00,
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 27dfeab..525ee8b 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -808,7 +808,7 @@
         }
 
         if (format.eCompressionFormat == compressionFormat
-            && format.eColorFormat == colorFormat) {
+                && format.eColorFormat == colorFormat) {
             found = true;
             break;
         }
@@ -838,6 +838,15 @@
         case OMX_COLOR_FormatYUV420Planar:
         case OMX_COLOR_FormatYUV420SemiPlanar:
         case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+        /*
+         * FIXME: For the opaque color format, the frame size does not
+         * need to be (w*h*3)/2. It just needs to be larger than a
+         * certain minimum buffer size. However, this opaque format
+         * has currently been tested only with YUV420 formats. If
+         * that changes, this part needs to be revisited in the
+         * future.
+         */
+        case OMX_COLOR_FormatAndroidOpaque:
             return (width * height * 3) / 2;
 
         default:
@@ -887,7 +896,7 @@
         // Make sure that omx component does not overwrite
         // the incremented index (bug 2897413).
         CHECK_EQ(index, portFormat.nIndex);
-        if ((portFormat.eColorFormat == colorFormat)) {
+        if (portFormat.eColorFormat == colorFormat) {
             LOGV("Found supported color format: %d", portFormat.eColorFormat);
             return OK;  // colorFormat is supported!
         }
@@ -2923,6 +2932,7 @@
     size_t offset = 0;
     int32_t n = 0;
 
+
     for (;;) {
         MediaBuffer *srcBuffer;
         if (mSeekTimeUs >= 0) {
@@ -3021,6 +3031,7 @@
                 CHECK(info->mMediaBuffer == NULL);
                 info->mMediaBuffer = srcBuffer;
             } else {
+                CHECK(srcBuffer->data() != NULL);
                 memcpy((uint8_t *)info->mData + offset,
                         (const uint8_t *)srcBuffer->data()
                             + srcBuffer->range_offset(),
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index ddfd9ff..c2e6707 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 // #define LOG_NDEBUG 0
 #define LOG_TAG "SurfaceMediaSource"
 
@@ -47,7 +46,9 @@
                 mSynchronousMode(true),
                 mConnectedApi(NO_CONNECTED_API),
                 mFrameRate(30),
-                mStarted(false)   {
+                mNumFramesReceived(0),
+                mNumFramesEncoded(0),
+                mStopped(false) {
     LOGV("SurfaceMediaSource::SurfaceMediaSource");
     sp<ISurfaceComposer> composer(ComposerService::getComposerService());
     mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
@@ -55,10 +56,9 @@
 
 SurfaceMediaSource::~SurfaceMediaSource() {
     LOGV("SurfaceMediaSource::~SurfaceMediaSource");
-    if (mStarted) {
+    if (!mStopped) {
         stop();
     }
-    freeAllBuffers();
 }
 
 size_t SurfaceMediaSource::getQueuedCount() const {
@@ -139,12 +139,12 @@
 
     // here we're guaranteed that the client doesn't have dequeued buffers
     // and will release all of its buffer references.
-    freeAllBuffers();
     mBufferCount = bufferCount;
     mClientBufferCount = bufferCount;
     mCurrentSlot = INVALID_BUFFER_SLOT;
     mQueue.clear();
     mDequeueCondition.signal();
+    freeAllBuffersLocked();
     return OK;
 }
 
@@ -164,7 +164,7 @@
 status_t SurfaceMediaSource::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
                                             uint32_t format, uint32_t usage) {
     LOGV("dequeueBuffer");
-
+    Mutex::Autolock lock(mMutex);
 
     // Check for the buffer size- the client should just use the
     // default width and height, and not try to set those.
@@ -184,10 +184,7 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock lock(mMutex);
-
     status_t returnFlags(OK);
-
     int found, foundSync;
     int dequeuedCount = 0;
     bool tryAgain = true;
@@ -218,6 +215,9 @@
                 LOGV("Waiting for the FIFO to drain");
                 mDequeueCondition.wait(mMutex);
             }
+            if (mStopped) {
+                return NO_INIT;
+            }
             // need to check again since the mode could have changed
             // while we were waiting
             minBufferCountNeeded = mSynchronousMode ?
@@ -228,7 +228,7 @@
                 ((mServerBufferCount != mBufferCount) ||
                         (mServerBufferCount < minBufferCountNeeded))) {
             // here we're guaranteed that mQueue is empty
-            freeAllBuffers();
+            freeAllBuffersLocked();
             mBufferCount = mServerBufferCount;
             if (mBufferCount < minBufferCountNeeded)
                 mBufferCount = minBufferCountNeeded;
@@ -290,9 +290,12 @@
         // for for some buffers to be consumed
         tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);
         if (tryAgain) {
-            LOGW("Waiting..In synchronous mode and no buffer to dQ");
+            LOGV("Waiting..In synchronous mode and no buffer to dequeue");
             mDequeueCondition.wait(mMutex);
         }
+        if (mStopped) {
+            return NO_INIT;
+        }
     }
 
     if (mSynchronousMode && found == INVALID_BUFFER_SLOT) {
@@ -304,7 +307,7 @@
         return -EBUSY;
     }
 
-    const int buf = found;
+    const int bufIndex = found;
     *outBuf = found;
 
     const bool useDefaultSize = !w && !h;
@@ -322,9 +325,9 @@
 
     // buffer is now in DEQUEUED (but can also be current at the same time,
     // if we're in synchronous mode)
-    mSlots[buf].mBufferState = BufferSlot::DEQUEUED;
+    mSlots[bufIndex].mBufferState = BufferSlot::DEQUEUED;
 
-    const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
+    const sp<GraphicBuffer>& buffer(mSlots[bufIndex].mGraphicBuffer);
     if ((buffer == NULL) ||
         (uint32_t(buffer->width)  != w) ||
         (uint32_t(buffer->height) != h) ||
@@ -342,22 +345,25 @@
             if (updateFormat) {
                 mPixelFormat = format;
             }
-            mSlots[buf].mGraphicBuffer = graphicBuffer;
-            mSlots[buf].mRequestBufferCalled = false;
+            mSlots[bufIndex].mGraphicBuffer = graphicBuffer;
+            mSlots[bufIndex].mRequestBufferCalled = false;
             returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
     }
     return returnFlags;
 }
 
+// TODO: clean this up
 status_t SurfaceMediaSource::setSynchronousMode(bool enabled) {
     Mutex::Autolock lock(mMutex);
+    if (mStopped) {
+        LOGE("setSynchronousMode: SurfaceMediaSource has been stopped!");
+        return NO_INIT;
+    }
 
-    status_t err = OK;
     if (!enabled) {
-        // going to asynchronous mode, drain the queue
-        while (mSynchronousMode != enabled && !mQueue.isEmpty()) {
-            mDequeueCondition.wait(mMutex);
-        }
+        // Async mode is not allowed
+        LOGE("SurfaceMediaSource can be used only synchronous mode!");
+        return INVALID_OPERATION;
     }
 
     if (mSynchronousMode != enabled) {
@@ -368,13 +374,19 @@
         mSynchronousMode = enabled;
         mDequeueCondition.signal();
     }
-    return err;
+    return OK;
 }
 
 status_t SurfaceMediaSource::connect(int api,
         uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {
     LOGV("SurfaceMediaSource::connect");
     Mutex::Autolock lock(mMutex);
+
+    if (mStopped) {
+        LOGE("Connect: SurfaceMediaSource has been stopped!");
+        return NO_INIT;
+    }
+
     status_t err = NO_ERROR;
     switch (api) {
         case NATIVE_WINDOW_API_EGL:
@@ -397,9 +409,25 @@
     return err;
 }
 
+// This is called by the client side when it is done.
+// TODO: Currently, this also sets mStopped to true, which
+// is needed to unblock the encoder that might be
+// waiting to read more frames. So if, on the client side,
+// the same thread supplies the frames and also calls stop
+// on the encoder, the client has to call disconnect before
+// it calls stop.
+// In the case of the camera,
+// this is not required since the thread supplying the
+// frames is separate from the one calling stop.
 status_t SurfaceMediaSource::disconnect(int api) {
     LOGV("SurfaceMediaSource::disconnect");
     Mutex::Autolock lock(mMutex);
+
+    if (mStopped) {
+        LOGE("disconnect: SurfaceMediaSoource is already stopped!");
+        return NO_INIT;
+    }
+
     status_t err = NO_ERROR;
     switch (api) {
         case NATIVE_WINDOW_API_EGL:
@@ -408,6 +436,9 @@
         case NATIVE_WINDOW_API_CAMERA:
             if (mConnectedApi == api) {
                 mConnectedApi = NO_CONNECTED_API;
+                mStopped = true;
+                mDequeueCondition.signal();
+                mFrameAvailableCondition.signal();
             } else {
                 err = -EINVAL;
             }
@@ -419,45 +450,47 @@
     return err;
 }
 
-status_t SurfaceMediaSource::queueBuffer(int buf, int64_t timestamp,
+status_t SurfaceMediaSource::queueBuffer(int bufIndex, int64_t timestamp,
         uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {
     LOGV("queueBuffer");
 
     Mutex::Autolock lock(mMutex);
-    if (buf < 0 || buf >= mBufferCount) {
+    if (bufIndex < 0 || bufIndex >= mBufferCount) {
         LOGE("queueBuffer: slot index out of range [0, %d]: %d",
-                mBufferCount, buf);
+                mBufferCount, bufIndex);
         return -EINVAL;
-    } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
+    } else if (mSlots[bufIndex].mBufferState != BufferSlot::DEQUEUED) {
         LOGE("queueBuffer: slot %d is not owned by the client (state=%d)",
-                buf, mSlots[buf].mBufferState);
+                bufIndex, mSlots[bufIndex].mBufferState);
         return -EINVAL;
-    } else if (!mSlots[buf].mRequestBufferCalled) {
+    } else if (!mSlots[bufIndex].mRequestBufferCalled) {
         LOGE("queueBuffer: slot %d was enqueued without requesting a "
-                "buffer", buf);
+                "buffer", bufIndex);
         return -EINVAL;
     }
 
     if (mSynchronousMode) {
         // in synchronous mode we queue all buffers in a FIFO
-        mQueue.push_back(buf);
-        LOGV("Client queued buffer on slot: %d, Q size = %d",
-                                                buf, mQueue.size());
+        mQueue.push_back(bufIndex);
+        mNumFramesReceived++;
+        LOGV("Client queued buf# %d @slot: %d, Q size = %d, handle = %p, timestamp = %lld",
+            mNumFramesReceived, bufIndex, mQueue.size(),
+            mSlots[bufIndex].mGraphicBuffer->handle, timestamp);
     } else {
         // in asynchronous mode we only keep the most recent buffer
         if (mQueue.empty()) {
-            mQueue.push_back(buf);
+            mQueue.push_back(bufIndex);
         } else {
             Fifo::iterator front(mQueue.begin());
             // buffer currently queued is freed
             mSlots[*front].mBufferState = BufferSlot::FREE;
             // and we record the new buffer index in the queued list
-            *front = buf;
+            *front = bufIndex;
         }
     }
 
-    mSlots[buf].mBufferState = BufferSlot::QUEUED;
-    mSlots[buf].mTimestamp = timestamp;
+    mSlots[bufIndex].mBufferState = BufferSlot::QUEUED;
+    mSlots[bufIndex].mTimestamp = timestamp;
     // TODO: (Confirm) Don't want to signal dequeue here.
     // May be just in asynchronous mode?
     // mDequeueCondition.signal();
@@ -482,7 +515,7 @@
 // wait to hear from StageFrightRecorder to set the buffer FREE
 // Make sure this is called when the mutex is locked
 status_t SurfaceMediaSource::onFrameReceivedLocked() {
-    LOGV("On Frame Received");
+    LOGV("On Frame Received locked");
     // Signal the encoder that a new frame has arrived
     mFrameAvailableCondition.signal();
 
@@ -501,19 +534,19 @@
 }
 
 
-void SurfaceMediaSource::cancelBuffer(int buf) {
+void SurfaceMediaSource::cancelBuffer(int bufIndex) {
     LOGV("SurfaceMediaSource::cancelBuffer");
     Mutex::Autolock lock(mMutex);
-    if (buf < 0 || buf >= mBufferCount) {
+    if (bufIndex < 0 || bufIndex >= mBufferCount) {
         LOGE("cancelBuffer: slot index out of range [0, %d]: %d",
-                mBufferCount, buf);
+                mBufferCount, bufIndex);
         return;
-    } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
+    } else if (mSlots[bufIndex].mBufferState != BufferSlot::DEQUEUED) {
         LOGE("cancelBuffer: slot %d is not owned by the client (state=%d)",
-                buf, mSlots[buf].mBufferState);
+                bufIndex, mSlots[bufIndex].mBufferState);
         return;
     }
-    mSlots[buf].mBufferState = BufferSlot::FREE;
+    mSlots[bufIndex].mBufferState = BufferSlot::FREE;
     mDequeueCondition.signal();
 }
 
@@ -531,8 +564,8 @@
     mFrameAvailableListener = listener;
 }
 
-void SurfaceMediaSource::freeAllBuffers() {
-    LOGV("freeAllBuffers");
+void SurfaceMediaSource::freeAllBuffersLocked() {
+    LOGV("freeAllBuffersLocked");
     for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
         mSlots[i].mGraphicBuffer = 0;
         mSlots[i].mBufferState = BufferSlot::FREE;
@@ -648,10 +681,7 @@
 
 status_t SurfaceMediaSource::start(MetaData *params)
 {
-    LOGV("start");
-    Mutex::Autolock lock(mMutex);
-    CHECK(!mStarted);
-    mStarted = true;
+    LOGV("started!");
     return OK;
 }
 
@@ -662,8 +692,11 @@
 
     Mutex::Autolock lock(mMutex);
     // TODO: Add waiting on mFrameCompletedCondition here?
-    mStarted = false;
+    mStopped = true;
     mFrameAvailableCondition.signal();
+    mDequeueCondition.signal();
+    mQueue.clear();
+    freeAllBuffersLocked();
 
     return OK;
 }
@@ -688,23 +721,25 @@
 }
 
 status_t SurfaceMediaSource::read( MediaBuffer **buffer,
-                                const ReadOptions *options)
+                                    const ReadOptions *options)
 {
+    Mutex::Autolock autoLock(mMutex);
+
     LOGV("Read. Size of queued buffer: %d", mQueue.size());
     *buffer = NULL;
 
-    Mutex::Autolock autoLock(mMutex) ;
     // If the recording has started and the queue is empty, then just
     // wait here till the frames come in from the client side
-    while (mStarted && mQueue.empty()) {
+    while (!mStopped && mQueue.empty()) {
         LOGV("NO FRAMES! Recorder waiting for FrameAvailableCondition");
         mFrameAvailableCondition.wait(mMutex);
     }
 
     // If the loop was exited as a result of stopping the recording,
     // it is OK
-    if (!mStarted) {
-        return OK;
+    if (mStopped) {
+        LOGV("Read: SurfaceMediaSource is stopped. Returning NO_INIT;");
+        return NO_INIT;
     }
 
     // Update the current buffer info
@@ -712,15 +747,20 @@
     // can be more than one "current" slots.
     Fifo::iterator front(mQueue.begin());
     mCurrentSlot = *front;
+    mQueue.erase(front);
     mCurrentBuf = mSlots[mCurrentSlot].mGraphicBuffer;
+    int64_t prevTimeStamp = mCurrentTimestamp;
     mCurrentTimestamp = mSlots[mCurrentSlot].mTimestamp;
-
+    mNumFramesEncoded++;
     // Pass the data to the MediaBuffer. Pass in only the metadata
     passMetadataBufferLocked(buffer);
 
     (*buffer)->setObserver(this);
     (*buffer)->add_ref();
-    (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp);
+    (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp / 1000);
+    LOGV("Frames encoded = %d, timestamp = %lld, time diff = %lld",
+            mNumFramesEncoded, mCurrentTimestamp / 1000,
+            mCurrentTimestamp / 1000 - prevTimeStamp / 1000);
 
     return OK;
 }
@@ -743,15 +783,17 @@
         new MediaBuffer(4 + sizeof(buffer_handle_t));
     char *data = (char *)tempBuffer->data();
     if (data == NULL) {
-        LOGE("Cannot allocate memory for passing buffer metadata!");
+        LOGE("Cannot allocate memory for metadata buffer!");
         return;
     }
     OMX_U32 type = kMetadataBufferTypeGrallocSource;
     memcpy(data, &type, 4);
     memcpy(data + 4, &(mCurrentBuf->handle), sizeof(buffer_handle_t));
     *buffer = tempBuffer;
-}
 
+    LOGV("handle = %p, , offset = %d, length = %d",
+            mCurrentBuf->handle, (*buffer)->range_length(), (*buffer)->range_offset());
+}
 
 void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {
     LOGV("signalBufferReturned");
@@ -759,16 +801,19 @@
     bool foundBuffer = false;
     Mutex::Autolock autoLock(mMutex);
 
-    if (!mStarted) {
-        LOGW("signalBufferReturned: mStarted = false! Nothing to do!");
+    if (mStopped) {
+        LOGV("signalBufferReturned: mStopped = true! Nothing to do!");
         return;
     }
 
-    for (Fifo::iterator it = mQueue.begin(); it != mQueue.end(); ++it) {
-        CHECK(mSlots[*it].mGraphicBuffer != NULL);
-        if (checkBufferMatchesSlot(*it, buffer)) {
-            mSlots[*it].mBufferState = BufferSlot::FREE;
-            mQueue.erase(it);
+    for (int id = 0; id < NUM_BUFFER_SLOTS; id++) {
+        if (mSlots[id].mGraphicBuffer == NULL) {
+            continue;
+        }
+        if (checkBufferMatchesSlot(id, buffer)) {
+            LOGV("Slot %d returned, matches handle = %p", id,
+                    mSlots[id].mGraphicBuffer->handle);
+            mSlots[id].mBufferState = BufferSlot::FREE;
             buffer->setObserver(0);
             buffer->release();
             mDequeueCondition.signal();
@@ -792,5 +837,4 @@
     return mSlots[slot].mGraphicBuffer->handle  ==  bufferHandle;
 }
 
-
 } // end of namespace android
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 3ea8f39..357feb1 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -19,12 +19,13 @@
 	libbinder \
 	libcutils \
 	libgui \
-	libstlport \
-	libui \
-	libutils \
+	libmedia \
 	libstagefright \
 	libstagefright_omx \
 	libstagefright_foundation \
+	libstlport \
+	libui \
+	libutils \
 
 LOCAL_STATIC_LIBRARIES := \
 	libgtest \
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 5b32b68..d643a0b 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "SurfaceMediaSource_test"
 // #define LOG_NDEBUG 0
+#define LOG_TAG "SurfaceMediaSource_test"
 
 #include <gtest/gtest.h>
 #include <utils/String8.h>
 #include <utils/Errors.h>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include <media/stagefright/SurfaceMediaSource.h>
+#include <media/mediarecorder.h>
 
 #include <gui/SurfaceTextureClient.h>
 #include <ui/GraphicBuffer.h>
@@ -33,24 +36,322 @@
 #include <ui/FramebufferNativeWindow.h>
 
 #include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
 #include <OMX_Component.h>
 
 #include "DummyRecorder.h"
 
+
 namespace android {
 
+class GLTest : public ::testing::Test {
+protected:
 
+    GLTest():
+            mEglDisplay(EGL_NO_DISPLAY),
+            mEglSurface(EGL_NO_SURFACE),
+            mEglContext(EGL_NO_CONTEXT) {
+    }
+
+    virtual void SetUp() {
+        LOGV("GLTest::SetUp()");
+        mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        ASSERT_NE(EGL_NO_DISPLAY, mEglDisplay);
+
+        EGLint majorVersion;
+        EGLint minorVersion;
+        EXPECT_TRUE(eglInitialize(mEglDisplay, &majorVersion, &minorVersion));
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        RecordProperty("EglVersionMajor", majorVersion);
+        RecordProperty("EglVersionMajor", minorVersion);
+
+        EGLint numConfigs = 0;
+        EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mGlConfig,
+                1, &numConfigs));
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+        char* displaySecsEnv = getenv("GLTEST_DISPLAY_SECS");
+        if (displaySecsEnv != NULL) {
+            mDisplaySecs = atoi(displaySecsEnv);
+            if (mDisplaySecs < 0) {
+                mDisplaySecs = 0;
+            }
+        } else {
+            mDisplaySecs = 0;
+        }
+
+        if (mDisplaySecs > 0) {
+            mComposerClient = new SurfaceComposerClient;
+            ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
+
+            mSurfaceControl = mComposerClient->createSurface(
+                    String8("Test Surface"), 0,
+                    getSurfaceWidth(), getSurfaceHeight(),
+                    PIXEL_FORMAT_RGB_888, 0);
+
+            ASSERT_TRUE(mSurfaceControl != NULL);
+            ASSERT_TRUE(mSurfaceControl->isValid());
+
+            SurfaceComposerClient::openGlobalTransaction();
+            ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF));
+            ASSERT_EQ(NO_ERROR, mSurfaceControl->show());
+            SurfaceComposerClient::closeGlobalTransaction();
+
+            sp<ANativeWindow> window = mSurfaceControl->getSurface();
+            mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
+                    window.get(), NULL);
+        } else {
+            EGLint pbufferAttribs[] = {
+                EGL_WIDTH, getSurfaceWidth(),
+                EGL_HEIGHT, getSurfaceHeight(),
+                EGL_NONE };
+
+            mEglSurface = eglCreatePbufferSurface(mEglDisplay, mGlConfig,
+                    pbufferAttribs);
+        }
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        ASSERT_NE(EGL_NO_SURFACE, mEglSurface);
+
+        mEglContext = eglCreateContext(mEglDisplay, mGlConfig, EGL_NO_CONTEXT,
+                getContextAttribs());
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        ASSERT_NE(EGL_NO_CONTEXT, mEglContext);
+
+        EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
+                mEglContext));
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+        EGLint w, h;
+        EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_WIDTH, &w));
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_HEIGHT, &h));
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+        RecordProperty("EglSurfaceWidth", w);
+        RecordProperty("EglSurfaceHeight", h);
+
+        glViewport(0, 0, w, h);
+        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+    }
+
+    virtual void TearDown() {
+        // Display the result
+        if (mDisplaySecs > 0 && mEglSurface != EGL_NO_SURFACE) {
+            eglSwapBuffers(mEglDisplay, mEglSurface);
+            sleep(mDisplaySecs);
+        }
+
+        if (mComposerClient != NULL) {
+            mComposerClient->dispose();
+        }
+        if (mEglContext != EGL_NO_CONTEXT) {
+            eglDestroyContext(mEglDisplay, mEglContext);
+        }
+        if (mEglSurface != EGL_NO_SURFACE) {
+            eglDestroySurface(mEglDisplay, mEglSurface);
+        }
+        if (mEglDisplay != EGL_NO_DISPLAY) {
+            eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE,
+                    EGL_NO_CONTEXT);
+            eglTerminate(mEglDisplay);
+        }
+        ASSERT_EQ(EGL_SUCCESS, eglGetError());
+    }
+
+    virtual EGLint const* getConfigAttribs() {
+        LOGV("GLTest getConfigAttribs");
+        static EGLint sDefaultConfigAttribs[] = {
+            EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
+            EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+            EGL_RED_SIZE, 8,
+            EGL_GREEN_SIZE, 8,
+            EGL_BLUE_SIZE, 8,
+            EGL_ALPHA_SIZE, 8,
+            EGL_DEPTH_SIZE, 16,
+            EGL_STENCIL_SIZE, 8,
+            EGL_NONE };
+
+        return sDefaultConfigAttribs;
+    }
+
+    virtual EGLint const* getContextAttribs() {
+        static EGLint sDefaultContextAttribs[] = {
+            EGL_CONTEXT_CLIENT_VERSION, 2,
+            EGL_NONE };
+
+        return sDefaultContextAttribs;
+    }
+
+    virtual EGLint getSurfaceWidth() {
+        return 512;
+    }
+
+    virtual EGLint getSurfaceHeight() {
+        return 512;
+    }
+
+    void loadShader(GLenum shaderType, const char* pSource, GLuint* outShader) {
+        GLuint shader = glCreateShader(shaderType);
+        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+        if (shader) {
+            glShaderSource(shader, 1, &pSource, NULL);
+            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+            glCompileShader(shader);
+            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+            GLint compiled = 0;
+            glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
+            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+            if (!compiled) {
+                GLint infoLen = 0;
+                glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
+                ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+                if (infoLen) {
+                    char* buf = (char*) malloc(infoLen);
+                    if (buf) {
+                        glGetShaderInfoLog(shader, infoLen, NULL, buf);
+                        printf("Shader compile log:\n%s\n", buf);
+                        free(buf);
+                        FAIL();
+                    }
+                } else {
+                    char* buf = (char*) malloc(0x1000);
+                    if (buf) {
+                        glGetShaderInfoLog(shader, 0x1000, NULL, buf);
+                        printf("Shader compile log:\n%s\n", buf);
+                        free(buf);
+                        FAIL();
+                    }
+                }
+                glDeleteShader(shader);
+                shader = 0;
+            }
+        }
+        ASSERT_TRUE(shader != 0);
+        *outShader = shader;
+    }
+
+    void createProgram(const char* pVertexSource, const char* pFragmentSource,
+            GLuint* outPgm) {
+        GLuint vertexShader, fragmentShader;
+        {
+            SCOPED_TRACE("compiling vertex shader");
+            loadShader(GL_VERTEX_SHADER, pVertexSource, &vertexShader);
+            if (HasFatalFailure()) {
+                return;
+            }
+        }
+        {
+            SCOPED_TRACE("compiling fragment shader");
+            loadShader(GL_FRAGMENT_SHADER, pFragmentSource, &fragmentShader);
+            if (HasFatalFailure()) {
+                return;
+            }
+        }
+
+        GLuint program = glCreateProgram();
+        ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+        if (program) {
+            glAttachShader(program, vertexShader);
+            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+            glAttachShader(program, fragmentShader);
+            ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
+            glLinkProgram(program);
+            GLint linkStatus = GL_FALSE;
+            glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
+            if (linkStatus != GL_TRUE) {
+                GLint bufLength = 0;
+                glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
+                if (bufLength) {
+                    char* buf = (char*) malloc(bufLength);
+                    if (buf) {
+                        glGetProgramInfoLog(program, bufLength, NULL, buf);
+                        printf("Program link log:\n%s\n", buf);
+                        free(buf);
+                        FAIL();
+                    }
+                }
+                glDeleteProgram(program);
+                program = 0;
+            }
+        }
+        glDeleteShader(vertexShader);
+        glDeleteShader(fragmentShader);
+        ASSERT_TRUE(program != 0);
+        *outPgm = program;
+    }
+
+    static int abs(int value) {
+        return value > 0 ? value : -value;
+    }
+
+    ::testing::AssertionResult checkPixel(int x, int y, int r,
+            int g, int b, int a, int tolerance=2) {
+        GLubyte pixel[4];
+        String8 msg;
+        glReadPixels(x, y, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel);
+        GLenum err = glGetError();
+        if (err != GL_NO_ERROR) {
+            msg += String8::format("error reading pixel: %#x", err);
+            while ((err = glGetError()) != GL_NO_ERROR) {
+                msg += String8::format(", %#x", err);
+            }
+            fprintf(stderr, "pixel check failure: %s\n", msg.string());
+            return ::testing::AssertionFailure(
+                    ::testing::Message(msg.string()));
+        }
+        if (r >= 0 && abs(r - int(pixel[0])) > tolerance) {
+            msg += String8::format("r(%d isn't %d)", pixel[0], r);
+        }
+        if (g >= 0 && abs(g - int(pixel[1])) > tolerance) {
+            if (!msg.isEmpty()) {
+                msg += " ";
+            }
+            msg += String8::format("g(%d isn't %d)", pixel[1], g);
+        }
+        if (b >= 0 && abs(b - int(pixel[2])) > tolerance) {
+            if (!msg.isEmpty()) {
+                msg += " ";
+            }
+            msg += String8::format("b(%d isn't %d)", pixel[2], b);
+        }
+        if (a >= 0 && abs(a - int(pixel[3])) > tolerance) {
+            if (!msg.isEmpty()) {
+                msg += " ";
+            }
+            msg += String8::format("a(%d isn't %d)", pixel[3], a);
+        }
+        if (!msg.isEmpty()) {
+            fprintf(stderr, "pixel check failure: %s\n", msg.string());
+            return ::testing::AssertionFailure(
+                    ::testing::Message(msg.string()));
+        } else {
+            return ::testing::AssertionSuccess();
+        }
+    }
+
+    int mDisplaySecs;
+    sp<SurfaceComposerClient> mComposerClient;
+    sp<SurfaceControl> mSurfaceControl;
+
+    EGLDisplay mEglDisplay;
+    EGLSurface mEglSurface;
+    EGLContext mEglContext;
+    EGLConfig  mGlConfig;
+};
+
+///////////////////////////////////////////////////////////////////////
+//    Class for  the NON-GL tests
+///////////////////////////////////////////////////////////////////////
 class SurfaceMediaSourceTest : public ::testing::Test {
 public:
 
-    SurfaceMediaSourceTest( ): mYuvTexWidth(64), mYuvTexHeight(66) { }
-    sp<MPEG4Writer>  setUpWriter(OMXClient &client );
+    SurfaceMediaSourceTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { }
     void oneBufferPass(int width, int height );
+    void oneBufferPassNoFill(int width, int height );
     static void fillYV12Buffer(uint8_t* buf, int w, int h, int stride) ;
     static void fillYV12BufferRect(uint8_t* buf, int w, int h,
                         int stride, const android_native_rect_t& rect) ;
@@ -62,10 +363,8 @@
         mSMS->setSynchronousMode(true);
         mSTC = new SurfaceTextureClient(mSMS);
         mANW = mSTC;
-
     }
 
-
     virtual void TearDown() {
         mSMS.clear();
         mSTC.clear();
@@ -78,11 +377,142 @@
     sp<SurfaceMediaSource> mSMS;
     sp<SurfaceTextureClient> mSTC;
     sp<ANativeWindow> mANW;
-
 };
 
+///////////////////////////////////////////////////////////////////////
+//    Class for  the GL tests
+///////////////////////////////////////////////////////////////////////
+class SurfaceMediaSourceGLTest : public GLTest {
+public:
+
+    SurfaceMediaSourceGLTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { }
+    virtual EGLint const* getConfigAttribs();
+    void oneBufferPassGL(int num = 0);
+    static sp<MediaRecorder> setUpMediaRecorder(int fileDescriptor, int videoSource,
+        int outputFormat, int videoEncoder, int width, int height, int fps);
+protected:
+
+    virtual void SetUp() {
+        LOGV("SMS-GLTest::SetUp()");
+        android::ProcessState::self()->startThreadPool();
+        mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
+        mSTC = new SurfaceTextureClient(mSMS);
+        mANW = mSTC;
+
+        // Doing the setup related to the GL Side
+        GLTest::SetUp();
+    }
+
+    virtual void TearDown() {
+        mSMS.clear();
+        mSTC.clear();
+        mANW.clear();
+        GLTest::TearDown();
+        eglDestroySurface(mEglDisplay, mSmsEglSurface);
+    }
+
+    void setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr);
+
+    const int mYuvTexWidth;
+    const int mYuvTexHeight;
+
+    sp<SurfaceMediaSource> mSMS;
+    sp<SurfaceTextureClient> mSTC;
+    sp<ANativeWindow> mANW;
+    EGLConfig  mSMSGlConfig;
+    EGLSurface  mSmsEglSurface;
+};
+
+/////////////////////////////////////////////////////////////////////
+// Methods in SurfaceMediaSourceGLTest
+/////////////////////////////////////////////////////////////////////
+EGLint const* SurfaceMediaSourceGLTest::getConfigAttribs() {
+        LOGV("SurfaceMediaSourceGLTest getConfigAttribs");
+    static EGLint sDefaultConfigAttribs[] = {
+        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+        EGL_RED_SIZE, 8,
+        EGL_GREEN_SIZE, 8,
+        EGL_BLUE_SIZE, 8,
+        EGL_RECORDABLE_ANDROID, EGL_TRUE,
+        EGL_NONE };
+
+    return sDefaultConfigAttribs;
+}
+
+// One pass of dequeuing and queuing a GLBuffer
+void SurfaceMediaSourceGLTest::oneBufferPassGL(int num) {
+    int d = num % 50;
+    float f = 0.2f; // 0.1f * d;
+
+    glClearColor(0, 0.3, 0, 0.6);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glEnable(GL_SCISSOR_TEST);
+    glScissor(4 + d, 4 + d, 4, 4);
+    glClearColor(1.0 - f, f, f, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(24 + d, 48 + d, 4, 4);
+    glClearColor(f, 1.0 - f, f, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(37 + d, 17 + d, 4, 4);
+    glClearColor(f, f, 1.0 - f, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    // The following call dequeues and queues the buffer
+    eglSwapBuffers(mEglDisplay, mSmsEglSurface);
+    glDisable(GL_SCISSOR_TEST);
+}
+
+// Set up the MediaRecorder which runs in the same process as mediaserver
+sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int videoSource,
+        int outputFormat, int videoEncoder, int width, int height, int fps) {
+    sp<MediaRecorder> mr = new MediaRecorder();
+    mr->setVideoSource(videoSource);
+    mr->setOutputFormat(outputFormat);
+    mr->setVideoEncoder(videoEncoder);
+    mr->setOutputFile(fd, 0, 0);
+    mr->setVideoSize(width, height);
+    mr->setVideoFrameRate(fps);
+    mr->prepare();
+    LOGV("Starting MediaRecorder...");
+    CHECK_EQ(OK, mr->start());
+    return mr;
+}
+
+// Query the MediaRecorder for a SurfaceMediaSource and create an EGL surface with it
+void SurfaceMediaSourceGLTest::setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr) {
+    sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer();
+    mSTC = new SurfaceTextureClient(iST);
+    mANW = mSTC;
+
+    EGLint numConfigs = 0;
+    EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig,
+            1, &numConfigs));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+    LOGV("Native Window = %p, mSTC = %p", mANW.get(), mSTC.get());
+
+    mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig,
+                                mANW.get(), NULL);
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+    ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface) ;
+
+    EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface,
+            mEglContext));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+}
+
+
+/////////////////////////////////////////////////////////////////////
+// Methods in SurfaceMediaSourceTest
+/////////////////////////////////////////////////////////////////////
+
+// One pass of dequeuing and queuing the buffer. Fills it in with
+// a CPU-written YV12 buffer.
 void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) {
-    LOGV("One Buffer Pass");
     ANativeWindowBuffer* anb;
     ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
     ASSERT_TRUE(anb != NULL);
@@ -99,42 +529,16 @@
     ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));
 }
 
-sp<MPEG4Writer> SurfaceMediaSourceTest::setUpWriter(OMXClient &client ) {
-    // Writing to a file
-    const char *fileName = "/sdcard/outputSurfEnc.mp4";
-    sp<MetaData> enc_meta = new MetaData;
-    enc_meta->setInt32(kKeyBitRate, 300000);
-    enc_meta->setInt32(kKeyFrameRate, 30);
+// Dequeuing and queuing the buffer without really filling it in.
+void SurfaceMediaSourceTest::oneBufferPassNoFill(int width, int height ) {
+    ANativeWindowBuffer* anb;
+    ASSERT_EQ(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
+    ASSERT_TRUE(anb != NULL);
 
-    enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
-
-    sp<MetaData> meta = mSMS->getFormat();
-
-    int32_t width, height, stride, sliceHeight, colorFormat;
-    CHECK(meta->findInt32(kKeyWidth, &width));
-    CHECK(meta->findInt32(kKeyHeight, &height));
-    CHECK(meta->findInt32(kKeyStride, &stride));
-    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
-    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
-
-    enc_meta->setInt32(kKeyWidth, width);
-    enc_meta->setInt32(kKeyHeight, height);
-    enc_meta->setInt32(kKeyIFramesInterval, 1);
-    enc_meta->setInt32(kKeyStride, stride);
-    enc_meta->setInt32(kKeySliceHeight, sliceHeight);
-    // TODO: overwriting the colorformat since the format set by GRAlloc
-    // could be wrong or not be read by OMX
-    enc_meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
-
-
-    sp<MediaSource> encoder =
-        OMXCodec::Create(
-                client.interface(), enc_meta, true /* createEncoder */, mSMS);
-
-    sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
-    writer->addSource(encoder);
-
-    return writer;
+    sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+    // ASSERT_EQ(NO_ERROR, mANW->lockBuffer(mANW.get(), buf->getNativeBuffer()));
+    // We do not fill the buffer in. Just queue it back.
+    ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer()));
 }
 
 // Fill a YV12 buffer with a multi-colored checkerboard pattern
@@ -216,46 +620,53 @@
             return OK;
         }
 };
-
 ///////////////////////////////////////////////////////////////////
 //           TESTS
+// SurfaceMediaSourceTest class contains tests that fill the buffers
+// using CPU calls.
+// SurfaceMediaSourceGLTest class contains tests that fill the buffers
+// using GL calls.
+// TODO: None of the tests actually verify the encoded images. So at this
+// point, these are mostly functionality tests + visual inspection.
+//////////////////////////////////////////////////////////////////////
+
 // Just pass one buffer from the native_window to the SurfaceMediaSource
-TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotOneBufferPass) {
+// Dummy Encoder
+static int testId = 1;
+TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotOneBufferPass) {
+    LOGV("Test # %d", testId++);
     LOGV("Testing OneBufferPass ******************************");
 
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
-
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+            HAL_PIXEL_FORMAT_YV12));
     oneBufferPass(mYuvTexWidth, mYuvTexHeight);
 }
 
 // Pass the buffer with the wrong height and weight and should not be accepted
-TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) {
+// Dummy Encoder
+TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) {
+    LOGV("Test # %d", testId++);
     LOGV("Testing Wrong size BufferPass ******************************");
 
     // setting the client side buffer size different than the server size
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-             10, 10, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_dimensions(mANW.get(),
+             10, 10));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+            HAL_PIXEL_FORMAT_YV12));
 
     ANativeWindowBuffer* anb;
 
-    // make sure we get an error back when dequeuing!
+    // Note: make sure we get an ERROR back when dequeuing!
     ASSERT_NE(NO_ERROR, mANW->dequeueBuffer(mANW.get(), &anb));
 }
 
-
 // pass multiple buffers from the native_window the SurfaceMediaSource
-// A dummy writer is used to simulate actual MPEG4Writer
-TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+// Dummy Encoder
+TEST_F(SurfaceMediaSourceTest,  DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+    LOGV("Test # %d", testId++);
     LOGV("Testing MultiBufferPass, Dummy Recorder *********************");
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+            HAL_PIXEL_FORMAT_YV12));
 
     SimpleDummyRecorder writer(mSMS);
     writer.start();
@@ -272,14 +683,13 @@
 }
 
 // Delayed pass of multiple buffers from the native_window the SurfaceMediaSource
-// A dummy writer is used to simulate actual MPEG4Writer
-TEST_F(SurfaceMediaSourceTest,  EncodingFromCpuFilledYV12BufferNpotMultiBufferPassLag) {
+// Dummy Encoder
+TEST_F(SurfaceMediaSourceTest,  DISABLED_DummyLagEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+    LOGV("Test # %d", testId++);
     LOGV("Testing MultiBufferPass, Dummy Recorder Lagging **************");
 
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+            HAL_PIXEL_FORMAT_YV12));
 
     SimpleDummyRecorder writer(mSMS);
     writer.start();
@@ -299,12 +709,11 @@
 
 // pass multiple buffers from the native_window the SurfaceMediaSource
 // A dummy writer (MULTITHREADED) is used to simulate actual MPEG4Writer
-TEST_F(SurfaceMediaSourceTest, EncodingFromCpuFilledYV12BufferNpotMultiBufferPassThreaded) {
+TEST_F(SurfaceMediaSourceTest, DISABLED_DummyThreadedEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
+    LOGV("Test # %d", testId++);
     LOGV("Testing MultiBufferPass, Dummy Recorder Multi-Threaded **********");
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+            HAL_PIXEL_FORMAT_YV12));
 
     DummyRecorder writer(mSMS);
     writer.start();
@@ -318,32 +727,210 @@
     writer.stop();
 }
 
-// Test to examine the actual encoding. Temporarily disabled till the
-// colorformat and encoding from GRAlloc data is resolved
-TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuFilledYV12BufferNpotWrite) {
-    LOGV("Testing the whole pipeline with actual Recorder");
-    ASSERT_EQ(NO_ERROR, native_window_set_buffers_geometry(mANW.get(),
-            0, 0, HAL_PIXEL_FORMAT_YV12));
-    ASSERT_EQ(NO_ERROR, native_window_set_usage(mANW.get(),
-            GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN));
-    OMXClient client;
-    CHECK_EQ(OK, client.connect());
+// Test to examine actual encoding using MediaRecorder.
+// We use the mediaserver to create a MediaRecorder and send
+// it back to us. So SurfaceMediaSource lives in the same process
+// as the mediaserver.
+// Very close to the actual camera, except that the
+// buffers are filled and queued by the CPU instead of GL.
+TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaServer) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual MediaRecorder ***********");
+    LOGV("************** SurfaceMediaSource is same process as mediaserver    ***********");
 
-    sp<MPEG4Writer> writer = setUpWriter(client);
-    int64_t start = systemTime();
-    CHECK_EQ(OK, writer->start());
+    const char *fileName = "/sdcard/outputSurfEncMSource.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = SurfaceMediaSourceGLTest::setUpMediaRecorder(fd,
+            VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth,
+            mYuvTexHeight, 30);
+    // Get the reference to the SurfaceMediaSource living in
+    // mediaserver, created by StagefrightRecorder
+    sp<ISurfaceTexture> iST = mr->querySurfaceMediaSourceFromMediaServer();
+    mSTC = new SurfaceTextureClient(iST);
+    mANW = mSTC;
+    ASSERT_EQ(NO_ERROR, native_window_api_connect(mANW.get(), NATIVE_WINDOW_API_CPU));
+    ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
+                                                HAL_PIXEL_FORMAT_YV12));
 
     int32_t nFramesCount = 0;
     while (nFramesCount <= 300) {
-        oneBufferPass(mYuvTexWidth, mYuvTexHeight);
+        oneBufferPassNoFill(mYuvTexWidth, mYuvTexHeight);
         nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
     }
 
-    CHECK_EQ(OK, writer->stop());
-    writer.clear();
-    int64_t end = systemTime();
-    client.disconnect();
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_CPU));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
 }
 
+//////////////////////////////////////////////////////////////////////
+// GL tests
+/////////////////////////////////////////////////////////////////////
 
+// Test to examine whether we can choose the Recordable Android GLConfig
+// DummyRecorder used; no real encoding here
+TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("Test to verify creating a surface w/ right config *********");
+
+    mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
+    mSTC = new SurfaceTextureClient(mSMS);
+    mANW = mSTC;
+
+    DummyRecorder writer(mSMS);
+    writer.start();
+
+    EGLint numConfigs = 0;
+    EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mSMSGlConfig,
+            1, &numConfigs));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+    mSmsEglSurface = eglCreateWindowSurface(mEglDisplay, mSMSGlConfig,
+                                mANW.get(), NULL);
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+    ASSERT_NE(EGL_NO_SURFACE, mSmsEglSurface) ;
+
+    EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mSmsEglSurface, mSmsEglSurface,
+            mEglContext));
+    ASSERT_EQ(EGL_SUCCESS, eglGetError());
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL();
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    writer.stop();
+}
+// Test to examine whether we can render GL buffers into the surface
+// created with the native window handle
+TEST_F(SurfaceMediaSourceGLTest, RenderingToRecordableEGLSurfaceWorks) {
+    LOGV("Test # %d", testId++);
+    LOGV("RenderingToRecordableEGLSurfaceWorks *********************");
+    // Do the producer side of things
+    glClearColor(0.6, 0.6, 0.6, 0.6);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glEnable(GL_SCISSOR_TEST);
+    glScissor(4, 4, 4, 4);
+    glClearColor(1.0, 0.0, 0.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(24, 48, 4, 4);
+    glClearColor(0.0, 1.0, 0.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    glScissor(37, 17, 4, 4);
+    glClearColor(0.0, 0.0, 1.0, 1.0);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    EXPECT_TRUE(checkPixel( 0,  0, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(63,  0, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(63, 63, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 0, 63, 153, 153, 153, 153));
+
+    EXPECT_TRUE(checkPixel( 4,  7, 255,   0,   0, 255));
+    EXPECT_TRUE(checkPixel(25, 51,   0, 255,   0, 255));
+    EXPECT_TRUE(checkPixel(40, 19,   0,   0, 255, 255));
+    EXPECT_TRUE(checkPixel(29, 51, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 5, 32, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(13,  8, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(46,  3, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(30, 33, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 6, 52, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(55, 33, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(16, 29, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 1, 30, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(41, 37, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(46, 29, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel(15, 25, 153, 153, 153, 153));
+    EXPECT_TRUE(checkPixel( 3, 52, 153, 153, 153, 153));
+}
+
+// Test to examine the actual encoding with GL buffers
+// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
+// The same pattern is rendered every frame
+TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaSameImageEachBufNpotWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual Recorder ***********");
+    LOGV("************** GL Filling the buffers ***********");
+    // Note: No need to set the color format for the buffers. The color
+    // format is carried in the Gralloc buffers themselves.
+
+    const char *fileName = "/sdcard/outputSurfEncMSourceGL.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
+
+    // Get the reference to the SurfaceMediaSource living in
+    // mediaserver, created by StagefrightRecorder
+    setUpEGLSurfaceFromMediaRecorder(mr);
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL();
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
+}
+
+// Test to examine the actual encoding from the GL Buffers
+// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
+// A different pattern is rendered every frame
+TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaDiffImageEachBufNpotWrite) {
+    LOGV("Test # %d", testId++);
+    LOGV("************** Testing the whole pipeline with actual Recorder ***********");
+    LOGV("************** Diff GL Filling the buffers ***********");
+    // Note: No need to set the color format for the buffers. The color
+    // format is carried in the Gralloc buffers themselves.
+
+    const char *fileName = "/sdcard/outputSurfEncMSourceGLDiff.mp4";
+    int fd = open(fileName, O_RDWR | O_CREAT, 0744);
+    if (fd < 0) {
+        LOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
+    }
+    CHECK(fd >= 0);
+
+    sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+            OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
+
+    // Get the reference to the SurfaceMediaSource living in
+    // mediaserver, created by StagefrightRecorder
+    setUpEGLSurfaceFromMediaRecorder(mr);
+
+    int32_t nFramesCount = 0;
+    while (nFramesCount <= 300) {
+        oneBufferPassGL(nFramesCount);
+        nFramesCount++;
+        LOGV("framesCount = %d", nFramesCount);
+    }
+
+    ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_EGL));
+    LOGV("Stopping MediaRecorder...");
+    CHECK_EQ(OK, mr->stop());
+    mr.clear();
+    close(fd);
+}
 } // namespace android