Rebuild EVS app with richer feature set

Adds support for additional data formats and multiple cameras, and lays
the foundation for GL-rendered overlays.

Test: build and run on Gordon Peak with Logitech USB camera
Change-Id: I60403361a91593d4e387df67a563522547efa981
diff --git a/evs/app/Android.mk b/evs/app/Android.mk
index 1978616..5cf6eec 100644
--- a/evs/app/Android.mk
+++ b/evs/app/Android.mk
@@ -6,8 +6,17 @@
 LOCAL_SRC_FILES := \
     evs_app.cpp \
     EvsStateControl.cpp \
-    StreamHandler.cpp \
+    RenderBase.cpp \
+    RenderDirectView.cpp \
+    RenderPixelCopy.cpp \
     ConfigManager.cpp \
+    glError.cpp \
+    shader.cpp \
+    TexWrapper.cpp \
+    VideoTex.cpp \
+    StreamHandler.cpp \
+    WindowSurface.cpp \
+    FormatConvert.cpp \
 
 LOCAL_C_INCLUDES += \
     frameworks/base/include \
@@ -18,17 +27,18 @@
     liblog \
     libutils \
     libui \
-    libhwbinder \
+    libgui \
     libhidlbase \
     libhidltransport \
-    libGLESv1_CM \
-    libOpenSLES \
-    libtinyalsa \
+    libEGL \
+    libGLESv2 \
     libhardware \
+    libpng \
     android.hardware.automotive.evs@1.0 \
     android.hardware.automotive.vehicle@2.0 \
 
 LOCAL_STATIC_LIBRARIES := \
+    libmath \
     libjsoncpp \
 
 LOCAL_STRIP_MODULE := keep_symbols
@@ -36,7 +46,7 @@
 LOCAL_MODULE:= evs_app
 LOCAL_MODULE_TAGS := optional
 
-LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES -DEGL_EGLEXT_PROTOTYPES
+LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES -DEGL_EGLEXT_PROTOTYPES -DLOG_TAG=\"EVSAPP\"
 LOCAL_CFLAGS += -Wall -Werror -Wunused -Wunreachable-code
 
 include $(BUILD_EXECUTABLE)
diff --git a/evs/app/CarFromTop.png b/evs/app/CarFromTop.png
new file mode 100644
index 0000000..11f929e
--- /dev/null
+++ b/evs/app/CarFromTop.png
Binary files differ
diff --git a/evs/app/ConfigManager.cpp b/evs/app/ConfigManager.cpp
index 48966d9..07e570d 100644
--- a/evs/app/ConfigManager.cpp
+++ b/evs/app/ConfigManager.cpp
@@ -49,24 +49,6 @@
 }
 
 
-static bool ReadChildNodeAsUint(const char* groupName,
-                                const Json::Value& parentNode,
-                                const char* childName,
-                                unsigned* value) {
-    // Must have a place to put the value!
-    assert(value);
-
-    Json::Value childNode = parentNode[childName];
-    if (!childNode.isNumeric()) {
-        printf("Missing or invalid field %s in record %s", childName, groupName);
-        return false;
-    }
-
-    *value = childNode.asUInt();
-    return true;
-}
-
-
 bool ConfigManager::initialize(const char* configFileName)
 {
     bool complete = true;
@@ -110,8 +92,6 @@
             printf("Invalid configuration format -- we expect a display description\n");
             return false;
         }
-        complete &= ReadChildNodeAsUint("display", displayNode, "width",      &mPixelWidth);
-        complete &= ReadChildNodeAsUint("display", displayNode, "height",     &mPixelHeight);
         complete &= readChildNodeAsFloat("display", displayNode, "frontRange", &mFrontRangeInCarSpace);
         complete &= readChildNodeAsFloat("display", displayNode, "rearRange",  &mRearRangeInCarSpace);
     }
@@ -147,7 +127,6 @@
             // Get data from the configuration file
             Json::Value nameNode = node.get("cameraId", "MISSING");
             const char *cameraId = nameNode.asCString();
-            printf("Loading camera %s\n", cameraId);
 
             Json::Value usageNode = node.get("function", "");
             const char *function = usageNode.asCString();
diff --git a/evs/app/ConfigManager.h b/evs/app/ConfigManager.h
index 1b61146..cf1c4df 100644
--- a/evs/app/ConfigManager.h
+++ b/evs/app/ConfigManager.h
@@ -23,8 +23,8 @@
 class ConfigManager {
 public:
     struct CameraInfo {
-        std::string cameraId = 0;   // The name of the camera from the point of view of the HAL
-        std::string function = 0;   // The expected use for this camera ("reverse", "left", "right")
+        std::string cameraId = "";  // The name of the camera from the point of view of the HAL
+        std::string function = "";  // The expected use for this camera ("reverse", "left", "right")
         float position[3] = {0};    // x, y, z -> right, fwd, up in the units of car space
         float yaw   = 0;    // radians positive to the left (right hand rule about global z axis)
         float pitch = 0;    // positive upward (ie: right hand rule about local x axis)
@@ -83,8 +83,6 @@
     float mRearExtent;
 
     // Display information
-    unsigned mPixelWidth;
-    unsigned mPixelHeight;
     float    mFrontRangeInCarSpace;     // How far the display extends in front of the car
     float    mRearRangeInCarSpace;      // How far the display extends behind the car
 
diff --git a/evs/app/EvsStateControl.cpp b/evs/app/EvsStateControl.cpp
index ec29e39..eb3c698 100644
--- a/evs/app/EvsStateControl.cpp
+++ b/evs/app/EvsStateControl.cpp
@@ -13,10 +13,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#define LOG_TAG "EVSAPP"
-
 #include "EvsStateControl.h"
+#include "RenderDirectView.h"
+#include "RenderPixelCopy.h"
 
 #include <stdio.h>
 #include <string.h>
@@ -90,8 +89,95 @@
 }
 
 
-bool EvsStateControl::configureForVehicleState() {
-    ALOGD("configureForVehicleState");
+bool EvsStateControl::startUpdateLoop() {
+    // Create the thread and report success if it gets started
+    mRenderThread = std::thread([this](){ updateLoop(); });
+    return mRenderThread.joinable();
+}
+
+
+void EvsStateControl::postCommand(const Command& cmd) {
+    // Push the command onto the queue watched by updateLoop
+    mLock.lock();
+    mCommandQueue.push(cmd);
+    mLock.unlock();
+
+    // Send a signal to wake updateLoop in case it is asleep
+    mWakeSignal.notify_all();
+}
+
+
+void EvsStateControl::updateLoop() {
+    ALOGD("Starting EvsStateControl update loop");
+
+    bool run = true;
+    while (run) {
+        // Process incoming commands
+        {
+            std::lock_guard <std::mutex> lock(mLock);
+            while (!mCommandQueue.empty()) {
+                const Command& cmd = mCommandQueue.front();
+                switch (cmd.operation) {
+                case Op::EXIT:
+                    run = false;
+                    break;
+                case Op::CHECK_VEHICLE_STATE:
+                    // Just running selectStateForCurrentConditions below will take care of this
+                    break;
+                case Op::TOUCH_EVENT:
+                    // TODO:  Implement this given the x/y location of the touch event
+                    // Ignore for now
+                    break;
+                }
+                mCommandQueue.pop();
+            }
+        }
+
+        // Review vehicle state and choose an appropriate renderer
+        if (!selectStateForCurrentConditions()) {
+            ALOGE("selectStateForCurrentConditions failed so we're going to die");
+            break;
+        }
+
+        // If we have an active renderer, give it a chance to draw
+        if (mCurrentRenderer) {
+            // Get the output buffer we'll use to display the imagery
+            BufferDesc tgtBuffer = {};
+            mDisplay->getTargetBuffer([&tgtBuffer](const BufferDesc& buff) {
+                                          tgtBuffer = buff;
+                                      }
+            );
+
+            if (tgtBuffer.memHandle == nullptr) {
+                ALOGE("Didn't get requested output buffer -- skipping this frame.");
+            } else {
+                // Generate our output image
+                if (!mCurrentRenderer->drawFrame(tgtBuffer)) {
+                    // If drawing failed, we want to exit quickly so an app restart can happen
+                    run = false;
+                }
+
+                // Send the finished image back for display
+                mDisplay->returnTargetBufferForDisplay(tgtBuffer);
+            }
+        } else {
+            // No active renderer, so sleep until somebody wakes us with another command
+            std::unique_lock<std::mutex> lock(mLock);
+            mWakeSignal.wait(lock);
+        }
+    }
+
+    ALOGW("EvsStateControl update loop ending");
+
+    // TODO:  Fix it so we can exit cleanly from the main thread instead
+    printf("Shutting down app due to state control loop ending\n");
+    ALOGE("KILLING THE APP FROM THE EvsStateControl LOOP ON DRAW FAILURE!!!");
+    exit(1);
+}
+
+
+bool EvsStateControl::selectStateForCurrentConditions() {
+    ALOGD("selectStateForCurrentConditions");
 
     static int32_t sDummyGear   = int32_t(VehicleGear::GEAR_REVERSE);
     static int32_t sDummySignal = int32_t(VehicleTurnSignal::NONE);
@@ -133,12 +219,10 @@
         desiredState = LEFT;
     }
 
-    // Apply the desire state
     ALOGV("Selected state %d.", desiredState);
-    configureEvsPipeline(desiredState);
 
-    // Operation was successful
-    return true;
+    // Apply the desired state
+    return configureEvsPipeline(desiredState);
 }
 
 
@@ -146,23 +230,15 @@
     ALOGD("invokeGet");
 
     StatusCode status = StatusCode::TRY_AGAIN;
-    bool called = false;
 
     // Call the Vehicle HAL, which will block until the callback is complete
     mVehicle->get(*pRequestedPropValue,
-                  [pRequestedPropValue, &status, &called]
+                  [pRequestedPropValue, &status]
                   (StatusCode s, const VehiclePropValue& v) {
                        status = s;
                        *pRequestedPropValue = v;
-                       called = true;
                   }
     );
-    // This should be true as long as the get call is block as it should
-    // TODO:  Once we've got some milage on this code and the underlying HIDL services,
-    // we should remove this belt-and-suspenders check for correct operation as unnecessary.
-    if (!called) {
-        ALOGE("VehicleNetwork query did not run as expected.");
-    }
 
     return status;
 }
@@ -176,49 +252,49 @@
         return true;
     }
 
-    // See if we actually have to change cameras
-    if (mCameraInfo[mCurrentState].cameraId != mCameraInfo[desiredState].cameraId) {
-        ALOGI("Camera change required");
-        ALOGD("  Current cameraId (%d) = %s", mCurrentState,
-              mCameraInfo[mCurrentState].cameraId.c_str());
-        ALOGD("  Desired cameraId (%d) = %s", desiredState,
-              mCameraInfo[desiredState].cameraId.c_str());
+    ALOGD("  Current state (%d) = %s", mCurrentState,
+          mCameraInfo[mCurrentState].cameraId.c_str());
+    ALOGD("  Desired state (%d) = %s", desiredState,
+          mCameraInfo[desiredState].cameraId.c_str());
 
-        // Yup, we need to change cameras, so close the previous one, if necessary.
-        if (mCurrentCamera != nullptr) {
-            mCurrentStreamHandler->blockingStopStream();
-            mCurrentStreamHandler = nullptr;
-            mCurrentCamera = nullptr;
+    // Since we're changing states, shut down the current renderer
+    if (mCurrentRenderer != nullptr) {
+        mCurrentRenderer->deactivate();
+        mCurrentRenderer = nullptr; // It's a smart pointer, so destructs on assignment to null
+    }
+
+    // Do we need a new direct view renderer?
+    if (desiredState == PARKING) {
+        // We need a new top view renderer
+        // TODO:  Implement this by instantiating a new RenderTopView
+    } else if (!mCameraInfo[desiredState].cameraId.empty()) {
+        // We have a camera assigned to this state for direct view
+        mCurrentRenderer = std::make_unique<RenderDirectView>(mEvs, mCameraInfo[desiredState]);
+//        mCurrentRenderer = std::make_unique<RenderPixelCopy>(mEvs, mCameraInfo[desiredState]);
+        if (!mCurrentRenderer) {
+            ALOGE("Failed to construct renderer.  Skipping state change.");
+            return false;
+        }
+    }
+
+    // Now set the display state based on whether we have a video feed to show
+    if (mCurrentRenderer == nullptr) {
+        ALOGD("Turning off the display");
+        mDisplay->setDisplayState(DisplayState::NOT_VISIBLE);
+    } else {
+        // Start the camera stream
+        ALOGD("Starting camera stream");
+        if (!mCurrentRenderer->activate()) {
+            ALOGE("New renderer failed to activate");
+            return false;
         }
 
-        // Now do we need a new camera?
-        if (!mCameraInfo[desiredState].cameraId.empty()) {
-            // Need a new camera, so open it
-            ALOGD("Open camera %s", mCameraInfo[desiredState].cameraId.c_str());
-            mCurrentCamera = mEvs->openCamera(mCameraInfo[desiredState].cameraId);
-
-            // If we didn't get the camera we asked for, we need to bail out and try again later
-            if (mCurrentCamera == nullptr) {
-                ALOGE("Failed to open EVS camera.  Skipping state change.");
-                return false;
-            }
-        }
-
-        // Now set the display state based on whether we have a camera feed to show
-        if (mCurrentCamera == nullptr) {
-            ALOGD("Turning off the display");
-            mDisplay->setDisplayState(DisplayState::NOT_VISIBLE);
-        } else {
-            // Create the stream handler object to receive and forward the video frames
-            mCurrentStreamHandler = new StreamHandler(mCurrentCamera, mDisplay);
-
-            // Start the camera stream
-            ALOGD("Starting camera stream");
-            mCurrentStreamHandler->startStream();
-
-            // Activate the display
-            ALOGD("Arming the display");
-            mDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME);
+        // Activate the display
+        ALOGD("Arming the display");
+        Return<EvsResult> result = mDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME);
+        if (result != EvsResult::OK) {
+            ALOGE("setDisplayState returned an error (%d)", (EvsResult)result);
+            return false;
         }
     }
 
diff --git a/evs/app/EvsStateControl.h b/evs/app/EvsStateControl.h
index d8ce9e6..3a830cf 100644
--- a/evs/app/EvsStateControl.h
+++ b/evs/app/EvsStateControl.h
@@ -17,13 +17,16 @@
 #ifndef CAR_EVS_APP_EVSSTATECONTROL_H
 #define CAR_EVS_APP_EVSSTATECONTROL_H
 
+#include "StreamHandler.h"
+#include "ConfigManager.h"
+#include "RenderBase.h"
+
 #include <android/hardware/automotive/vehicle/2.0/IVehicle.h>
 #include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
 #include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>
 #include <android/hardware/automotive/evs/1.0/IEvsCamera.h>
 
-#include "StreamHandler.h"
-#include "ConfigManager.h"
+#include <thread>
 
 
 using namespace ::android::hardware::automotive::evs::V1_0;
@@ -35,6 +38,11 @@
 using ::android::sp;
 
 
+/*
+ * This class runs the main update loop for the EVS application.  It will sleep when it has
+ * nothing to do.  It provides a thread safe way for other threads to wake it and pass commands
+ * to it.
+ */
 class EvsStateControl {
 public:
     EvsStateControl(android::sp <IVehicle>       pVnet,
@@ -43,18 +51,37 @@
                     const ConfigManager&         config);
 
     enum State {
-        REVERSE = 0,
+        OFF = 0,
+        REVERSE,
         LEFT,
         RIGHT,
-        OFF,
+        PARKING,
         NUM_STATES  // Must come last
     };
 
-    bool configureForVehicleState();
+    enum class Op {
+        EXIT,
+        CHECK_VEHICLE_STATE,
+        TOUCH_EVENT,
+    };
+
+    struct Command {
+        Op          operation;
+        uint32_t    arg1;
+        uint32_t    arg2;
+    };
+
+    // This spawns a new thread that is expected to run continuously
+    bool startUpdateLoop();
+
+    // Safe to be called from other threads
+    void postCommand(const Command& cmd);
 
 private:
+    void updateLoop();
     StatusCode invokeGet(VehiclePropValue *pRequestedPropValue);
-    bool configureEvsPipeline(State desiredState);
+    bool selectStateForCurrentConditions();
+    bool configureEvsPipeline(State desiredState);  // Only call from one thread!
 
     sp<IVehicle>                mVehicle;
     sp<IEvsEnumerator>          mEvs;
@@ -63,11 +90,17 @@
     VehiclePropValue            mGearValue;
     VehiclePropValue            mTurnSignalValue;
 
-    ConfigManager::CameraInfo   mCameraInfo[State::NUM_STATES];
-    State                       mCurrentState;
-    sp<IEvsCamera>              mCurrentCamera;
+    State                       mCurrentState = OFF;
 
-    sp<StreamHandler>           mCurrentStreamHandler;
+    ConfigManager::CameraInfo   mCameraInfo[NUM_STATES] = {};
+    std::unique_ptr<RenderBase> mCurrentRenderer;
+
+    std::thread                 mRenderThread;  // The thread that runs the main rendering loop
+
+    // Other threads may want to spur us into action, so we provide a thread safe way to do that
+    std::mutex                  mLock;
+    std::condition_variable     mWakeSignal;
+    std::queue<Command>         mCommandQueue;
 };
 
 
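The queue, mutex, and condition variable added above follow a conventional worker-loop handoff.
As a reference point, here is a minimal standalone sketch of that pattern (simplified: the real
updateLoop() also keeps rendering while a renderer is active rather than blocking on the queue,
and this Worker class is my own example, not part of the change):

    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <thread>

    struct Command { int operation; };           // stand-in for EvsStateControl::Command

    class Worker {
    public:
        void start() { mThread = std::thread([this] { loop(); }); }

        // Callable from any thread, mirroring postCommand()
        void post(const Command& cmd) {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mQueue.push(cmd);
            }
            mWakeSignal.notify_all();            // wake the loop if it is sleeping
        }

        void stop() {
            post(Command{0});                    // 0 == EXIT in this sketch
            mThread.join();
        }

    private:
        void loop() {
            for (;;) {
                std::unique_lock<std::mutex> lock(mLock);
                mWakeSignal.wait(lock, [this] { return !mQueue.empty(); });
                Command cmd = mQueue.front();
                mQueue.pop();
                if (cmd.operation == 0) {
                    return;                      // exit requested
                }
                // ... handle other commands, then render or go back to sleep ...
            }
        }

        std::thread             mThread;
        std::mutex              mLock;
        std::condition_variable mWakeSignal;
        std::queue<Command>     mQueue;
    };

    int main() {
        Worker w;
        w.start();
        w.post(Command{1});                      // e.g. "check vehicle state"
        w.stop();
        return 0;
    }
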
diff --git a/evs/app/EvsVehicleListener.h b/evs/app/EvsVehicleListener.h
index b15ac0d..2935ab0 100644
--- a/evs/app/EvsVehicleListener.h
+++ b/evs/app/EvsVehicleListener.h
@@ -19,7 +19,11 @@
 
 #include "EvsStateControl.h"
 
-
+/*
+ * This class listens for asynchronous updates from the Vehicle HAL.  While the EVS
+ * application is active, it can poll the vehicle state directly.  However, when it goes to
+ * sleep, we need these notifications to bring it active again.
+ */
 class EvsVehicleListener : public IVehicleCallback {
 public:
     // Methods from ::android::hardware::automotive::vehicle::V2_0::IVehicleCallback follow.
@@ -59,7 +63,12 @@
             waitForEvents(5000);
 
             // If we were delivered an event (or it's been a while) update as necessary
-            pStateController->configureForVehicleState();
+            EvsStateControl::Command cmd = {
+                .operation = EvsStateControl::Op::CHECK_VEHICLE_STATE,
+                .arg1      = 0,
+                .arg2      = 0,
+            };
+            pStateController->postCommand(cmd);
         }
     }
 
diff --git a/evs/app/FormatConvert.cpp b/evs/app/FormatConvert.cpp
new file mode 100644
index 0000000..bd83ba3
--- /dev/null
+++ b/evs/app/FormatConvert.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FormatConvert.h"
+
+
+// Round up to the nearest multiple of the given alignment value
+template<unsigned alignment>
+int align(int value) {
+    static_assert((alignment && !(alignment & (alignment - 1))),
+                  "alignment must be a power of 2");
+
+    unsigned mask = alignment - 1;
+    return (value + mask) & ~mask;
+}
+
+
+// Limit the given value to the provided range.  :)
+static inline float clamp(float v, float min, float max) {
+    if (v < min) return min;
+    if (v > max) return max;
+    return v;
+}
+
+
+static uint32_t yuvToRgbx(const unsigned char Y, const unsigned char Uin, const unsigned char Vin) {
+    // Don't use this if you want to see the best performance.  :)
+    // Better to do this in a pixel shader if we really have to, but on actual
+    // embedded hardware we expect to be able to texture directly from the YUV data
+    float U = Uin - 128.0f;
+    float V = Vin - 128.0f;
+
+    float Rf = Y + 1.140f*V;
+    float Gf = Y - 0.395f*U - 0.581f*V;
+    float Bf = Y + 2.032f*U;
+    unsigned char R = (unsigned char)clamp(Rf, 0.0f, 255.0f);
+    unsigned char G = (unsigned char)clamp(Gf, 0.0f, 255.0f);
+    unsigned char B = (unsigned char)clamp(Bf, 0.0f, 255.0f);
+
+    return (R      ) |
+           (G <<  8) |
+           (B << 16) |
+           0xFF000000;  // Fill the alpha channel with ones
+}
+
+
+void copyNV21toRGB32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    // The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
+    // U/V array.  It assumes an even width and height for the overall image, and a horizontal
+    // stride that is an even multiple of 16 bytes for both the Y and UV arrays.
+    unsigned strideLum = align<16>(width);
+    unsigned sizeY = strideLum * height;
+    unsigned strideColor = strideLum;   // 1/2 the samples, but two interleaved channels
+    unsigned offsetUV = sizeY;
+
+    uint8_t* srcY = src;
+    uint8_t* srcUV = src+offsetUV;
+
+    for (unsigned r = 0; r < height; r++) {
+        // Note that we're walking the same UV row twice for even/odd luminance rows
+        uint8_t* rowY  = srcY  + r*strideLum;
+        uint8_t* rowUV = srcUV + (r/2 * strideColor);
+
+        uint32_t* rowDest = dst + r*dstStridePixels;
+
+        for (unsigned c = 0; c < width; c++) {
+            unsigned uCol = (c & ~1);   // uCol is always even and repeats 1:2 with Y values
+            unsigned vCol = uCol | 1;   // vCol is always odd
+            rowDest[c] = yuvToRgbx(rowY[c], rowUV[uCol], rowUV[vCol]);
+        }
+    }
+}
+
+
+void copyYV12toRGB32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    // The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
+    // by another 1/2 x 1/2 V array.  It assumes an even width and height for the overall image,
+    // and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
+    // and V arrays.
+    unsigned strideLum = align<16>(width);
+    unsigned sizeY = strideLum * height;
+    unsigned strideColor = align<16>(strideLum/2);
+    unsigned sizeColor = strideColor * height/2;
+    unsigned offsetU = sizeY;
+    unsigned offsetV = sizeY + sizeColor;
+
+    uint8_t* srcY = src;
+    uint8_t* srcU = src+offsetU;
+    uint8_t* srcV = src+offsetV;
+
+    for (unsigned r = 0; r < height; r++) {
+        // Note that we're walking the same U and V rows twice for even/odd luminance rows
+        uint8_t* rowY = srcY + r*strideLum;
+        uint8_t* rowU = srcU + (r/2 * strideColor);
+        uint8_t* rowV = srcV + (r/2 * strideColor);
+
+        uint32_t* rowDest = dst + r*dstStridePixels;
+
+        for (unsigned c = 0; c < width; c++) {
+            rowDest[c] = yuvToRgbx(rowY[c], rowU[c], rowV[c]);
+        }
+    }
+}
+
+
+void copyYUYVtoRGB32(unsigned width, unsigned height,
+                     uint8_t* src, unsigned srcStridePixels,
+                     uint32_t* dst, unsigned dstStridePixels)
+{
+    uint32_t* srcWords = (uint32_t*)src;
+
+    const int srcRowPadding32 = srcStridePixels/2 - width/2;  // 2 bytes per pixel, 4 bytes per word
+    const int dstRowPadding32 = dstStridePixels   - width;    // 4 bytes per pixel, 4 bytes per word
+
+    for (unsigned r = 0; r < height; r++) {
+        for (unsigned c = 0; c < width/2; c++) {
+            // Note:  we're walking two pixels at a time here (even/odd)
+            uint32_t srcPixel = *srcWords++;
+
+            uint8_t Y1 = (srcPixel)       & 0xFF;
+            uint8_t U  = (srcPixel >> 8)  & 0xFF;
+            uint8_t Y2 = (srcPixel >> 16) & 0xFF;
+            uint8_t V  = (srcPixel >> 24) & 0xFF;
+
+            // On the RGB output, we're writing one pixel at a time
+            *(dst+0) = yuvToRgbx(Y1, U, V);
+            *(dst+1) = yuvToRgbx(Y2, U, V);
+            dst += 2;
+        }
+
+        // Skip over any extra data or end of row alignment padding
+        srcWords += srcRowPadding32;
+        dst += dstRowPadding32;
+    }
+}
+
+
+void copyMatchedInterleavedFormats(unsigned width, unsigned height,
+                                   void* src, unsigned srcStridePixels,
+                                   void* dst, unsigned dstStridePixels,
+                                   unsigned pixelSize) {
+    for (unsigned row = 0; row < height; row++) {
+        // Copy the entire row of pixel data
+        memcpy(dst, src, width * pixelSize);
+
+        // Advance to the next row (keeping in mind that stride here is in units of pixels)
+        src = (uint8_t*)src + srcStridePixels * pixelSize;
+        dst = (uint8_t*)dst + dstStridePixels * pixelSize;
+    }
+}
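
A standalone sketch (example numbers of my own, not from the patch) that works through the
plane offsets the NV21 and YV12 converters above assume, for a hypothetical 642x480 frame
where the 16-byte alignment actually adds row padding:

    #include <cstdio>

    template <unsigned alignment>
    static int align(int value) {
        static_assert(alignment && !(alignment & (alignment - 1)), "alignment must be a power of 2");
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        const int width = 642, height = 480;        // width is not a multiple of 16
        const int strideLum = align<16>(width);     // 656: each Y row padded to a 16 byte boundary
        const int sizeY = strideLum * height;       // 314880: where the chroma data begins

        // NV21: one interleaved chroma plane, same stride as Y, half as many rows
        std::printf("NV21 chroma plane at offset %d, stride %d\n", sizeY, strideLum);

        // YV12: two separate half-resolution chroma planes, each with its own aligned stride
        const int strideColor = align<16>(strideLum / 2);    // 336
        const int sizeColor   = strideColor * height / 2;    // 80640
        std::printf("YV12 chroma planes at offsets %d and %d, stride %d\n",
                    sizeY, sizeY + sizeColor, strideColor);
        return 0;
    }
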
diff --git a/evs/app/FormatConvert.h b/evs/app/FormatConvert.h
new file mode 100644
index 0000000..3ff1eec
--- /dev/null
+++ b/evs/app/FormatConvert.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EVS_VTS_FORMATCONVERT_H
+#define EVS_VTS_FORMATCONVERT_H
+
+#include <queue>
+#include <stdint.h>
+
+
+// Given an image buffer in NV21 format (HAL_PIXEL_FORMAT_YCRCB_420_SP), output 32bit RGBx values.
+// The NV21 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 interleaved
+// U/V array.  It assumes an even width and height for the overall image, and a horizontal
+// stride that is an even multiple of 16 bytes for both the Y and UV arrays.
+void copyNV21toRGB32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels);
+
+
+// Given an image buffer in YV12 format (HAL_PIXEL_FORMAT_YV12), output 32bit RGBx values.
+// The YV12 format provides a Y array of 8bit values, followed by a 1/2 x 1/2 U array, followed
+// by another 1/2 x 1/2 V array.  It assumes an even width and height for the overall image,
+// and a horizontal stride that is an even multiple of 16 bytes for each of the Y, U,
+// and V arrays.
+void copyYV12toRGB32(unsigned width, unsigned height,
+                     uint8_t* src,
+                     uint32_t* dst, unsigned dstStridePixels);
+
+
+// Given an image buffer in YUYV format (HAL_PIXEL_FORMAT_YCBCR_422_I), output 32bit RGBx values.
+// The YUYV format packs each horizontal pair of pixels into four bytes (Y0 U Y1 V), so the two
+// pixels in a pair share one U/V sample.  It assumes an even image width, and both strides are
+// given in pixels.
+void copyYUYVtoRGB32(unsigned width, unsigned height,
+                     uint8_t* src, unsigned srcStridePixels,
+                     uint32_t* dst, unsigned dstStridePixels);
+
+
+// Given a simple rectangular image buffer with an integer number of bytes per pixel,
+// copy the pixel values into a new rectangular buffer (potentially with a different stride).
+// This is typically used to copy RGBx data into an RGBx output buffer.
+void copyMatchedInterleavedFormats(unsigned width, unsigned height,
+                                   void* src, unsigned srcStridePixels,
+                                   void* dst, unsigned dstStridePixels,
+                                   unsigned pixelSize);
+
+#endif // EVS_VTS_FORMATCONVERT_H
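
To make the YUYV packing concrete, a tiny standalone example (values of my own, not from the
patch) of how copyYUYVtoRGB32() interprets one 32-bit source word as two output pixels that
share a chroma sample:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Bytes in memory: Y0=0x10, U=0x80, Y1=0xEB, V=0x80 -> little-endian word 0x80EB8010
        const uint32_t word = 0x80EB8010;

        const uint8_t y0 = (word)       & 0xFF;   // luma of the even pixel
        const uint8_t u  = (word >> 8)  & 0xFF;   // chroma shared by both pixels
        const uint8_t y1 = (word >> 16) & 0xFF;   // luma of the odd pixel
        const uint8_t v  = (word >> 24) & 0xFF;

        std::printf("pixel0: Y=%d U=%d V=%d\n", y0, u, v);   // 16 128 128 (near black)
        std::printf("pixel1: Y=%d U=%d V=%d\n", y1, u, v);   // 235 128 128 (near white)
        return 0;
    }
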
diff --git a/evs/app/RenderBase.cpp b/evs/app/RenderBase.cpp
new file mode 100644
index 0000000..d8bb7f0
--- /dev/null
+++ b/evs/app/RenderBase.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RenderBase.h"
+#include "glError.h"
+
+#include <log/log.h>
+#include <ui/GraphicBuffer.h>
+
+// Eventually we shouldn't need this dependency, but for now the
+// graphics allocator interface isn't fully supported on all platforms
+// and this is our work around.
+using ::android::GraphicBuffer;
+
+
+// OpenGL state shared among all renderers
+EGLDisplay   RenderBase::sDisplay = EGL_NO_DISPLAY;
+EGLContext   RenderBase::sContext = EGL_NO_CONTEXT;
+EGLSurface   RenderBase::sDummySurface = EGL_NO_SURFACE;
+GLuint       RenderBase::sFrameBuffer = -1;
+GLuint       RenderBase::sColorBuffer = -1;
+GLuint       RenderBase::sDepthBuffer = -1;
+EGLImageKHR  RenderBase::sKHRimage = EGL_NO_IMAGE_KHR;
+unsigned     RenderBase::sWidth  = 0;
+unsigned     RenderBase::sHeight = 0;
+
+
+
+bool RenderBase::prepareGL() {
+    // Just trivially return success if we're already prepared
+    if (sDisplay != EGL_NO_DISPLAY) {
+        return true;
+    }
+
+    // Hardcoded to RGBx output display
+    const EGLint config_attribs[] = {
+        // Tag                  Value
+        EGL_RENDERABLE_TYPE,    EGL_OPENGL_ES2_BIT,
+        EGL_RED_SIZE,           8,
+        EGL_GREEN_SIZE,         8,
+        EGL_BLUE_SIZE,          8,
+        EGL_NONE
+    };
+
+    // Select OpenGL ES v 3
+    const EGLint context_attribs[] = {EGL_CONTEXT_CLIENT_VERSION, 3, EGL_NONE};
+
+
+    // Set up our OpenGL ES context associated with the default display (though we won't be visible)
+    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+    if (display == EGL_NO_DISPLAY) {
+        ALOGE("Failed to get egl display");
+        return false;
+    }
+
+    EGLint major = 0;
+    EGLint minor = 0;
+    if (!eglInitialize(display, &major, &minor)) {
+        ALOGE("Failed to initialize EGL: %s", getEGLError());
+        return false;
+    } else {
+        ALOGI("Intiialized EGL at %d.%d", major, minor);
+    }
+
+
+    // Select the configuration that "best" matches our desired characteristics
+    EGLConfig egl_config;
+    EGLint num_configs;
+    if (!eglChooseConfig(display, config_attribs, &egl_config, 1, &num_configs)) {
+        ALOGE("eglChooseConfig() failed with error: %s", getEGLError());
+        return false;
+    }
+
+
+    // Create a dummy pbuffer so we have a surface to bind -- we never intend to draw to this
+    // because attachRenderTarget will be called first.
+    EGLint surface_attribs[] = { EGL_WIDTH, 1, EGL_HEIGHT, 1, EGL_NONE };
+    sDummySurface = eglCreatePbufferSurface(display, egl_config, surface_attribs);
+    if (sDummySurface == EGL_NO_SURFACE) {
+        ALOGE("Failed to create OpenGL ES Dummy surface: %s", getEGLError());
+        return false;
+    } else {
+        ALOGI("Dummy surface looks good!  :)");
+    }
+
+
+    //
+    // Create the EGL context
+    //
+    EGLContext context = eglCreateContext(display, egl_config, EGL_NO_CONTEXT, context_attribs);
+    if (context == EGL_NO_CONTEXT) {
+        ALOGE("Failed to create OpenGL ES Context: %s", getEGLError());
+        return false;
+    }
+
+
+    // Activate our render target for drawing
+    if (!eglMakeCurrent(display, sDummySurface, sDummySurface, context)) {
+        ALOGE("Failed to make the OpenGL ES Context current: %s", getEGLError());
+        return false;
+    } else {
+        ALOGI("We made our context current!  :)");
+    }
+
+
+    // Report the extensions available on this implementation
+    const char* gl_extensions = (const char*) glGetString(GL_EXTENSIONS);
+    ALOGI("GL EXTENSIONS:\n  %s", gl_extensions);
+
+
+    // Reserve handles for the color and depth targets we'll be setting up
+    glGenRenderbuffers(1, &sColorBuffer);
+    glGenRenderbuffers(1, &sDepthBuffer);
+
+    // Set up the frame buffer object we can modify and use for off screen rendering
+    glGenFramebuffers(1, &sFrameBuffer);
+    glBindFramebuffer(GL_FRAMEBUFFER, sFrameBuffer);
+
+
+    // Now that we're assured success, store object handles we constructed
+    sDisplay = display;
+    sContext = context;
+
+    return true;
+}
+
+
+bool RenderBase::attachRenderTarget(const BufferDesc& tgtBuffer) {
+    // Hardcoded to RGBx for now
+    if (tgtBuffer.format != HAL_PIXEL_FORMAT_RGBA_8888) {
+        ALOGE("Unsupported target buffer format");
+        return false;
+    }
+
+    // create a GraphicBuffer from the existing handle
+    sp<GraphicBuffer> pGfxBuffer = new GraphicBuffer(tgtBuffer.memHandle,
+                                                     GraphicBuffer::CLONE_HANDLE,
+                                                     tgtBuffer.width, tgtBuffer.height,
+                                                     tgtBuffer.format, 1, // layer count
+                                                     GRALLOC_USAGE_HW_RENDER,
+                                                     tgtBuffer.stride);
+    if (pGfxBuffer.get() == nullptr) {
+        ALOGE("Failed to allocate GraphicBuffer to wrap image handle");
+        return false;
+    }
+
+    // Get a GL compatible reference to the graphics buffer we've been given
+    EGLint eglImageAttributes[] = {EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
+    EGLClientBuffer clientBuf = static_cast<EGLClientBuffer>(pGfxBuffer->getNativeBuffer());
+    sKHRimage = eglCreateImageKHR(sDisplay, EGL_NO_CONTEXT,
+                                  EGL_NATIVE_BUFFER_ANDROID, clientBuf,
+                                  eglImageAttributes);
+    if (sKHRimage == EGL_NO_IMAGE_KHR) {
+        ALOGE("error creating EGLImage for target buffer: %s", getEGLError());
+        return false;
+    }
+
+    // Construct a render buffer around the external buffer
+    glBindRenderbuffer(GL_RENDERBUFFER, sColorBuffer);
+    glEGLImageTargetRenderbufferStorageOES(GL_RENDERBUFFER, static_cast<GLeglImageOES>(sKHRimage));
+    if (eglGetError() != EGL_SUCCESS) {
+        ALOGI("glEGLImageTargetRenderbufferStorageOES => %s", getEGLError());
+        return false;
+    }
+
+    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, sColorBuffer);
+    if (eglGetError() != EGL_SUCCESS) {
+        ALOGE("glFramebufferRenderbuffer => %s", getEGLError());
+        return false;
+    }
+
+#if 0  // Do we need a z-buffer?  Does this code work correctly?
+    // Request a (local) depth buffer so we can z-test while drawing
+    glBindRenderbuffer(GL_RENDERBUFFER, sDepthBuffer);
+    if ((sWidth != tgtBuffer.width) || (sHeight != tgtBuffer.height)) {
+        // We can't reuse the depth buffer, so make a new one
+        sWidth = tgtBuffer.width;
+        sHeight = tgtBuffer.height;
+        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, sWidth, sHeight);
+        glFramebufferRenderbuffer(GL_FRAMEBUFFER,
+                                  GL_DEPTH_ATTACHMENT,
+                                  GL_RENDERBUFFER,
+                                  sDepthBuffer);
+    }
+#endif
+
+    GLenum checkResult = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+    if (checkResult != GL_FRAMEBUFFER_COMPLETE) {
+        ALOGE("Offscreen framebuffer not configured successfully (%d: %s)",
+              checkResult, getGLFramebufferError());
+        return false;
+    }
+
+    return true;
+}
+
+
+void RenderBase::detachRenderTarget() {
+    // Drop our external render target
+    if (sKHRimage != EGL_NO_IMAGE_KHR) {
+        eglDestroyImageKHR(sDisplay, sKHRimage);
+        sKHRimage = EGL_NO_IMAGE_KHR;
+    }
+}
\ No newline at end of file
diff --git a/evs/app/RenderBase.h b/evs/app/RenderBase.h
new file mode 100644
index 0000000..df34639
--- /dev/null
+++ b/evs/app/RenderBase.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAR_EVS_APP_RENDERBASE_H
+#define CAR_EVS_APP_RENDERBASE_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+#include <GLES3/gl3ext.h>
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+
+using namespace ::android::hardware::automotive::evs::V1_0;
+using ::android::sp;
+
+
+/*
+ * Abstract base class for the workhorse classes that handle the user interaction and display for
+ * each mode of the EVS application.
+ */
+class RenderBase {
+public:
+    virtual ~RenderBase() {};
+
+    virtual bool activate() = 0;
+    virtual void deactivate() = 0;
+
+    virtual bool drawFrame(const BufferDesc& tgtBuffer) = 0;
+
+protected:
+    static bool prepareGL();
+
+    static bool attachRenderTarget(const BufferDesc& tgtBuffer);
+    static void detachRenderTarget();
+
+    // OpenGL state shared among all renderers
+    static EGLDisplay   sDisplay;
+    static EGLContext   sContext;
+    static EGLSurface   sDummySurface;
+    static GLuint       sFrameBuffer;
+    static GLuint       sColorBuffer;
+    static GLuint       sDepthBuffer;
+
+    static EGLImageKHR  sKHRimage;
+
+    static unsigned     sWidth;
+    static unsigned     sHeight;
+};
+
+
+#endif //CAR_EVS_APP_RENDERBASE_H
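
To illustrate how this base class is meant to be extended (the subclass below is hypothetical
and not part of the change), a renderer that simply fills the display with a solid color only
needs the protected helpers declared above:

    #include "RenderBase.h"

    // Trivial renderer: fills the target buffer with a solid color every frame.
    class RenderSolidColor : public RenderBase {
    public:
        virtual bool activate() {
            // Nothing to set up beyond the shared EGL/GL state
            return prepareGL();
        }

        virtual void deactivate() {
            // Nothing is held between activations
        }

        virtual bool drawFrame(const BufferDesc& tgtBuffer) {
            // Point GL at the buffer the display gave us
            if (!attachRenderTarget(tgtBuffer)) {
                return false;
            }

            glViewport(0, 0, tgtBuffer.width, tgtBuffer.height);
            glClearColor(0.0f, 0.3f, 0.0f, 1.0f);
            glClear(GL_COLOR_BUFFER_BIT);
            glFinish();

            detachRenderTarget();
            return true;
        }
    };
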
diff --git a/evs/app/RenderDirectView.cpp b/evs/app/RenderDirectView.cpp
new file mode 100644
index 0000000..2f89d3b
--- /dev/null
+++ b/evs/app/RenderDirectView.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RenderDirectView.h"
+#include "VideoTex.h"
+#include "glError.h"
+#include "shader.h"
+#include "shader_simpleTex.h"
+#include "shader_testColors.h"
+
+#include <log/log.h>
+
+#include <math/mat4.h>
+
+
+RenderDirectView::RenderDirectView(sp<IEvsEnumerator> enumerator,
+                                   const ConfigManager::CameraInfo& cam) {
+    mEnumerator = enumerator;
+    mCameraInfo = cam;
+}
+
+
+bool RenderDirectView::activate() {
+    // Ensure GL is ready to go...
+    if (!prepareGL()) {
+        ALOGE("Error initializing GL");
+        return false;
+    }
+
+    // Load our shader program if we don't have it already
+    if (!mShaderProgram) {
+        mShaderProgram = buildShaderProgram(vtxShader_simpleTexture,
+                                            pixShader_simpleTexture,
+                                            "simpleTexture");
+        if (!mShaderProgram) {
+            ALOGE("Error buliding shader program");
+            return false;
+        }
+    }
+
+    // TODO:  Remove this once we're done testing with it
+    if (!mShaderTestColors) {
+        mShaderTestColors = buildShaderProgram(vtxShader_testColors,
+                                               pixShader_testColors,
+                                               "testColors");
+        if (!mShaderTestColors) {
+            ALOGE("Error building shader program");
+            return false;
+        }
+    }
+
+    // Construct our video texture
+    mTexture.reset(createVideoTexture(mEnumerator, mCameraInfo.cameraId.c_str(), sDisplay));
+    if (!mTexture) {
+        ALOGE("Failed to set up video texture for %s (%s)",
+              mCameraInfo.cameraId.c_str(), mCameraInfo.function.c_str());
+        return false;
+    }
+
+    return true;
+}
+
+
+void RenderDirectView::deactivate() {
+    // Release our video texture
+    // We can't hold onto it because some other Render object might need the same camera
+    // TODO:  If start/stop costs become a problem, we could share video textures
+    mTexture = nullptr;
+}
+
+
+bool RenderDirectView::drawFrame(const BufferDesc& tgtBuffer) {
+    // Tell GL to render to the given buffer
+    if (!attachRenderTarget(tgtBuffer)) {
+        ALOGE("Failed to attached render target");
+        return false;
+    }
+
+    // Set the viewport
+    glViewport(0, 0, tgtBuffer.width, tgtBuffer.height);
+
+#if 0   // We don't actually need the clear if we're going to cover the whole screen anyway
+    // Clear the color buffer
+    glClearColor(0.8f, 0.1f, 0.2f, 1.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+#endif
+
+    // Select our screen space simple texture shader
+    glUseProgram(mShaderProgram);
+
+    // Set up the model to clip space transform (identity matrix if we're modeling in screen space)
+    GLint loc = glGetUniformLocation(mShaderProgram, "cameraMat");
+    if (loc < 0) {
+        ALOGE("Couldn't set shader parameter 'cameraMat'");
+        return false;
+    } else {
+        const android::mat4 identityMatrix;
+        glUniformMatrix4fv(loc, 1, false, &identityMatrix[0][0]);
+    }
+
+
+    // Bind the texture and assign it to the shader's sampler
+    mTexture->refresh();
+    glActiveTexture(GL_TEXTURE0);
+    glBindTexture(GL_TEXTURE_2D, mTexture->glId());
+
+
+#if 0
+    static TexWrapper* sTestTexture = createTextureFromPng("CarFromTop.png");
+    if (sTestTexture) {
+        static int tickTock = 0;
+        tickTock =~tickTock;
+        if (tickTock) {
+            printf("tick...");
+            glBindTexture(GL_TEXTURE_2D, sTestTexture->glId());
+        } else {
+            printf("tock\n");
+        }
+    }
+#endif
+
+    GLint sampler = glGetUniformLocation(mShaderProgram, "tex");
+    if (sampler < 0) {
+        ALOGE("Couldn't set shader parameter 'tex'");
+        return false;
+    } else {
+        // Tell the sampler we looked up from the shader to use texture slot 0 as its source
+        glUniform1i(sampler, 0);
+    }
+
+    // We want our image to show up opaque regardless of alpha values
+    glDisable(GL_BLEND);
+
+
+    // Draw a rectangle on the screen
+    // TODO:  We pulled in from the edges for now for diagnostic purposes...
+#if 1
+    GLfloat vertsCarPos[] = { -1.0,  1.0, 0.0f,   // left top in window space
+                               1.0,  1.0, 0.0f,   // right top
+                              -1.0, -1.0, 0.0f,   // left bottom
+                               1.0, -1.0, 0.0f    // right bottom
+    };
+#else
+    GLfloat vertsCarPos[] = { -0.8,  0.8, 0.0f,   // left top in window space
+                               0.8,  0.8, 0.0f,   // right top
+                              -0.8, -0.8, 0.0f,   // left bottom
+                               0.8, -0.8, 0.0f    // right bottom
+    };
+#endif
+    // TODO:  We're flipping horizontally here, but should do it only for specified cameras!
+    GLfloat vertsCarTex[] = { 1.0f, 1.0f,   // left top
+                              0.0f, 1.0f,   // right top
+                              1.0f, 0.0f,   // left bottom
+                              0.0f, 0.0f    // right bottom
+    };
+    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsCarPos);
+    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, vertsCarTex);
+    glEnableVertexAttribArray(0);
+    glEnableVertexAttribArray(1);
+
+    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+
+    glDisableVertexAttribArray(0);
+    glDisableVertexAttribArray(1);
+
+
+    // Wait for the rendering to finish
+    glFinish();
+
+    return true;
+}
diff --git a/evs/app/RenderDirectView.h b/evs/app/RenderDirectView.h
new file mode 100644
index 0000000..c452297
--- /dev/null
+++ b/evs/app/RenderDirectView.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAR_EVS_APP_RENDERDIRECTVIEW_H
+#define CAR_EVS_APP_RENDERDIRECTVIEW_H
+
+
+#include "RenderBase.h"
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include "ConfigManager.h"
+#include "VideoTex.h"
+
+
+using namespace ::android::hardware::automotive::evs::V1_0;
+
+
+/*
+ * Renders the view from a single specified camera directly to the full display.
+ */
+class RenderDirectView: public RenderBase {
+public:
+    RenderDirectView(sp<IEvsEnumerator> enumerator, const ConfigManager::CameraInfo& cam);
+
+    virtual bool activate();
+    virtual void deactivate();
+
+    virtual bool drawFrame(const BufferDesc& tgtBuffer);
+
+protected:
+    sp<IEvsEnumerator>              mEnumerator;
+    ConfigManager::CameraInfo       mCameraInfo;
+
+    std::unique_ptr<VideoTex>       mTexture;
+
+    GLuint                          mShaderProgram = 0;
+    GLuint                          mShaderTestColors = 0;
+};
+
+
+#endif //CAR_EVS_APP_RENDERDIRECTVIEW_H
diff --git a/evs/app/RenderPixelCopy.cpp b/evs/app/RenderPixelCopy.cpp
new file mode 100644
index 0000000..0a586a4
--- /dev/null
+++ b/evs/app/RenderPixelCopy.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RenderPixelCopy.h"
+#include "FormatConvert.h"
+
+#include <log/log.h>
+
+
+RenderPixelCopy::RenderPixelCopy(sp<IEvsEnumerator> enumerator,
+                                 const ConfigManager::CameraInfo& cam) {
+    mEnumerator = enumerator;
+    mCameraInfo = cam;
+}
+
+
+bool RenderPixelCopy::activate() {
+    // Set up the camera to feed this texture
+    sp<IEvsCamera> pCamera = mEnumerator->openCamera(mCameraInfo.cameraId.c_str());
+    if (pCamera.get() == nullptr) {
+        ALOGE("Failed to allocate new EVS Camera interface");
+        return false;
+    }
+
+    // Initialize the stream that will help us update this texture's contents
+    sp<StreamHandler> pStreamHandler = new StreamHandler(pCamera);
+    if (pStreamHandler.get() == nullptr) {
+        ALOGE("failed to allocate FrameHandler");
+        return false;
+    }
+
+    // Start the video stream
+    if (!pStreamHandler->startStream()) {
+        ALOGE("start stream failed");
+        return false;
+    }
+
+    mStreamHandler = pStreamHandler;
+
+    return true;
+}
+
+
+void RenderPixelCopy::deactivate() {
+    mStreamHandler = nullptr;
+}
+
+
+bool RenderPixelCopy::drawFrame(const BufferDesc& tgtBuffer) {
+    bool success = true;
+
+    sp<android::GraphicBuffer> tgt = new android::GraphicBuffer(
+            tgtBuffer.memHandle, android::GraphicBuffer::CLONE_HANDLE,
+            tgtBuffer.width, tgtBuffer.height, tgtBuffer.format, 1, tgtBuffer.usage,
+            tgtBuffer.stride);
+
+    // Lock our target buffer for writing (should be RGBA8888 format)
+    uint32_t* tgtPixels = nullptr;
+    tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)&tgtPixels);
+
+    if (tgtPixels) {
+        if (tgtBuffer.format != HAL_PIXEL_FORMAT_RGBA_8888) {
+            // We always expect 32 bit RGB for the display output for now.  Is there a need for 565?
+            ALOGE("Diplay buffer is always expected to be 32bit RGBA");
+            success = false;
+        } else {
+            // Make sure we have the latest frame data
+            if (mStreamHandler->newFrameAvailable()) {
+                const BufferDesc& srcBuffer = mStreamHandler->getNewFrame();
+
+                // Lock our source buffer for reading (currently expected to be in NV21 format)
+                sp<android::GraphicBuffer> src = new android::GraphicBuffer(
+                        srcBuffer.memHandle, android::GraphicBuffer::CLONE_HANDLE,
+                        srcBuffer.width, srcBuffer.height, srcBuffer.format, 1, srcBuffer.usage,
+                        srcBuffer.stride);
+                unsigned char* srcPixels = nullptr;
+                src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void**)&srcPixels);
+                if (!srcPixels) {
+                    ALOGE("Failed to get pointer into src image data");
+                }
+
+                // Make sure we don't run off the end of either buffer
+                const unsigned width     = std::min(tgtBuffer.width,
+                                                    srcBuffer.width);
+                const unsigned height    = std::min(tgtBuffer.height,
+                                                    srcBuffer.height);
+
+                if (srcBuffer.format == HAL_PIXEL_FORMAT_YCRCB_420_SP) {   // 420SP == NV21
+                    copyNV21toRGB32(width, height,
+                                    srcPixels,
+                                    tgtPixels, tgtBuffer.stride);
+                } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YV12) { // YUV_420P == YV12
+                    copyYV12toRGB32(width, height,
+                                    srcPixels,
+                                    tgtPixels, tgtBuffer.stride);
+                } else if (srcBuffer.format == HAL_PIXEL_FORMAT_YCBCR_422_I) { // YUYV
+                    copyYUYVtoRGB32(width, height,
+                                    srcPixels, srcBuffer.stride,
+                                    tgtPixels, tgtBuffer.stride);
+                } else if (srcBuffer.format == tgtBuffer.format) {  // 32bit RGBA
+                    copyMatchedInterleavedFormats(width, height,
+                                                  srcPixels, srcBuffer.stride,
+                                                  tgtPixels, tgtBuffer.stride,
+                                                  tgtBuffer.pixelSize);
+                }
+
+                mStreamHandler->doneWithFrame(srcBuffer);
+            }
+        }
+    } else {
+        ALOGE("Failed to lock buffer contents for contents transfer");
+        success = false;
+    }
+
+    if (tgtPixels) {
+        tgt->unlock();
+    }
+
+    return success;
+}
diff --git a/evs/app/RenderPixelCopy.h b/evs/app/RenderPixelCopy.h
new file mode 100644
index 0000000..ff3917a
--- /dev/null
+++ b/evs/app/RenderPixelCopy.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAR_EVS_APP_RENDERPIXELCOPY_H
+#define CAR_EVS_APP_RENDERPIXELCOPY_H
+
+
+#include "RenderBase.h"
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include "ConfigManager.h"
+#include "VideoTex.h"
+
+
+using namespace ::android::hardware::automotive::evs::V1_0;
+
+
+/*
+ * Renders the view from a single specified camera to the full display using a CPU pixel copy.
+ */
+class RenderPixelCopy: public RenderBase {
+public:
+    RenderPixelCopy(sp<IEvsEnumerator> enumerator, const ConfigManager::CameraInfo& cam);
+
+    virtual bool activate();
+    virtual void deactivate();
+
+    virtual bool drawFrame(const BufferDesc& tgtBuffer);
+
+protected:
+    sp<IEvsEnumerator>              mEnumerator;
+    ConfigManager::CameraInfo       mCameraInfo;
+
+    sp<StreamHandler>               mStreamHandler;
+};
+
+
+#endif //CAR_EVS_APP_RENDERPIXELCOPY_H
diff --git a/evs/app/StreamHandler.cpp b/evs/app/StreamHandler.cpp
index ee50e96..5477642 100644
--- a/evs/app/StreamHandler.cpp
+++ b/evs/app/StreamHandler.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,40 +14,52 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "EvsTest"
+#define LOG_TAG "EVSAPP"
 
 #include "StreamHandler.h"
 
 #include <stdio.h>
 #include <string.h>
 
-#include <android/log.h>
+#include <log/log.h>
 #include <cutils/native_handle.h>
-#include <ui/GraphicBuffer.h>
-
-#include <algorithm>    // std::min
 
 
-// For the moment, we're assuming that the underlying EVS driver we're working with
-// is providing 4 byte RGBx data.  This is fine for loopback testing, although
-// real hardware is expected to provide YUV data -- most likly formatted as YV12
-static const unsigned kBytesPerPixel = 4;   // assuming 4 byte RGBx pixels
-
-
-StreamHandler::StreamHandler(android::sp<IEvsCamera> pCamera, android::sp<IEvsDisplay> pDisplay) :
-    mCamera(pCamera),
-    mDisplay(pDisplay) {
+StreamHandler::StreamHandler(android::sp <IEvsCamera> pCamera) :
+    mCamera(pCamera)
+{
+    // We rely on the camera having at least two buffers available since we'll hold one and
+    // expect the camera to be able to capture a new image in the background.
+    pCamera->setMaxFramesInFlight(2);
 }
 
 
-void StreamHandler::startStream() {
-    // Mark ourselves as running
-    mLock.lock();
-    mRunning = true;
-    mLock.unlock();
+void StreamHandler::shutdown()
+{
+    // Make sure we're not still streaming
+    blockingStopStream();
 
-    // Tell the camera to start streaming
-    mCamera->startVideoStream(this);
+    // At this point, the receiver thread is no longer running, so we can safely drop
+    // our remote object references so they can be freed
+    mCamera = nullptr;
+}
+
+
+bool StreamHandler::startStream() {
+    std::unique_lock<std::mutex> lock(mLock);
+
+    if (!mRunning) {
+        // Tell the camera to start streaming
+        Return <EvsResult> result = mCamera->startVideoStream(this);
+        if (result != EvsResult::OK) {
+            return false;
+        }
+
+        // Mark ourselves as running
+        mRunning = true;
+    }
+
+    return true;
 }
 
 
@@ -64,7 +76,9 @@
 
     // Wait until the stream has actually stopped
     std::unique_lock<std::mutex> lock(mLock);
-    mSignal.wait(lock, [this](){ return !mRunning; });
+    if (mRunning) {
+        mSignal.wait(lock, [this]() { return !mRunning; });
+    }
 }
 
 
@@ -74,146 +88,80 @@
 }
 
 
-unsigned StreamHandler::getFramesReceived() {
+bool StreamHandler::newFrameAvailable() {
     std::unique_lock<std::mutex> lock(mLock);
-    return mFramesReceived;
-};
-
-
-unsigned StreamHandler::getFramesCompleted() {
-    std::unique_lock<std::mutex> lock(mLock);
-    return mFramesCompleted;
-};
-
-
-Return<void> StreamHandler::deliverFrame(const BufferDesc& bufferArg) {
-    ALOGD("Received a frame from the camera (%p)", bufferArg.memHandle.getNativeHandle());
-
-    // Local flag we use to keep track of when the stream is stopping
-    bool timeToStop = false;
-
-    if (bufferArg.memHandle.getNativeHandle() == nullptr) {
-        // Signal that the last frame has been received and that the stream should stop
-        timeToStop = true;
-        ALOGI("End of stream signaled");
-    } else {
-        // Get the output buffer we'll use to display the imagery
-        BufferDesc tgtBuffer = {};
-        mDisplay->getTargetBuffer([&tgtBuffer]
-                                  (const BufferDesc& buff) {
-                                      tgtBuffer = buff;
-                                      ALOGD("Got output buffer (%p) with id %d cloned as (%p)",
-                                            buff.memHandle.getNativeHandle(),
-                                            tgtBuffer.bufferId,
-                                            tgtBuffer.memHandle.getNativeHandle());
-                                  }
-        );
-
-        if (tgtBuffer.memHandle == nullptr) {
-            printf("Didn't get target buffer - frame lost\n");
-            ALOGE("Didn't get requested output buffer -- skipping this frame.");
-        } else {
-            // Copy the contents of the of buffer.memHandle into tgtBuffer
-            copyBufferContents(tgtBuffer, bufferArg);
-
-            // TODO:  Add a bit of overlay graphics?
-            // TODO:  Use OpenGL to render from texture?
-            // NOTE:  If we mess with the frame contents, we'll need to update the frame inspection
-            //        logic in the default (test) display driver.
-
-            // Send the target buffer back for display
-            ALOGD("Calling returnTargetBufferForDisplay (%p)",
-                  tgtBuffer.memHandle.getNativeHandle());
-            Return<EvsResult> result = mDisplay->returnTargetBufferForDisplay(tgtBuffer);
-            if (!result.isOk()) {
-                printf("HIDL error on display buffer (%s)- frame lost\n",
-                       result.description().c_str());
-                ALOGE("Error making the remote function call.  HIDL said %s",
-                      result.description().c_str());
-            } else if (result != EvsResult::OK) {
-                printf("Display reported error - frame lost\n");
-                ALOGE("We encountered error %d when returning a buffer to the display!",
-                      (EvsResult)result);
-            } else {
-                // Everything looks good!  Keep track so tests or watch dogs can monitor progress
-                mLock.lock();
-                mFramesCompleted++;
-                mLock.unlock();
-                printf("frame OK\n");
-            }
-        }
-
-        // Send the camera buffer back now that we're done with it
-        ALOGD("Calling doneWithFrame");
-        // TODO:  Why is it that we get a HIDL crash if we pass back the cloned buffer?
-        mCamera->doneWithFrame(bufferArg);
-
-        ALOGD("Frame handling complete");
-    }
-
-
-    // Update our received frame count and notify anybody who cares that things have changed
-    mLock.lock();
-    if (timeToStop) {
-        mRunning = false;
-    } else {
-        mFramesReceived++;
-    }
-    mLock.unlock();
-    mSignal.notify_all();
-
-
-    return Void();
+    return (mReadyBuffer >= 0);
 }
 
 
-bool StreamHandler::copyBufferContents(const BufferDesc& tgtBuffer,
-                                       const BufferDesc& srcBuffer) {
-    bool success = true;
+const BufferDesc& StreamHandler::getNewFrame() {
+    std::unique_lock<std::mutex> lock(mLock);
 
-    // Make sure we don't run off the end of either buffer
-    const unsigned width     = std::min(tgtBuffer.width,
-                                        srcBuffer.width);
-    const unsigned height    = std::min(tgtBuffer.height,
-                                        srcBuffer.height);
-
-    sp<android::GraphicBuffer> tgt = new android::GraphicBuffer(
-            tgtBuffer.memHandle, android::GraphicBuffer::CLONE_HANDLE,
-            tgtBuffer.width, tgtBuffer.height, tgtBuffer.format, 1,
-            tgtBuffer.usage, tgtBuffer.stride);
-    sp<android::GraphicBuffer> src = new android::GraphicBuffer(
-            srcBuffer.memHandle, android::GraphicBuffer::CLONE_HANDLE,
-            srcBuffer.width, srcBuffer.height, srcBuffer.format, 1,
-            srcBuffer.usage, srcBuffer.stride);
-
-    // Lock our source buffer for reading
-    unsigned char* srcPixels = nullptr;
-    src->lock(GRALLOC_USAGE_SW_READ_OFTEN, (void **) &srcPixels);
-
-    // Lock our target buffer for writing
-    unsigned char* tgtPixels = nullptr;
-    tgt->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void **) &tgtPixels);
-
-    if (srcPixels && tgtPixels) {
-        for (unsigned row = 0; row < height; row++) {
-            // Copy the entire row of pixel data
-            memcpy(tgtPixels, srcPixels, width * kBytesPerPixel);
-
-            // Advance to the next row (keeping in mind that stride here is in units of pixels)
-            tgtPixels += tgtBuffer.stride * kBytesPerPixel;
-            srcPixels += srcBuffer.stride * kBytesPerPixel;
-        }
+    if (mHeldBuffer >= 0) {
+        ALOGE("Ignored call for new frame while still holding the old one.");
     } else {
-        ALOGE("Failed to copy buffer contents");
-        success = false;
+        if (mReadyBuffer < 0) {
+            ALOGE("Returning invalid buffer because we don't have any.  Call newFrameAvailable first?");
+            mReadyBuffer = 0;   // This is a lie!
+        }
+
+        // Move the ready buffer into the held position, and clear the ready position
+        mHeldBuffer = mReadyBuffer;
+        mReadyBuffer = -1;
     }
 
-    if (srcPixels) {
-        src->unlock();
-    }
-    if (tgtPixels) {
-        tgt->unlock();
+    return mBuffers[mHeldBuffer];
+}
+
+
+void StreamHandler::doneWithFrame(const BufferDesc& buffer) {
+    std::unique_lock<std::mutex> lock(mLock);
+
+    // We had better be getting back the buffer we originally delivered!
+    if ((mHeldBuffer < 0) || (buffer.bufferId != mBuffers[mHeldBuffer].bufferId)) {
+        ALOGE("StreamHandler::doneWithFrame got an unexpected buffer!");
     }
 
-    return success;
+    // Send the buffer back to the underlying camera
+    mCamera->doneWithFrame(mBuffers[mHeldBuffer]);
+
+    // Clear the held position
+    mHeldBuffer = -1;
+}
+
+
+Return<void> StreamHandler::deliverFrame(const BufferDesc& buffer) {
+    ALOGD("Received a frame from the camera (%p)", buffer.memHandle.getNativeHandle());
+
+    // Take the lock to protect our frame slots and running state variable
+    {
+        std::unique_lock <std::mutex> lock(mLock);
+
+        if (buffer.memHandle.getNativeHandle() == nullptr) {
+            // Signal that the last frame has been received and the stream is stopped
+            mRunning = false;
+        } else {
+            // Do we already have a "ready" frame?
+            if (mReadyBuffer >= 0) {
+                // Send the previously saved buffer back to the camera unused
+                mCamera->doneWithFrame(mBuffers[mReadyBuffer]);
+
+                // We'll reuse the same ready buffer index
+            } else if (mHeldBuffer >= 0) {
+                // The client is holding a buffer, so use the other slot for "on deck"
+                mReadyBuffer = 1 - mHeldBuffer;
+            } else {
+                // This is our first buffer, so just pick a slot
+                mReadyBuffer = 0;
+            }
+
+            // Save this frame until our client is interested in it
+            mBuffers[mReadyBuffer] = buffer;
+        }
+    }
+
+    // Notify anybody who cares that things have changed
+    mSignal.notify_all();
+
+    return Void();
 }
diff --git a/evs/app/StreamHandler.h b/evs/app/StreamHandler.h
index eb2f6ce..9e1d3b7 100644
--- a/evs/app/StreamHandler.h
+++ b/evs/app/StreamHandler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,8 +14,12 @@
  * limitations under the License.
  */
 
-#ifndef CAR_EVS_APP_STREAMHANDLER_H
-#define CAR_EVS_APP_STREAMHANDLER_H
+#ifndef EVS_VTS_STREAMHANDLER_H
+#define EVS_VTS_STREAMHANDLER_H
+
+#include <queue>
+
+#include "ui/GraphicBuffer.h"
 
 #include <android/hardware/automotive/evs/1.0/IEvsCameraStream.h>
 #include <android/hardware/automotive/evs/1.0/IEvsCamera.h>
@@ -29,38 +33,49 @@
 using ::android::sp;
 
 
+/*
+ * StreamHandler:
+ * This class can be used to receive camera imagery from an IEvsCamera implementation.  It will
+ * hold onto the most recent image buffer, returning older ones to the camera as new frames arrive.
+ * Note that the video frames are delivered on a background thread, while the control interface
+ * is actuated from the application's foreground thread.
+ */
 class StreamHandler : public IEvsCameraStream {
 public:
-    StreamHandler(android::sp <IEvsCamera>  pCamera,
-                  android::sp <IEvsDisplay> pDisplay);
+    virtual ~StreamHandler() { shutdown(); };
 
-    void startStream();
+    StreamHandler(android::sp <IEvsCamera> pCamera);
+    void shutdown();
+
+    bool startStream();
     void asyncStopStream();
     void blockingStopStream();
 
     bool isRunning();
 
-    unsigned getFramesReceived();
-    unsigned getFramesCompleted();
+    bool newFrameAvailable();
+    const BufferDesc& getNewFrame();
+    void doneWithFrame(const BufferDesc& buffer);
 
 private:
     // Implementation for ::android::hardware::automotive::evs::V1_0::ICarCameraStream
     Return<void> deliverFrame(const BufferDesc& buffer)  override;
 
-    // Local implementation details
-    bool copyBufferContents(const BufferDesc& tgtBuffer, const BufferDesc& srcBuffer);
-
+    // Values initialized at startup
     android::sp <IEvsCamera>    mCamera;
-    android::sp <IEvsDisplay>   mDisplay;
 
+    // Since we get frames delivered to us asynchronously via the ICarCameraStream interface,
+    // we need to protect all member variables that may be modified while we're streaming
+    // (i.e. those below)
     std::mutex                  mLock;
     std::condition_variable     mSignal;
 
     bool                        mRunning = false;
 
-    unsigned                    mFramesReceived = 0;    // Simple counter -- rolls over eventually!
-    unsigned                    mFramesCompleted = 0;   // Simple counter -- rolls over eventually!
+    BufferDesc                  mBuffers[2];
+    int                         mHeldBuffer = -1;   // Index of the one currently held by the client
+    int                         mReadyBuffer = -1;  // Index of the newest available buffer
 };
 
 
-#endif //CAR_EVS_APP_STREAMHANDLER_H
+#endif //EVS_VTS_STREAMHANDLER_H
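For reference, a minimal sketch of the consumer pattern the reworked StreamHandler is designed for, assuming pCamera is an IEvsCamera already opened through the enumerator (the function name and the polling structure are illustrative only):

    // Sketch only: poll-and-render loop against the new StreamHandler API
    void runCameraLoop(android::sp<IEvsCamera> pCamera) {
        android::sp<StreamHandler> handler = new StreamHandler(pCamera);
        if (!handler->startStream()) {
            ALOGE("Camera refused to start streaming");
            return;
        }
        while (handler->isRunning()) {
            if (!handler->newFrameAvailable()) {
                continue;   // nothing new yet; a real client would render the held frame or block
            }
            const BufferDesc& frame = handler->getNewFrame();   // borrow the newest buffer
            // ... consume frame.memHandle here ...
            handler->doneWithFrame(frame);   // hand it back so its slot can be reused
        }
        handler->shutdown();   // also invoked by the destructor, but explicit is clearer
    }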
diff --git a/evs/app/TexWrapper.cpp b/evs/app/TexWrapper.cpp
new file mode 100644
index 0000000..7ec2191
--- /dev/null
+++ b/evs/app/TexWrapper.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "TexWrapper.h"
+#include "glError.h"
+
+#include "log/log.h"
+
+#include <fcntl.h>
+#include <malloc.h>
+#include <png.h>
+
+
+/* Create a new, empty GL texture that will be filled later */
+TexWrapper::TexWrapper() {
+    GLuint textureId;
+    glGenTextures(1, &textureId);
+    if (textureId <= 0) {
+        ALOGE("Didn't get a texture handle allocated: %s", getEGLError());
+    } else {
+        // Store the basic texture properties
+        id = textureId;
+        w  = 0;
+        h  = 0;
+    }
+}
+
+
+/* Wrap a texture that was already allocated.  The wrapper takes ownership. */
+TexWrapper::TexWrapper(GLuint textureId, unsigned width, unsigned height) {
+    // Store the basic texture properties
+    id = textureId;
+    w  = width;
+    h  = height;
+}
+
+
+TexWrapper::~TexWrapper() {
+    // Give the texture ID back
+    if (id > 0) {
+        glDeleteTextures(1, &id);
+    }
+    id = -1;
+}
+
+
+/* Factory to build TexWrapper objects from a given PNG file */
+TexWrapper* createTextureFromPng(const char * filename)
+{
+    // Open the PNG file
+    FILE *inputFile = fopen(filename, "rb");
+    if (inputFile == 0)
+    {
+        perror(filename);
+        return nullptr;
+    }
+
+    // Read the file header and validate that it is a PNG
+    static const int kSigSize = 8;
+    png_byte header[kSigSize] = {0};
+    fread(header, 1, kSigSize, inputFile);
+    if (png_sig_cmp(header, 0, kSigSize)) {
+        printf("%s is not a PNG.\n", filename);
+        fclose(inputFile);
+        return nullptr;
+    }
+
+    // Set up our control structure
+    png_structp pngControl = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+    if (!pngControl)
+    {
+        printf("png_create_read_struct failed.\n");
+        fclose(inputFile);
+        return nullptr;
+    }
+
+    // Set up our image info structure
+    png_infop pngInfo = png_create_info_struct(pngControl);
+    if (!pngInfo)
+    {
+        printf("error: png_create_info_struct returned 0.\n");
+        png_destroy_read_struct(&pngControl, nullptr, nullptr);
+        fclose(inputFile);
+        return nullptr;
+    }
+
+    // Install an error handler
+    if (setjmp(png_jmpbuf(pngControl))) {
+        printf("libpng reported an error\n");
+        png_destroy_read_struct(&pngControl, &pngInfo, nullptr);
+        fclose(inputFile);
+        return nullptr;
+    }
+
+    // Set up the png reader and fetch the remaining bits of the header
+    png_init_io(pngControl, inputFile);
+    png_set_sig_bytes(pngControl, kSigSize);
+    png_read_info(pngControl, pngInfo);
+
+    // Get basic information about the PNG we're reading
+    int bitDepth;
+    int colorFormat;
+    png_uint_32 width;
+    png_uint_32 height;
+    png_get_IHDR(pngControl, pngInfo,
+                 &width, &height,
+                 &bitDepth, &colorFormat,
+                 NULL, NULL, NULL);
+
+    GLint format;
+    switch(colorFormat)
+    {
+        case PNG_COLOR_TYPE_RGB:
+            format = GL_RGB;
+            break;
+        case PNG_COLOR_TYPE_RGB_ALPHA:
+            format = GL_RGBA;
+            break;
+        default:
+            printf("%s: Unknown libpng color format %d.\n", filename, colorFormat);
+            return nullptr;
+    }
+
+    // Refresh the values in the png info struct in case any transformations have been applied.
+    png_read_update_info(pngControl, pngInfo);
+    int stride = png_get_rowbytes(pngControl, pngInfo);
+    stride += 3 - ((stride-1) % 4);   // glTexImage2d requires rows to be 4-byte aligned
+
+    // Allocate storage for the pixel data
+    png_byte * buffer = (png_byte*)malloc(stride * height);
+    if (buffer == NULL)
+    {
+        printf("error: could not allocate memory for PNG image data\n");
+        png_destroy_read_struct(&pngControl, &pngInfo, nullptr);
+        fclose(inputFile);
+        return nullptr;
+    }
+
+    // libpng needs an array of pointers into the image data for each row
+    png_byte ** rowPointers = (png_byte**)malloc(height * sizeof(png_byte*));
+    if (rowPointers == NULL)
+    {
+        printf("Failed to allocate temporary row pointers\n");
+        png_destroy_read_struct(&pngControl, &pngInfo, nullptr);
+        free(buffer);
+        fclose(inputFile);
+        return nullptr;
+    }
+    for (unsigned int r = 0; r < height; r++)
+    {
+        rowPointers[r] = buffer + r*stride;
+    }
+
+
+    // Read in the actual image bytes
+    png_read_image(pngControl, rowPointers);
+    png_read_end(pngControl, nullptr);
+
+
+    // Set up the OpenGL texture to contain this image
+    GLuint textureId;
+    glGenTextures(1, &textureId);
+    glBindTexture(GL_TEXTURE_2D, textureId);
+
+    // Send the image data to GL
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
+
+    // Initialize the sampling properties (it seems sampling may not work if this isn't done)
+    // The user of this texture may very well want to set their own filtering, but we're going
+    // to pay the (minor) price of setting this up for them to avoid the dreaded "black image" if
+    // they forget.
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+    // clean up
+    png_destroy_read_struct(&pngControl, &pngInfo, nullptr);
+    free(buffer);
+    free(rowPointers);
+    fclose(inputFile);
+
+    glBindTexture(GL_TEXTURE_2D, 0);
+
+
+    // Return the texture
+    return new TexWrapper(textureId, width, height);
+}
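A short usage sketch for the loader above; the path is an example only and a real caller would likely set its own filtering:

    // Sketch only: load overlay art and bind it for drawing
    TexWrapper* carTex = createTextureFromPng("/system/etc/CarFromTop.png");   // example path
    if (carTex != nullptr) {
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, carTex->glId());
        // ... issue draw calls sampling from texture unit 0 ...
        delete carTex;   // releases the GL texture via ~TexWrapper
    }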
diff --git a/evs/app/TexWrapper.h b/evs/app/TexWrapper.h
new file mode 100644
index 0000000..7c92247
--- /dev/null
+++ b/evs/app/TexWrapper.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TEXWRAPPER_H
+#define TEXWRAPPER_H
+
+#include <GLES2/gl2.h>
+
+
+class TexWrapper {
+public:
+    TexWrapper(GLuint textureId, unsigned width, unsigned height);
+    virtual ~TexWrapper();
+
+    GLuint glId()       { return id; };
+    unsigned width()    { return w; };
+    unsigned height()   { return h; };
+
+protected:
+    TexWrapper();
+
+    GLuint id;
+    unsigned w;
+    unsigned h;
+};
+
+
+TexWrapper* createTextureFromPng(const char* filename);
+
+#endif // TEXWRAPPER_H
\ No newline at end of file
diff --git a/evs/app/VideoTex.cpp b/evs/app/VideoTex.cpp
new file mode 100644
index 0000000..10d54bd
--- /dev/null
+++ b/evs/app/VideoTex.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vector>
+#include <stdio.h>
+#include <fcntl.h>
+#include <alloca.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <malloc.h>
+#include <png.h>
+
+#include "VideoTex.h"
+#include "glError.h"
+
+#include <ui/GraphicBuffer.h>
+
+// Eventually we shouldn't need this dependency, but for now the
+// graphics allocator interface isn't fully supported on all platforms
+// and this is our workaround.
+using ::android::GraphicBuffer;
+
+
+VideoTex::VideoTex(sp<IEvsEnumerator> pEnum,
+                   sp<IEvsCamera> pCamera,
+                   sp<StreamHandler> pStreamHandler,
+                   EGLDisplay glDisplay)
+    : TexWrapper()
+    , mEnumerator(pEnum)
+    , mCamera(pCamera)
+    , mStreamHandler(pStreamHandler)
+    , mDisplay(glDisplay) {
+    // Nothing but initialization here...
+}
+
+VideoTex::~VideoTex() {
+    // Tell the stream to stop flowing
+    mStreamHandler->asyncStopStream();
+
+    // Close the camera
+    mEnumerator->closeCamera(mCamera);
+
+    // Drop our device texture image
+    if (mKHRimage != EGL_NO_IMAGE_KHR) {
+        eglDestroyImageKHR(mDisplay, mKHRimage);
+        mKHRimage = EGL_NO_IMAGE_KHR;
+    }
+}
+
+
+// Return true if the texture contents have changed
+bool VideoTex::refresh() {
+    if (!mStreamHandler->newFrameAvailable()) {
+        // No new image has been delivered, so there's nothing to do here
+        return false;
+    }
+
+    // If we already have an image backing us, then it's time to return it
+    if (mImageBuffer.memHandle.getNativeHandle() != nullptr) {
+        // Drop our device texture image
+        if (mKHRimage != EGL_NO_IMAGE_KHR) {
+            eglDestroyImageKHR(mDisplay, mKHRimage);
+            mKHRimage = EGL_NO_IMAGE_KHR;
+        }
+
+        // Return it since we're done with it
+        mStreamHandler->doneWithFrame(mImageBuffer);
+    }
+
+    // Get the new image we want to use as our contents
+    mImageBuffer = mStreamHandler->getNewFrame();
+
+
+    // create a GraphicBuffer from the existing handle
+    sp<GraphicBuffer> pGfxBuffer = new GraphicBuffer(mImageBuffer.memHandle,
+                                                     GraphicBuffer::CLONE_HANDLE,
+                                                     mImageBuffer.width, mImageBuffer.height,
+                                                     mImageBuffer.format, 1, // layer count
+                                                     GRALLOC_USAGE_HW_TEXTURE,
+                                                     mImageBuffer.stride);
+    if (pGfxBuffer.get() == nullptr) {
+        ALOGE("Failed to allocate GraphicBuffer to wrap image handle");
+        // Returning "true" in this error condition because we already released the
+        // previous image (if any) and so the texture may change in unpredictable ways now!
+        return true;
+    }
+
+    // Get a GL compatible reference to the graphics buffer we've been given
+    EGLint eglImageAttributes[] = {EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
+    EGLClientBuffer clientBuf = static_cast<EGLClientBuffer>(pGfxBuffer->getNativeBuffer());
+    mKHRimage = eglCreateImageKHR(mDisplay, EGL_NO_CONTEXT,
+                                  EGL_NATIVE_BUFFER_ANDROID, clientBuf,
+                                  eglImageAttributes);
+    if (mKHRimage == EGL_NO_IMAGE_KHR) {
+        const char *msg = getEGLError();
+        ALOGE("error creating EGLImage: %s", msg);
+    } else {
+        // Update the texture handle we already created to refer to this gralloc buffer
+        glActiveTexture(GL_TEXTURE0);
+        glBindTexture(GL_TEXTURE_2D, glId());
+        glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, static_cast<GLeglImageOES>(mKHRimage));
+
+        // Initialize the sampling properties (it seems sampling may not work if this isn't done)
+        // The user of this texture may very well want to set their own filtering, but we're going
+        // to pay the (minor) price of setting this up for them to avoid the dreaded "black image"
+        // if they forget.
+        // TODO:  Can we do this once for the texture ID rather than on every refresh?
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    }
+
+    return true;
+}
+
+
+VideoTex* createVideoTexture(sp<IEvsEnumerator> pEnum,
+                             const char* evsCameraId,
+                             EGLDisplay glDisplay) {
+    // Set up the camera to feed this texture
+    sp<IEvsCamera> pCamera = pEnum->openCamera(evsCameraId);
+    if (pCamera.get() == nullptr) {
+        ALOGE("Failed to allocate new EVS Camera interface for %s", evsCameraId);
+        return nullptr;
+    }
+
+    // Initialize the stream that will help us update this texture's contents
+    sp<StreamHandler> pStreamHandler = new StreamHandler(pCamera);
+    if (pStreamHandler.get() == nullptr) {
+        ALOGE("failed to allocate FrameHandler");
+        return nullptr;
+    }
+
+    // Start the video stream
+    if (!pStreamHandler->startStream()) {
+        printf("Couldn't start the camera stream (%s)\n", evsCameraId);
+        ALOGE("start stream failed for %s", evsCameraId);
+        return nullptr;
+    }
+
+    return new VideoTex(pEnum, pCamera, pStreamHandler, glDisplay);
+}
diff --git a/evs/app/VideoTex.h b/evs/app/VideoTex.h
new file mode 100644
index 0000000..0b95c1d
--- /dev/null
+++ b/evs/app/VideoTex.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef VIDEOTEX_H
+#define VIDEOTEX_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+#include <GLES3/gl3ext.h>
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+
+#include "TexWrapper.h"
+#include "StreamHandler.h"
+
+
+using namespace ::android::hardware::automotive::evs::V1_0;
+
+
+class VideoTex: public TexWrapper {
+    friend VideoTex* createVideoTexture(sp<IEvsEnumerator> pEnum,
+                                        const char * evsCameraId,
+                                        EGLDisplay glDisplay);
+
+public:
+    VideoTex() = delete;
+    virtual ~VideoTex();
+
+    bool refresh();     // returns true if the texture contents were updated
+
+private:
+    VideoTex(sp<IEvsEnumerator> pEnum,
+             sp<IEvsCamera> pCamera,
+             sp<StreamHandler> pStreamHandler,
+             EGLDisplay glDisplay);
+
+    sp<IEvsEnumerator>  mEnumerator;
+    sp<IEvsCamera>      mCamera;
+    sp<StreamHandler>   mStreamHandler;
+    BufferDesc          mImageBuffer;
+
+    EGLDisplay          mDisplay;
+    EGLImageKHR mKHRimage = EGL_NO_IMAGE_KHR;
+};
+
+
+VideoTex* createVideoTexture(sp<IEvsEnumerator> pEnum,
+                             const char * deviceName,
+                             EGLDisplay glDisplay);
+
+#endif // VIDEOTEX_H
\ No newline at end of file
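A sketch of how a render loop might drive a VideoTex, assuming pEnumerator was obtained at startup, an EGL context is already current, and renderLoopActive is a hypothetical loop condition:

    // Sketch only: refresh and draw the live camera texture each frame
    VideoTex* videoTex = createVideoTexture(pEnumerator, "/dev/video0", eglDisplay);
    if (videoTex != nullptr) {
        while (renderLoopActive) {
            if (videoTex->refresh()) {
                // the gralloc-backed texture now holds the newest camera frame
            }
            glBindTexture(GL_TEXTURE_2D, videoTex->glId());
            // ... draw a quad sampling this texture, then eglSwapBuffers(...) ...
        }
        delete videoTex;   // stops the stream and closes the camera
    }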
diff --git a/evs/app/WindowSurface.cpp b/evs/app/WindowSurface.cpp
new file mode 100644
index 0000000..a3f56bc
--- /dev/null
+++ b/evs/app/WindowSurface.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WindowSurface.h"
+
+#include <gui/SurfaceComposerClient.h>
+#include <gui/ISurfaceComposer.h>
+#include <gui/Surface.h>
+#include <ui/DisplayInfo.h>
+
+using namespace android;
+
+WindowSurface::WindowSurface() {
+    status_t err;
+
+    sp<SurfaceComposerClient> surfaceComposerClient = new SurfaceComposerClient;
+    err = surfaceComposerClient->initCheck();
+    if (err != NO_ERROR) {
+        fprintf(stderr, "SurfaceComposerClient::initCheck error: %#x\n", err);
+        return;
+    }
+
+    // Get main display parameters.
+    sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
+            ISurfaceComposer::eDisplayIdMain);
+    DisplayInfo mainDpyInfo;
+    err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
+    if (err != NO_ERROR) {
+        fprintf(stderr, "ERROR: unable to get display characteristics\n");
+        return;
+    }
+
+    uint32_t width, height;
+    if (mainDpyInfo.orientation != DISPLAY_ORIENTATION_0 &&
+            mainDpyInfo.orientation != DISPLAY_ORIENTATION_180) {
+        // rotated
+        width = mainDpyInfo.h;
+        height = mainDpyInfo.w;
+    } else {
+        width = mainDpyInfo.w;
+        height = mainDpyInfo.h;
+    }
+
+    sp<SurfaceControl> sc = surfaceComposerClient->createSurface(
+            String8("Benchmark"), width, height,
+            PIXEL_FORMAT_RGBX_8888, ISurfaceComposerClient::eOpaque);
+    if (sc == NULL || !sc->isValid()) {
+        fprintf(stderr, "Failed to create SurfaceControl\n");
+        return;
+    }
+
+    SurfaceComposerClient::openGlobalTransaction();
+    err = sc->setLayer(0x7FFFFFFF);     // always on top
+    if (err != NO_ERROR) {
+        fprintf(stderr, "SurfaceComposer::setLayer error: %#x\n", err);
+        return;
+    }
+
+    err = sc->show();
+    if (err != NO_ERROR) {
+        fprintf(stderr, "SurfaceComposer::show error: %#x\n", err);
+        return;
+    }
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    mSurfaceControl = sc;
+}
+
+EGLNativeWindowType WindowSurface::getSurface() const {
+    sp<ANativeWindow> anw = mSurfaceControl->getSurface();
+    return (EGLNativeWindowType) anw.get();
+}
+
diff --git a/evs/app/WindowSurface.h b/evs/app/WindowSurface.h
new file mode 100644
index 0000000..966ea11
--- /dev/null
+++ b/evs/app/WindowSurface.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OPENGL_TESTS_WINDOWSURFACE_H
+#define OPENGL_TESTS_WINDOWSURFACE_H
+
+#include <gui/SurfaceControl.h>
+
+#include <EGL/egl.h>
+
+
+/*
+ * A window that covers the entire display surface.
+ *
+ * The window is destroyed when this object is destroyed, so don't try
+ * to use the surface after that point.
+ */
+class WindowSurface {
+public:
+    // Creates the window.
+    WindowSurface();
+
+    // Retrieves a handle to the window.
+    EGLNativeWindowType getSurface() const;
+
+private:
+    WindowSurface(const WindowSurface&) = delete;
+    WindowSurface& operator=(const WindowSurface&) = delete;
+
+    android::sp<android::SurfaceControl> mSurfaceControl;
+};
+
+#endif /* OPENGL_TESTS_WINDOWSURFACE_H */
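A sketch of pairing WindowSurface with EGL; eglDisplay and eglConfig are assumed to have been initialized and chosen elsewhere:

    // Sketch only: wrap the full-screen SurfaceFlinger surface as an EGL window surface
    WindowSurface window;   // creates and shows the surface
    EGLNativeWindowType nativeWindow = window.getSurface();
    EGLSurface eglSurface = eglCreateWindowSurface(eglDisplay, eglConfig, nativeWindow, nullptr);
    if (eglSurface == EGL_NO_SURFACE) {
        ALOGE("eglCreateWindowSurface failed: %s", getEGLError());
    }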
diff --git a/evs/app/config.json b/evs/app/config.json
index 791dc4e..7086fbe 100644
--- a/evs/app/config.json
+++ b/evs/app/config.json
@@ -6,8 +6,6 @@
     "rearExtent" : 40
   },
   "display" : {
-    "width" : 640,
-    "height" : 480,
     "frontRange" : 100,
     "rearRange" : 100
   },
@@ -17,39 +15,19 @@
   },
   "cameras" : [
     {
-      "name" : "rightFront",
-      "x" : 36.0,
-      "y" : 90.0,
-      "z" : 36,
-      "yaw" : -45,
-      "pitch" : -25,
+      "cameraId" : "/dev/video0",
+      "function" : "reverse",
+      "x" : 0.0,
+      "y" : -40.0,
+      "z" : 48,
+      "yaw" : 180,
+      "pitch" : -10,
       "hfov" : 60,
-      "vfov" : 40
+      "vfov" : 42
     },
     {
-      "name" : "rightRear",
-      "function" : "right",
-      "x" : 36.0,
-      "y" : -10,
-      "z" : 36,
-      "yaw" : -135,
-      "pitch" : -25,
-      "hfov" : 60,
-      "vfov" : 40
-    },
-    {
-      "name" : "left",
-      "function" : "left",
-      "x" : -36.0,
-      "y" : 80,
-      "z" : 30,
-      "yaw" : 90,
-      "pitch" : -45,
-      "hfov" : 90,
-      "vfov" : 90
-    },
-    {
-      "name" : "front",
+      "cameraId" : "/dev/video1",
+      "function" : "front",
       "x" : 0.0,
       "y" : 100.0,
       "z" : 48,
@@ -59,15 +37,15 @@
       "vfov" : 42
     },
     {
-      "name" : "rear",
-      "function" : "rear",
+      "cameraId" : "backup",
+      "function" : "reverse",
       "x" : 0.0,
-      "y" : -40,
-      "z" : 30,
-      "yaw" : 180,
-      "pitch" : -45,
-      "hfov" : 90,
-      "vfov" : 60
+      "y" : 100.0,
+      "z" : 48,
+      "yaw" : 0,
+      "pitch" : -10,
+      "hfov" : 60,
+      "vfov" : 42
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/evs/app/evs_app.cpp b/evs/app/evs_app.cpp
index d17119e..008c18d 100644
--- a/evs/app/evs_app.cpp
+++ b/evs/app/evs_app.cpp
@@ -40,14 +40,25 @@
 using android::hardware::joinRpcThreadpool;
 
 
-// TODO:  Should this somehow be a shared definition with the module itself?
-const static char kEvsServiceName[] = "EvsSharedEnumerator";
-
-
 // Main entry point
-int main(int /* argc */, char** /* argv */)
+int main(int argc, char** argv)
 {
-    printf("EVS app starting\n");
+    ALOGI("EVS app starting\n");
+
+    // Set up default behavior, then check for command line options
+    bool useVehicleHal = true;
+    const char* evsServiceName = "default";
+    for (int i=1; i< argc; i++) {
+        if (strcmp(argv[i], "--test") == 0) {
+            useVehicleHal = false;
+        } else if (strcmp(argv[i], "--hw") == 0) {
+            evsServiceName = "EvsEnumeratorHw";
+        } else if (strcmp(argv[i], "--mock") == 0) {
+            evsServiceName = "EvsEnumeratorHw-Mock";
+        } else {
+            printf("Ignoring unrecognized command line arg '%s'\n", argv[i]);
+        }
+    }
 
     // Load our configuration information
     ConfigManager config;
@@ -64,9 +75,9 @@
 
     // Get the EVS manager service
     ALOGI("Acquiring EVS Enumerator");
-    android::sp<IEvsEnumerator> pEvs = IEvsEnumerator::getService(kEvsServiceName);
+    android::sp<IEvsEnumerator> pEvs = IEvsEnumerator::getService(evsServiceName);
     if (pEvs.get() == nullptr) {
-        ALOGE("getService returned NULL.  Exiting.");
+        ALOGE("getService(%s) returned NULL.  Exiting.", evsServiceName);
         return 1;
     }
 
@@ -80,52 +91,54 @@
     }
 
     // Connect to the Vehicle HAL so we can monitor state
-    ALOGI("Connecting to Vehicle HAL");
-    android::sp <IVehicle> pVnet = IVehicle::getService();
-    if (pVnet.get() == nullptr) {
-#if 0
-        ALOGE("Vehicle HAL getService returned NULL.  Exiting.");
-        return 1;
-#else
-        // While testing, at least, we want to be able to run without a vehicle
-        ALOGE("getService returned NULL, but we're in test, so we'll pretend to be in reverse");
-#endif
-    } else {
-        // Register for vehicle state change callbacks we care about
-        // Changes in these values are what will trigger a reconfiguration of the EVS pipeline
-        SubscribeOptions optionsData[2] = {
-                {
-                    .propId = static_cast<int32_t>(VehicleProperty::GEAR_SELECTION),
-                    .flags = SubscribeFlags::DEFAULT
-                },
-                {
-                    .propId = static_cast<int32_t>(VehicleProperty::TURN_SIGNAL_STATE),
-                    .flags = SubscribeFlags::DEFAULT
-                },
-        };
-        hidl_vec<SubscribeOptions> options;
-        options.setToExternal(optionsData, arraysize(optionsData));
-        StatusCode status = pVnet->subscribe(pEvsListener, options);
-        if (status != StatusCode::OK) {
-            ALOGE("Subscription to vehicle notifications failed with code %d.  Exiting.", status);
+    android::sp <IVehicle> pVnet;
+    if (useVehicleHal) {
+        ALOGI("Connecting to Vehicle HAL");
+        pVnet = IVehicle::getService();
+        if (pVnet.get() == nullptr) {
+            ALOGE("Vehicle HAL getService returned NULL.  Exiting.");
             return 1;
+        } else {
+            // Register for vehicle state change callbacks we care about
+            // Changes in these values are what will trigger a reconfiguration of the EVS pipeline
+            SubscribeOptions optionsData[2] = {
+                    {
+                            .propId = static_cast<int32_t>(VehicleProperty::GEAR_SELECTION),
+                            .flags = SubscribeFlags::DEFAULT
+                    },
+                    {
+                            .propId = static_cast<int32_t>(VehicleProperty::TURN_SIGNAL_STATE),
+                            .flags = SubscribeFlags::DEFAULT
+                    },
+            };
+            hidl_vec <SubscribeOptions> options;
+            options.setToExternal(optionsData, arraysize(optionsData));
+            StatusCode status = pVnet->subscribe(pEvsListener, options);
+            if (status != StatusCode::OK) {
+                ALOGE("Subscription to vehicle notifications failed with code %d.  Exiting.",
+                      status);
+                return 1;
+            }
         }
+    } else {
+        ALOGW("Test mode selected, so not talking to Vehicle HAL");
     }
 
     // Configure ourselves for the current vehicle state at startup
     ALOGI("Constructing state controller");
     EvsStateControl *pStateController = new EvsStateControl(pVnet, pEvs, pDisplay, config);
-    if (!pStateController->configureForVehicleState()) {
+    if (!pStateController->startUpdateLoop()) {
         ALOGE("Initial configuration failed.  Exiting.");
         return 1;
-    } else {
-        // Run forever, reacting to events as necessary
-        ALOGI("Entering running state");
-        pEvsListener->run(pStateController);
     }
 
+    // Run forever, reacting to events as necessary
+    ALOGI("Entering running state");
+    pEvsListener->run(pStateController);
+
     // In normal operation, we expect to run forever, but in some error conditions we'll quit.
     // One known example is if another process preempts our registration for our service name.
-    printf("EVS Listener stopped.  Exiting.\n");
+    ALOGE("EVS Listener stopped.  Exiting.");
+
     return 0;
 }
diff --git a/evs/app/glError.cpp b/evs/app/glError.cpp
new file mode 100644
index 0000000..53188d3
--- /dev/null
+++ b/evs/app/glError.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <EGL/egl.h>
+#include <GLES3/gl3.h>
+
+
+const char *getEGLError(void) {
+    switch (eglGetError()) {
+        case EGL_SUCCESS:
+            return "EGL_SUCCESS";
+        case EGL_NOT_INITIALIZED:
+            return "EGL_NOT_INITIALIZED";
+        case EGL_BAD_ACCESS:
+            return "EGL_BAD_ACCESS";
+        case EGL_BAD_ALLOC:
+            return "EGL_BAD_ALLOC";
+        case EGL_BAD_ATTRIBUTE:
+            return "EGL_BAD_ATTRIBUTE";
+        case EGL_BAD_CONTEXT:
+            return "EGL_BAD_CONTEXT";
+        case EGL_BAD_CONFIG:
+            return "EGL_BAD_CONFIG";
+        case EGL_BAD_CURRENT_SURFACE:
+            return "EGL_BAD_CURRENT_SURFACE";
+        case EGL_BAD_DISPLAY:
+            return "EGL_BAD_DISPLAY";
+        case EGL_BAD_SURFACE:
+            return "EGL_BAD_SURFACE";
+        case EGL_BAD_MATCH:
+            return "EGL_BAD_MATCH";
+        case EGL_BAD_PARAMETER:
+            return "EGL_BAD_PARAMETER";
+        case EGL_BAD_NATIVE_PIXMAP:
+            return "EGL_BAD_NATIVE_PIXMAP";
+        case EGL_BAD_NATIVE_WINDOW:
+            return "EGL_BAD_NATIVE_WINDOW";
+        case EGL_CONTEXT_LOST:
+            return "EGL_CONTEXT_LOST";
+        default:
+            return "Unknown error";
+    }
+}
+
+
+const char *getGLFramebufferError(void) {
+    switch (glCheckFramebufferStatus(GL_FRAMEBUFFER)) {
+    case GL_FRAMEBUFFER_COMPLETE:
+        return "GL_FRAMEBUFFER_COMPLETE";
+    case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
+        return "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT";
+    case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
+        return "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";
+    case GL_FRAMEBUFFER_UNSUPPORTED:
+        return "GL_FRAMEBUFFER_UNSUPPORTED";
+    case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS:
+        return "GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS";
+    default:
+        return "Unknown error";
+    }
+}
diff --git a/evs/app/glError.h b/evs/app/glError.h
new file mode 100644
index 0000000..52c5d5a
--- /dev/null
+++ b/evs/app/glError.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GLERROR_H
+#define GLERROR_H
+
+const char *getEGLError(void);
+
+const char *getGLFramebufferError(void);
+
+#endif // GLERROR_H
\ No newline at end of file
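One possible use of the framebuffer helper, assuming a framebuffer object is bound and targetTex is a placeholder color attachment:

    // Sketch only: validate an offscreen render target
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, targetTex, 0);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
        ALOGE("Offscreen buffer not ready: %s", getGLFramebufferError());
    }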
diff --git a/evs/app/shader.cpp b/evs/app/shader.cpp
new file mode 100644
index 0000000..6922fbe
--- /dev/null
+++ b/evs/app/shader.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "shader.h"
+
+#include <stdio.h>
+#include <memory>
+
+
+// Given shader source, load and compile it
+static GLuint loadShader(GLenum type, const char *shaderSrc, const char *name) {
+    // Create the shader object
+    GLuint shader = glCreateShader (type);
+    if (shader == 0) {
+        return 0;
+    }
+
+    // Load and compile the shader
+    glShaderSource(shader, 1, &shaderSrc, nullptr);
+    glCompileShader(shader);
+
+    // Verify the compilation worked as expected
+    GLint compiled = 0;
+    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
+    if (!compiled) {
+        printf("Error compiling %s shader for %s\n", (type==GL_VERTEX_SHADER) ? "vtx":"pxl", name);
+
+        GLint size = 0;
+        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &size);
+        if (size > 0)
+        {
+            // Get and report the error message
+            std::unique_ptr<char> infoLog(new char[size]);
+            glGetShaderInfoLog(shader, size, NULL, infoLog.get());
+            printf("  msg:\n%s\n", infoLog.get());
+        }
+
+        glDeleteShader(shader);
+        return 0;
+    }
+
+    return shader;
+}
+
+
+// Create a program object given vertex and pixel shader source
+GLuint buildShaderProgram(const char* vtxSrc, const char* pxlSrc, const char* name) {
+    GLuint program = glCreateProgram();
+    if (program == 0) {
+        printf("Failed to allocate program object\n");
+        return 0;
+    }
+
+    // Compile the shaders and bind them to this program
+    GLuint vertexShader = loadShader(GL_VERTEX_SHADER, vtxSrc, name);
+    if (vertexShader == 0) {
+        printf("Failed to load vertex shader\n");
+        glDeleteProgram(program);
+        return 0;
+    }
+    GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pxlSrc, name);
+    if (pixelShader == 0) {
+        printf("Failed to load pixel shader\n");
+        glDeleteProgram(program);
+        glDeleteShader(vertexShader);
+        return 0;
+    }
+    glAttachShader(program, vertexShader);
+    glAttachShader(program, pixelShader);
+
+    // Link the program
+    glLinkProgram(program);
+    GLint linked = 0;
+    glGetProgramiv(program, GL_LINK_STATUS, &linked);
+    if (!linked)
+    {
+        printf("Error linking program.\n");
+        GLint size = 0;
+        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &size);
+        if (size > 0)
+        {
+            // Get and report the error message
+            std::unique_ptr<char> infoLog(new char[size]);
+            glGetProgramInfoLog(program, size, NULL, infoLog.get());
+            printf("  msg:  %s\n", infoLog.get());
+        }
+
+        glDeleteProgram(program);
+        glDeleteShader(vertexShader);
+        glDeleteShader(pixelShader);
+        return 0;
+    }
+
+
+#if 0 // Debug output to diagnose shader parameters
+    GLint numShaderParams;
+    GLchar paramName[128];
+    GLint paramSize;
+    GLenum paramType;
+    const char *typeName = "?";
+    printf("Shader parameters for %s:\n", name);
+    glGetProgramiv(program, GL_ACTIVE_UNIFORMS, &numShaderParams);
+    for (GLint i=0; i<numShaderParams; i++) {
+        glGetActiveUniform(program,
+                           i,
+                           sizeof(paramName),
+                           nullptr,
+                           &paramSize,
+                           &paramType,
+                           paramName);
+        switch (paramType) {
+            case GL_FLOAT:      typeName = "GL_FLOAT"; break;
+            case GL_FLOAT_VEC4: typeName = "GL_FLOAT_VEC4"; break;
+            case GL_FLOAT_MAT4: typeName = "GL_FLOAT_MAT4"; break;
+            case GL_SAMPLER_2D: typeName = "GL_SAMPLER_2D"; break;
+        }
+
+        printf("  %2d: %s\t (%d) of type %s(%d)\n", i, paramName, paramSize, typeName, paramType);
+    }
+#endif
+
+
+    return program;
+}
diff --git a/evs/app/shader.h b/evs/app/shader.h
new file mode 100644
index 0000000..476a2f0
--- /dev/null
+++ b/evs/app/shader.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHADER_H
+#define SHADER_H
+
+#include <GLES2/gl2.h>
+
+
+// Create a program object given vertex and pixel shader source
+GLuint buildShaderProgram(const char* vtxSrc, const char* pxlSrc, const char* name);
+
+#endif // SHADER_H
\ No newline at end of file
diff --git a/evs/app/shader_projectedTex.h b/evs/app/shader_projectedTex.h
new file mode 100644
index 0000000..65e9109
--- /dev/null
+++ b/evs/app/shader_projectedTex.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHADER_PROJECTED_TEX_H
+#define SHADER_PROJECTED_TEX_H
+
+// This shader is used to project a sensor's image onto world space geometry
+// as if it were projected from the original sensor's point of view in the world.
+
+const char vtxShader_projectedTexture[] = ""
+        "#version 300 es                            \n"
+        "layout(location = 0) in vec4 pos;          \n"
+        "uniform mat4 cameraMat;                    \n"
+        "uniform mat4 projectionMat;                \n"
+        "out vec4 projectionSpace;                  \n"
+        "void main()                                \n"
+        "{                                          \n"
+        "   gl_Position = cameraMat * pos;          \n"
+        "   projectionSpace = projectionMat * pos;  \n"
+        "}                                          \n";
+
+const char pixShader_projectedTexture[] =
+        "#version 300 es                                        \n"
+        "precision mediump float;                               \n"
+        "uniform sampler2D tex;                                 \n"
+        "in vec4 projectionSpace;                               \n"
+        "out vec4 color;                                        \n"
+        "void main()                                            \n"
+        "{                                                      \n"
+        "    const vec2 zero = vec2(0.0f, 0.0f);                \n"
+        "    const vec2 one  = vec2(1.0f, 1.0f);                \n"
+        "                                                       \n"
+        "    // Compute perspective correct texture coordinates \n"
+        "    // in the sensor map                               \n"
+        "    vec2 cs = projectionSpace.xy / projectionSpace.w;  \n"
+        "                                                       \n"
+        "    // flip the texture!                               \n"
+        "    cs.y = -cs.y;                                      \n"
+        "                                                       \n"
+        "    // scale from -1/1 clip space to 0/1 uv space      \n"
+        "    vec2 uv = (cs + 1.0f) * 0.5f;                      \n"
+        "                                                       \n"
+        "    // Bail if we don't have a valid projection        \n"
+        "    if ((projectionSpace.w <= 0.0f) ||                 \n"
+        "        any(greaterThan(uv, one)) ||                   \n"
+        "        any(lessThan(uv, zero))) {                     \n"
+        "        discard;                                       \n"
+        "    }                                                  \n"
+        "    color = texture(tex, uv);                          \n"
+        "}                                                      \n";
+
+#endif // SHADER_PROJECTED_TEX_H
\ No newline at end of file
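For clarity, the mapping the pixel shader performs, written as an illustrative CPU-side reference: a point that projects to clip coordinates (0, 0) with w > 0 lands at uv = (0.5, 0.5), the center of the sensor image, and anything behind the sensor or outside the unit square is discarded.

    // Sketch only: projection-space point -> sensor UV, mirroring pixShader_projectedTexture
    bool projectToSensorUv(float px, float py, float pw, float* u, float* v) {
        if (pw <= 0.0f) return false;              // behind the sensor: the shader discards
        float csX = px / pw;                       // perspective divide
        float csY = -(py / pw);                    // flip the texture vertically
        *u = (csX + 1.0f) * 0.5f;                  // -1..1 clip space -> 0..1 uv space
        *v = (csY + 1.0f) * 0.5f;
        return (*u >= 0.0f && *u <= 1.0f && *v >= 0.0f && *v <= 1.0f);
    }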
diff --git a/evs/app/shader_simpleTex.h b/evs/app/shader_simpleTex.h
new file mode 100644
index 0000000..0e962bd
--- /dev/null
+++ b/evs/app/shader_simpleTex.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHADER_SIMPLE_TEX_H
+#define SHADER_SIMPLE_TEX_H
+
+const char vtxShader_simpleTexture[] = ""
+        "#version 300 es                    \n"
+        "layout(location = 0) in vec4 pos;  \n"
+        "layout(location = 1) in vec2 tex;  \n"
+        "uniform mat4 cameraMat;            \n"
+        "out vec2 uv;                       \n"
+        "void main()                        \n"
+        "{                                  \n"
+        "   gl_Position = cameraMat * pos;  \n"
+        "   uv = tex;                       \n"
+        "}                                  \n";
+
+const char pixShader_simpleTexture[] =
+        "#version 300 es                            \n"
+        "precision mediump float;                   \n"
+        "uniform sampler2D tex;                     \n"
+        "in vec2 uv;                                \n"
+        "out vec4 color;                            \n"
+        "void main()                                \n"
+        "{                                          \n"
+        "    vec4 texel = texture(tex, uv);         \n"
+        "    color = texel;                         \n"
+        "}                                          \n";
+
+#endif // SHADER_SIMPLE_TEX_H
\ No newline at end of file
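A minimal sketch tying the simple-texture shader pair to buildShaderProgram(); mvpMatrix is a placeholder for whatever 4x4 transform the caller supplies to cameraMat:

    // Sketch only: compile the simple texturing program and point its sampler at unit 0
    GLuint program = buildShaderProgram(vtxShader_simpleTexture,
                                        pixShader_simpleTexture,
                                        "simpleTexture");
    if (program != 0) {
        glUseProgram(program);
        glUniform1i(glGetUniformLocation(program, "tex"), 0);   // sample from GL_TEXTURE0
        // glUniformMatrix4fv(glGetUniformLocation(program, "cameraMat"), 1, GL_FALSE, mvpMatrix);
    }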
diff --git a/evs/app/shader_testColors.h b/evs/app/shader_testColors.h
new file mode 100644
index 0000000..20d73be
--- /dev/null
+++ b/evs/app/shader_testColors.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHADER_TESTCOLORS_H
+#define SHADER_TESTCOLORS_H
+
+const char vtxShader_testColors[] =
+        "#version 300 es                    \n"
+        "layout(location = 0) in vec4 pos;  \n"
+        "out vec2 uv;                       \n"
+        "void main()                        \n"
+        "{                                  \n"
+        "   gl_Position = pos;              \n"
+        "   // using the screen space position as the UV coordinates\n"
+        "   uv = pos.xy * 0.5f + 0.5f;      \n"
+        "}                                  \n";
+
+const char pixShader_testColors[] =
+        "#version 300 es                            \n"
+        "precision mediump float;                   \n"
+        "uniform sampler2D tex;                     \n"
+        "in vec2 uv;                                \n"
+        "out vec4 color;                            \n"
+        "void main()                                \n"
+        "{                                          \n"
+        "    //            R,   G,   B,   A         \n"
+        "    color = vec4(0.5, 1.0, 0.5, 1.0);      \n"
+        "    color.r = uv.x;                        \n"
+        "    color.b = uv.y;                        \n"
+        "}                                          \n";
+
+#endif // SHADER_TESTCOLORS_H
\ No newline at end of file
diff --git a/evs/manager/ServiceNames.h b/evs/manager/ServiceNames.h
index 3d85001..46705a9 100644
--- a/evs/manager/ServiceNames.h
+++ b/evs/manager/ServiceNames.h
@@ -16,7 +16,7 @@
 
 
 // This is the name as which we'll register ourselves
-const static char kManagedEnumeratorName[] = "EvsSharedEnumerator";
+const static char kManagedEnumeratorName[] = "default";
 
 // This is the name of the hardware provider to which we'll bind
 // TODO:  How should we configure these values to target appropriate hardware?