Merge "Add projected texture support to evs_app"
diff --git a/car_product/build/car_base.mk b/car_product/build/car_base.mk
index 7da9477..e73603c 100644
--- a/car_product/build/car_base.mk
+++ b/car_product/build/car_base.mk
@@ -88,7 +88,8 @@
PRODUCT_PACKAGES += evs_app
PRODUCT_COPY_FILES += \
packages/services/Car/evs/app/config.json:system/etc/automotive/evs/config.json \
- packages/services/Car/evs/app/CarFromTop.png:system/etc/automotive/evs/CarFromTop.png
+ packages/services/Car/evs/app/CarFromTop.png:system/etc/automotive/evs/CarFromTop.png \
+ packages/services/Car/evs/app/LabeledChecker.png:system/etc/automotive/evs/LabeledChecker.png
PRODUCT_PACKAGES += android.hardware.automotive.vehicle@2.1-service
diff --git a/evs/app/Android.mk b/evs/app/Android.mk
index 5cf6eec..784068d 100644
--- a/evs/app/Android.mk
+++ b/evs/app/Android.mk
@@ -8,7 +8,7 @@
EvsStateControl.cpp \
RenderBase.cpp \
RenderDirectView.cpp \
- RenderPixelCopy.cpp \
+ RenderTopView.cpp \
ConfigManager.cpp \
glError.cpp \
shader.cpp \
diff --git a/evs/app/ConfigManager.h b/evs/app/ConfigManager.h
index cf1c4df..0d24919 100644
--- a/evs/app/ConfigManager.h
+++ b/evs/app/ConfigManager.h
@@ -56,7 +56,7 @@
};
float getDisplayRightLocation(float aspectRatio) const {
// Given the display aspect ratio (width over height), how far can we see to the right?
- return (getDisplayTopLocation() - getDisplayBottomLocation()) * 0.5f / aspectRatio;
+ return (getDisplayTopLocation() - getDisplayBottomLocation()) * 0.5f * aspectRatio;
};
float getDisplayLeftLocation(float aspectRatio) const {
// Given the display aspect ratio (width over height), how far can we see to the left?
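The sign flip in getDisplayRightLocation() above is the whole point: a wider display (larger width-over-height ratio) should see farther to the side, not less far. A quick sanity check with purely hypothetical numbers, not taken from this change:

    // Hypothetical: 16:9 display, visible range from +8.0 (top) to -2.0 (bottom) car-space units
    const float aspectRatio = 16.0f / 9.0f;                   // width over height
    const float visibleHeight = 8.0f - (-2.0f);               // top - bottom = 10 units
    const float right = visibleHeight * 0.5f * aspectRatio;   // ~8.9 units with the fix
    // The old "* 0.5f / aspectRatio" form gives ~2.8 units, i.e. a narrower view on a
    // wider display, which is backwards.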
diff --git a/evs/app/EvsStateControl.cpp b/evs/app/EvsStateControl.cpp
index eb3c698..44986c0 100644
--- a/evs/app/EvsStateControl.cpp
+++ b/evs/app/EvsStateControl.cpp
@@ -15,7 +15,7 @@
*/
#include "EvsStateControl.h"
#include "RenderDirectView.h"
-#include "RenderPixelCopy.h"
+#include "RenderTopView.h"
#include <stdio.h>
#include <string.h>
@@ -38,6 +38,7 @@
mVehicle(pVnet),
mEvs(pEvs),
mDisplay(pDisplay),
+ mConfig(config),
mCurrentState(OFF) {
// Initialize the property value containers we'll be updating (they'll be zeroed by default)
@@ -49,10 +50,10 @@
mGearValue.prop = static_cast<int32_t>(VehicleProperty::GEAR_SELECTION);
mTurnSignalValue.prop = static_cast<int32_t>(VehicleProperty::TURN_SIGNAL_STATE);
+#if 0 // This way we only ever deal with cameras which exist in the system
// Build our set of cameras for the states we support
ALOGD("Requesting camera list");
- mEvs->getCameraList([this, &config]
- (hidl_vec<CameraDesc> cameraList) {
+ mEvs->getCameraList([this, &config](hidl_vec<CameraDesc> cameraList) {
ALOGI("Camera list callback received %zu cameras",
cameraList.size());
for (auto&& cam: cameraList) {
@@ -62,17 +63,23 @@
// Check our configuration for information about this camera
// Note that a camera can have a compound function string
// such that a camera can be "right/reverse" and be used for both.
+ // If more than one camera is listed for a given function, we'll
+ // list all of them and let the UX/rendering logic use one, some
+ // or all of them as appropriate.
for (auto&& info: config.getCameras()) {
if (cam.cameraId == info.cameraId) {
// We found a match!
if (info.function.find("reverse") != std::string::npos) {
- mCameraInfo[State::REVERSE] = info;
+ mCameraList[State::REVERSE].push_back(info);
}
if (info.function.find("right") != std::string::npos) {
- mCameraInfo[State::RIGHT] = info;
+ mCameraList[State::RIGHT].push_back(info);
}
if (info.function.find("left") != std::string::npos) {
- mCameraInfo[State::LEFT] = info;
+ mCameraList[State::LEFT].push_back(info);
+ }
+ if (info.function.find("park") != std::string::npos) {
+ mCameraList[State::PARKING].push_back(info);
}
cameraConfigFound = true;
break;
@@ -85,6 +92,25 @@
}
}
);
+#else // This way we use placeholders for cameras in the configuration but not reported by EVS
+ // Build our set of cameras for the states we support
+ ALOGD("Requesting camera list");
+ for (auto&& info: config.getCameras()) {
+ if (info.function.find("reverse") != std::string::npos) {
+ mCameraList[State::REVERSE].push_back(info);
+ }
+ if (info.function.find("right") != std::string::npos) {
+ mCameraList[State::RIGHT].push_back(info);
+ }
+ if (info.function.find("left") != std::string::npos) {
+ mCameraList[State::LEFT].push_back(info);
+ }
+ if (info.function.find("park") != std::string::npos) {
+ mCameraList[State::PARKING].push_back(info);
+ }
+ }
+#endif
+
ALOGD("State controller ready");
}
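For reference, with the config.json updated later in this change, the placeholder path above populates the per-state lists roughly as sketched below (illustration only; "front" maps to no drive state, so that camera only appears under PARKING):

    // mCameraList[State::REVERSE] : { "/dev/video45" }                          "reverse,park"
    // mCameraList[State::RIGHT]   : { "/dev/video0" }                           "right,park"
    // mCameraList[State::LEFT]    : { }                                         no "left" camera
    // mCameraList[State::PARKING] : { "/dev/video45", "/dev/video1", "/dev/video0" }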
@@ -210,6 +236,7 @@
}
// Choose our desired EVS state based on the current car state
+ // TODO: Update this logic, and include user input when choosing if a view should be presented
State desiredState = OFF;
if (mGearValue.value.int32Values[0] == int32_t(VehicleGear::GEAR_REVERSE)) {
desiredState = REVERSE;
@@ -217,6 +244,8 @@
desiredState = RIGHT;
} else if (mTurnSignalValue.value.int32Values[0] == int32_t(VehicleTurnSignal::LEFT)) {
desiredState = LEFT;
+ } else if (mGearValue.value.int32Values[0] == int32_t(VehicleGear::GEAR_PARK)) {
+ desiredState = PARKING;
}
ALOGV("Selected state %d.", desiredState);
@@ -252,10 +281,10 @@
return true;
}
- ALOGD(" Current state (%d) = %s", mCurrentState,
- mCameraInfo[mCurrentState].cameraId.c_str());
- ALOGD(" Desired state (%d) = %s", desiredState,
- mCameraInfo[desiredState].cameraId.c_str());
+ ALOGD(" Current state %d has %zu cameras", mCurrentState,
+ mCameraList[mCurrentState].size());
+ ALOGD(" Desired state %d has %zu cameras", desiredState,
+ mCameraList[desiredState].size());
// Since we're changing states, shut down the current renderer
if (mCurrentRenderer != nullptr) {
@@ -264,15 +293,21 @@
}
// Do we need a new direct view renderer?
- if (desiredState == PARKING) {
- // We need a new top view renderer
- // TODO: Implement this by instantiating a new RenderTopView
- } else if (!mCameraInfo[desiredState].cameraId.empty()) {
- // We have a camera assigned to this state for direct view
- mCurrentRenderer = std::make_unique<RenderDirectView>(mEvs, mCameraInfo[desiredState]);
-// mCurrentRenderer = std::make_unique<RenderPixelCopy>(mEvs, mCameraInfo[desiredState]);
+ if (mCameraList[desiredState].size() > 1 || desiredState == PARKING) {
+ // TODO: Do we want other kinds of compound views, or sequentially selected views?
+ mCurrentRenderer = std::make_unique<RenderTopView>(mEvs,
+ mCameraList[desiredState],
+ mConfig);
if (!mCurrentRenderer) {
- ALOGE("Failed to construct renderer. Skipping state change.");
+ ALOGE("Failed to construct top view renderer. Skipping state change.");
+ return false;
+ }
+ } else if (mCameraList[desiredState].size() == 1) {
+ // We have a camera assigned to this state for direct view
+ mCurrentRenderer = std::make_unique<RenderDirectView>(mEvs,
+ mCameraList[desiredState][0]);
+ if (!mCurrentRenderer) {
+ ALOGE("Failed to construct direct renderer. Skipping state change.");
return false;
}
}
diff --git a/evs/app/EvsStateControl.h b/evs/app/EvsStateControl.h
index 3a830cf..cfb6833 100644
--- a/evs/app/EvsStateControl.h
+++ b/evs/app/EvsStateControl.h
@@ -86,13 +86,14 @@
sp<IVehicle> mVehicle;
sp<IEvsEnumerator> mEvs;
sp<IEvsDisplay> mDisplay;
+ const ConfigManager& mConfig;
VehiclePropValue mGearValue;
VehiclePropValue mTurnSignalValue;
State mCurrentState = OFF;
- ConfigManager::CameraInfo mCameraInfo[NUM_STATES] = {};
+ std::vector<ConfigManager::CameraInfo> mCameraList[NUM_STATES];
std::unique_ptr<RenderBase> mCurrentRenderer;
std::thread mRenderThread; // The thread that runs the main rendering loop
diff --git a/evs/app/LabeledChecker.png b/evs/app/LabeledChecker.png
new file mode 100644
index 0000000..02da85d
--- /dev/null
+++ b/evs/app/LabeledChecker.png
Binary files differ
diff --git a/evs/app/RenderBase.cpp b/evs/app/RenderBase.cpp
index d8bb7f0..cb6aa93 100644
--- a/evs/app/RenderBase.cpp
+++ b/evs/app/RenderBase.cpp
@@ -36,7 +36,7 @@
EGLImageKHR RenderBase::sKHRimage = EGL_NO_IMAGE_KHR;
unsigned RenderBase::sWidth = 0;
unsigned RenderBase::sHeight = 0;
-
+float RenderBase::sAspectRatio = 0.0f;
bool RenderBase::prepareGL() {
@@ -182,21 +182,6 @@
return false;
}
-#if 0 // Do we need a z-buffer? Does this code work correctly?
- // Request a (local) depth buffer so we can z-test while drawing
- glBindRenderbuffer(GL_RENDERBUFFER, sDepthBuffer);
- if ((sWidth != tgtBuffer.width) || (sHeight != tgtBuffer.height)) {
- // We can't reuse the depth buffer, so make a new one
- sWidth = tgtBuffer.width;
- sHeight = tgtBuffer.height;
- glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, sWidth, sHeight);
- glFramebufferRenderbuffer(GL_FRAMEBUFFER,
- GL_DEPTH_ATTACHMENT,
- GL_RENDERBUFFER,
- sDepthBuffer);
- }
-#endif
-
GLenum checkResult = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (checkResult != GL_FRAMEBUFFER_COMPLETE) {
ALOGE("Offscreen framebuffer not configured successfully (%d: %s)",
@@ -204,6 +189,21 @@
return false;
}
+ // Store the size of our target buffer
+ sWidth = tgtBuffer.width;
+ sHeight = tgtBuffer.height;
+ sAspectRatio = (float)sWidth / sHeight;
+
+ // Set the viewport
+ glViewport(0, 0, sWidth, sHeight);
+
+#if 1 // We don't actually need the clear if we're going to cover the whole screen anyway
+ // Clear the color buffer
+ glClearColor(0.8f, 0.1f, 0.2f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+#endif
+
+
return true;
}
diff --git a/evs/app/RenderBase.h b/evs/app/RenderBase.h
index df34639..25474d5 100644
--- a/evs/app/RenderBase.h
+++ b/evs/app/RenderBase.h
@@ -61,6 +61,7 @@
static unsigned sWidth;
static unsigned sHeight;
+ static float sAspectRatio;
};
diff --git a/evs/app/RenderDirectView.cpp b/evs/app/RenderDirectView.cpp
index a50b264..24eb485 100644
--- a/evs/app/RenderDirectView.cpp
+++ b/evs/app/RenderDirectView.cpp
@@ -19,10 +19,8 @@
#include "glError.h"
#include "shader.h"
#include "shader_simpleTex.h"
-#include "shader_testColors.h"
#include <log/log.h>
-
#include <math/mat4.h>
@@ -51,23 +49,13 @@
}
}
- // TODO: Remove this once we're done testing with it
- if (!mShaderTestColors) {
- mShaderTestColors = buildShaderProgram(vtxShader_testColors,
- pixShader_testColors,
- "testColors");
- if (!mShaderTestColors) {
- ALOGE("Error building shader program");
- return false;
- }
- }
-
// Construct our video texture
mTexture.reset(createVideoTexture(mEnumerator, mCameraInfo.cameraId.c_str(), sDisplay));
if (!mTexture) {
ALOGE("Failed to set up video texture for %s (%s)",
mCameraInfo.cameraId.c_str(), mCameraInfo.function.c_str());
- return false;
+// TODO: For production use, we may actually want to fail in this case, but not yet...
+// return false;
}
return true;
@@ -89,15 +77,6 @@
return false;
}
- // Set the viewport
- glViewport(0, 0, tgtBuffer.width, tgtBuffer.height);
-
-#if 0 // We don't actually need the clear if we're going to cover the whole screen anyway
- // Clear the color buffer
- glClearColor(0.8f, 0.1f, 0.2f, 1.0f);
- glClear(GL_COLOR_BUFFER_BIT);
-#endif
-
// Select our screen space simple texture shader
glUseProgram(mShaderProgram);
@@ -108,7 +87,7 @@
return false;
} else {
const android::mat4 identityMatrix;
- glUniformMatrix4fv(loc, 1, false, &identityMatrix[0][0]);
+ glUniformMatrix4fv(loc, 1, false, identityMatrix.asArray());
}
@@ -118,20 +97,6 @@
glBindTexture(GL_TEXTURE_2D, mTexture->glId());
-#if 0
- static TexWrapper* sTestTexture = createTextureFromPng("/system/etc/automotive/evs/CarFromTop.png");
- if (sTestTexture) {
- static int tickTock = 0;
- tickTock =~tickTock;
- if (tickTock) {
- printf("tick...");
- glBindTexture(GL_TEXTURE_2D, sTestTexture->glId());
- } else {
- printf("tock\n");
- }
- }
-#endif
-
GLint sampler = glGetUniformLocation(mShaderProgram, "tex");
if (sampler < 0) {
ALOGE("Couldn't set shader parameter 'tex'");
@@ -146,20 +111,11 @@
// Draw a rectangle on the screen
- // TODO: We pulled in from the edges for now for diagnostic purposes...
-#if 1
GLfloat vertsCarPos[] = { -1.0, 1.0, 0.0f, // left top in window space
1.0, 1.0, 0.0f, // right top
-1.0, -1.0, 0.0f, // left bottom
1.0, -1.0, 0.0f // right bottom
};
-#else
- GLfloat vertsCarPos[] = { -0.8, 0.8, 0.0f, // left top in window space
- 0.8, 0.8, 0.0f, // right top
- -0.8, -0.8, 0.0f, // left bottom
- 0.8, -0.8, 0.0f // right bottom
- };
-#endif
// TODO: We're flipping horizontally here, but should do it only for specified cameras!
GLfloat vertsCarTex[] = { 1.0f, 1.0f, // left top
0.0f, 1.0f, // right top
diff --git a/evs/app/RenderDirectView.h b/evs/app/RenderDirectView.h
index c452297..1543fce 100644
--- a/evs/app/RenderDirectView.h
+++ b/evs/app/RenderDirectView.h
@@ -35,8 +35,8 @@
public:
RenderDirectView(sp<IEvsEnumerator> enumerator, const ConfigManager::CameraInfo& cam);
- virtual bool activate();
- virtual void deactivate();
+ virtual bool activate() override;
+ virtual void deactivate() override;
virtual bool drawFrame(const BufferDesc& tgtBuffer);
@@ -47,7 +47,6 @@
std::unique_ptr<VideoTex> mTexture;
GLuint mShaderProgram = 0;
- GLuint mShaderTestColors = 0;
};
diff --git a/evs/app/RenderPixelCopy.h b/evs/app/RenderPixelCopy.h
index ff3917a..ee6eede 100644
--- a/evs/app/RenderPixelCopy.h
+++ b/evs/app/RenderPixelCopy.h
@@ -35,8 +35,8 @@
public:
RenderPixelCopy(sp<IEvsEnumerator> enumerator, const ConfigManager::CameraInfo& cam);
- virtual bool activate();
- virtual void deactivate();
+ virtual bool activate() override;
+ virtual void deactivate() override;
virtual bool drawFrame(const BufferDesc& tgtBuffer);
diff --git a/evs/app/RenderTopView.cpp b/evs/app/RenderTopView.cpp
new file mode 100644
index 0000000..a44f2af
--- /dev/null
+++ b/evs/app/RenderTopView.cpp
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "RenderTopView.h"
+#include "VideoTex.h"
+#include "glError.h"
+#include "shader.h"
+#include "shader_simpleTex.h"
+#include "shader_projectedTex.h"
+
+#include <log/log.h>
+#include <math/mat4.h>
+#include <math/vec3.h>
+
+
+// Simple aliases to make geometric math using vectors more readable
+static const unsigned X = 0;
+static const unsigned Y = 1;
+static const unsigned Z = 2;
+//static const unsigned W = 3;
+
+
+// Since we assume no roll in these views, we can simplify the required math
+static android::vec3 unitVectorFromPitchAndYaw(float pitch, float yaw) {
+ float sinPitch, cosPitch;
+ sincosf(pitch, &sinPitch, &cosPitch);
+ float sinYaw, cosYaw;
+ sincosf(yaw, &sinYaw, &cosYaw);
+ return android::vec3(cosPitch * -sinYaw,
+ cosPitch * cosYaw,
+ sinPitch);
+}
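A few spot checks of the convention encoded above (a sketch; angles in radians, yaw measured about +Z starting from +Y, positive pitch tilting toward +Z):

    // unitVectorFromPitchAndYaw( 0.0f,     0.0f)    -> ( 0,  1,  0)   yaw 0 looks along +Y
    // unitVectorFromPitchAndYaw( 0.0f,     M_PI)    -> ( 0, -1,  0)   yaw 180 looks along -Y
    // unitVectorFromPitchAndYaw( 0.0f,    -M_PI_2)  -> ( 1,  0,  0)   yaw -90 looks along +X
    // unitVectorFromPitchAndYaw(-M_PI_2,   0.0f)    -> ( 0,  0, -1)   straight down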
+
+
+// Helper function to set up a perspective matrix with independent horizontal and vertical
+// angles of view.
+static android::mat4 perspective(float hfov, float vfov, float near, float far) {
+ const float tanHalfFovX = tanf(hfov * 0.5f);
+ const float tanHalfFovY = tanf(vfov * 0.5f);
+
+ android::mat4 p(0.0f);
+ p[0][0] = 1.0f / tanHalfFovX;
+ p[1][1] = 1.0f / tanHalfFovY;
+ p[2][2] = - (far + near) / (far - near);
+ p[2][3] = -1.0f;
+ p[3][2] = - (2.0f * far * near) / (far - near);
+ return p;
+}
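With android::mat4's column-major indexing (p[column][row]), the assignments above spell out the familiar OpenGL-style projection matrix, just parameterized by two independent half-angle tangents rather than one FOV plus an aspect ratio:

    //  | 1/tan(hfov/2)        0                  0                        0             |
    //  |       0        1/tan(vfov/2)            0                        0             |
    //  |       0              0        -(far+near)/(far-near)   -2*far*near/(far-near)  |
    //  |       0              0                 -1                        0             |

The same matrix falls out of a symmetric frustum with right = near * tan(hfov/2) and top = near * tan(vfov/2).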
+
+
+// Helper function to set up a view matrix for a camera given its yaw & pitch & location
+// Yes, with a bit of work, we could use lookAt, but it does a lot of extra work
+// internally that we can shortcut.
+static android::mat4 cameraLookMatrix(const ConfigManager::CameraInfo& cam) {
+ float sinYaw, cosYaw;
+ sincosf(cam.yaw, &sinYaw, &cosYaw);
+
+ // Construct principal unit vectors
+ android::vec3 vAt = unitVectorFromPitchAndYaw(cam.pitch, cam.yaw);
+ android::vec3 vRt = android::vec3(cosYaw, sinYaw, 0.0f);
+ android::vec3 vUp = -cross(vAt, vRt);
+ android::vec3 eye = android::vec3(cam.position[X], cam.position[Y], cam.position[Z]);
+
+ android::mat4 Result(1.0f);
+ Result[0][0] = vRt.x;
+ Result[1][0] = vRt.y;
+ Result[2][0] = vRt.z;
+ Result[0][1] = vUp.x;
+ Result[1][1] = vUp.y;
+ Result[2][1] = vUp.z;
+ Result[0][2] =-vAt.x;
+ Result[1][2] =-vAt.y;
+ Result[2][2] =-vAt.z;
+ Result[3][0] =-dot(vRt, eye);
+ Result[3][1] =-dot(vUp, eye);
+ Result[3][2] = dot(vAt, eye);
+ return Result;
+}
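Written out with the same column-major indexing, the result is the usual view matrix: the camera basis vectors as rows, with the eye position folded into the last column. It is what a conventional lookAt(eye, eye + vAt, vUp) would produce, minus the work of re-deriving the basis from a target point:

    //  |  vRt.x   vRt.y   vRt.z   -dot(vRt, eye) |
    //  |  vUp.x   vUp.y   vUp.z   -dot(vUp, eye) |
    //  | -vAt.x  -vAt.y  -vAt.z    dot(vAt, eye) |
    //  |    0       0       0           1        |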
+
+
+RenderTopView::RenderTopView(sp<IEvsEnumerator> enumerator,
+ const std::vector<ConfigManager::CameraInfo>& camList,
+ const ConfigManager& config) :
+ mEnumerator(enumerator),
+ mConfig(config) {
+
+ // Copy the list of cameras we're to employ into our local storage. We'll create and
+ // associate a streaming video texture when we are activated.
+ mActiveCameras.reserve(camList.size());
+ for (unsigned i=0; i<camList.size(); i++) {
+ mActiveCameras.emplace_back(camList[i]);
+ }
+}
+
+
+bool RenderTopView::activate() {
+ // Ensure GL is ready to go...
+ if (!prepareGL()) {
+ ALOGE("Error initializing GL");
+ return false;
+ }
+
+ // Load our shader programs
+ mPgmAssets.simpleTexture = buildShaderProgram(vtxShader_simpleTexture,
+ pixShader_simpleTexture,
+ "simpleTexture");
+ if (!mPgmAssets.simpleTexture) {
+ ALOGE("Failed to build shader program");
+ return false;
+ }
+ mPgmAssets.projectedTexture = buildShaderProgram(vtxShader_projectedTexture,
+ pixShader_projectedTexture,
+ "projectedTexture");
+ if (!mPgmAssets.projectedTexture) {
+ ALOGE("Failed to build shader program");
+ return false;
+ }
+
+
+ // Load the labeled checkerboard image
+ mTexAssets.checkerBoard.reset(createTextureFromPng(
+ "/system/etc/automotive/evs/LabeledChecker.png"));
+ if (!mTexAssets.checkerBoard->glId()) {
+ ALOGE("Failed to load checkerboard texture");
+ return false;
+ }
+
+ // Load the car image
+ mTexAssets.carTopView.reset(createTextureFromPng(
+ "/system/etc/automotive/evs/CarFromTop.png"));
+ if (!mTexAssets.carTopView->glId()) {
+ ALOGE("Failed to load carTopView texture");
+ return false;
+ }
+
+
+ // Set up streaming video textures for our associated cameras
+ for (auto&& cam: mActiveCameras) {
+ cam.tex.reset(createVideoTexture(mEnumerator, cam.info.cameraId.c_str(), sDisplay));
+ if (!cam.tex) {
+ ALOGE("Failed to set up video texture for %s (%s)",
+ cam.info.cameraId.c_str(), cam.info.function.c_str());
+// TODO: For production use, we may actually want to fail in this case, but not yet...
+// return false;
+ }
+ }
+
+ return true;
+}
+
+
+void RenderTopView::deactivate() {
+ // Release our video textures
+ // We can't hold onto them because some other Render object might need the same camera
+ // TODO: If start/stop costs become a problem, we could share video textures
+ for (auto&& cam: mActiveCameras) {
+ cam.tex = nullptr;
+ }
+}
+
+
+bool RenderTopView::drawFrame(const BufferDesc& tgtBuffer) {
+ // Tell GL to render to the given buffer
+ if (!attachRenderTarget(tgtBuffer)) {
+ ALOGE("Failed to attached render target");
+ return false;
+ }
+
+ // Set up our top down projection matrix from car space (world units, Xfwd, Yright, Zup)
+ // to view space (-1 to 1)
+ const float top = mConfig.getDisplayTopLocation();
+ const float bottom = mConfig.getDisplayBottomLocation();
+ const float right = mConfig.getDisplayRightLocation(sAspectRatio);
+ const float left = mConfig.getDisplayLeftLocation(sAspectRatio);
+
+ const float near = 10.0f; // arbitrary top of view volume
+ const float far = 0.0f; // ground plane is at zero
+
+ // We can use a simple, unrotated ortho view since the screen and car space axes are
+ // naturally aligned in the top down view.
+ // TODO: Not sure if flipping top/bottom here is "correct" or a double reverse...
+// orthoMatrix = android::mat4::ortho(left, right, bottom, top, near, far);
+ orthoMatrix = android::mat4::ortho(left, right, top, bottom, near, far);
+
+
+ // Refresh our video texture contents. We do it all at once in hopes of getting
+ // better coherence among images. This does not guarantee synchronization, of course...
+ for (auto&& cam: mActiveCameras) {
+ if (cam.tex) {
+ cam.tex->refresh();
+ }
+ }
+
+ // Iterate over all the cameras and project their images onto the ground plane
+ for (auto&& cam: mActiveCameras) {
+ renderCameraOntoGroundPlane(cam);
+ }
+
+ // Draw the car image
+ renderCarTopView();
+
+ // Wait for the rendering to finish
+ glFinish();
+
+ return true;
+}
+
+
+//
+// Responsible for drawing the car's self image in the top down view.
+// Draws in car model space (units of meters with origin at the center of the rear axle)
+// NOTE: We probably want to eventually switch to using a VertexArray based model system.
+//
+void RenderTopView::renderCarTopView() {
+ // Compute the corners of our image footprint in car space
+ const float carLengthInTexels = mConfig.carGraphicRearPixel() - mConfig.carGraphicFrontPixel();
+ const float carSpaceUnitsPerTexel = mConfig.getCarLength() / carLengthInTexels;
+ const float textureHeightInCarSpace = mTexAssets.carTopView->height() * carSpaceUnitsPerTexel;
+ const float textureAspectRatio = (float)mTexAssets.carTopView->width() /
+ mTexAssets.carTopView->height();
+ const float pixelsBehindCarInImage = mTexAssets.carTopView->height() -
+ mConfig.carGraphicRearPixel();
+ const float textureExtentBehindCarInCarSpace = pixelsBehindCarInImage * carSpaceUnitsPerTexel;
+
+ const float btCS = mConfig.getRearLocation() - textureExtentBehindCarInCarSpace;
+ const float tpCS = textureHeightInCarSpace + btCS;
+ const float ltCS = 0.5f * textureHeightInCarSpace * textureAspectRatio;
+ const float rtCS = -ltCS;
+
+ GLfloat vertsCarPos[] = { ltCS, tpCS, 0.0f, // left top in car space
+ rtCS, tpCS, 0.0f, // right top
+ ltCS, btCS, 0.0f, // left bottom
+ rtCS, btCS, 0.0f // right bottom
+ };
+ // NOTE: We didn't flip the image in the texture, so V=0 is actually the top of the image
+ GLfloat vertsCarTex[] = { 0.0f, 0.0f, // left top
+ 1.0f, 0.0f, // right top
+ 0.0f, 1.0f, // left bottom
+ 1.0f, 1.0f // right bottom
+ };
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsCarPos);
+ glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, vertsCarTex);
+ glEnableVertexAttribArray(0);
+ glEnableVertexAttribArray(1);
+
+
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+ glUseProgram(mPgmAssets.simpleTexture);
+ GLint loc = glGetUniformLocation(mPgmAssets.simpleTexture, "cameraMat");
+ glUniformMatrix4fv(loc, 1, false, orthoMatrix.asArray());
+ glBindTexture(GL_TEXTURE_2D, mTexAssets.carTopView->glId());
+
+ printf("top view tex=%u\n", mTexAssets.carTopView->glId());
+
+ glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+
+
+ glDisable(GL_BLEND);
+
+ glDisableVertexAttribArray(0);
+ glDisableVertexAttribArray(1);
+}
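For intuition, here is the footprint math above worked through with purely hypothetical config values (none of these numbers come from this change): a 1000-texel-tall car PNG with the nose at pixel 150 and the tail at pixel 850, a 4.8 m car, and getRearLocation() returning -1.0 m:

    const float carLengthInTexels = 850.0f - 150.0f;                         // 700 texels
    const float carSpaceUnitsPerTexel = 4.8f / carLengthInTexels;            // ~0.0069 m per texel
    const float textureHeightInCarSpace = 1000.0f * carSpaceUnitsPerTexel;   // ~6.86 m of ground
    const float textureExtentBehindCar = (1000.0f - 850.0f) * carSpaceUnitsPerTexel;   // ~1.03 m
    const float btCS = -1.0f - textureExtentBehindCar;                       // bottom edge ~ -2.03 m
    const float tpCS = textureHeightInCarSpace + btCS;                       // top edge   ~ +4.83 m

So the car graphic is drawn to scale around the origin, with roughly a meter of image extending beyond the rear bumper under these assumed numbers and the rest ahead of it.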
+
+
+// NOTE: Might be worth reviewing the ideas at
+// http://math.stackexchange.com/questions/1691895/inverse-of-perspective-matrix
+// to see if that simplifies the math, although we'll still want to compute the actual ground
+// intersection points taking into account the pitchLimit as below.
+void RenderTopView::renderCameraOntoGroundPlane(const ActiveCamera& cam) {
+ // How far is the farthest any camera should even consider projecting its image?
+ const float visibleSizeV = mConfig.getDisplayTopLocation() - mConfig.getDisplayBottomLocation();
+ const float visibleSizeH = visibleSizeV * sAspectRatio;
+ const float maxRange = (visibleSizeH > visibleSizeV) ? visibleSizeH : visibleSizeV;
+
+ // Construct the projection matrix (View + Projection) associated with this sensor
+ // TODO: Consider just hard coding the far plane distance as it likely doesn't matter
+ const android::mat4 V = cameraLookMatrix(cam.info);
+ const android::mat4 P = perspective(cam.info.hfov, cam.info.vfov, cam.info.position[Z], maxRange);
+ const android::mat4 projectionMatrix = P*V;
+
+ // Just draw the whole darn ground plane for now -- we're wasting fill rate, but so what?
+ // A 2x optimization would be to draw only the 1/2 space of the window in the direction
+ // the sensor is facing. A more complex solution would be to construct the intersection
+ // of the sensor volume with the ground plane and render only that geometry.
+ const float top = mConfig.getDisplayTopLocation();
+ const float bottom = mConfig.getDisplayBottomLocation();
+ const float wsHeight = top - bottom;
+ const float wsWidth = wsHeight * sAspectRatio;
+ const float right = wsWidth * 0.5f;
+ const float left = -right;
+
+ const android::vec3 topLeft(left, top, 0.0f);
+ const android::vec3 topRight(right, top, 0.0f);
+ const android::vec3 botLeft(left, bottom, 0.0f);
+ const android::vec3 botRight(right, bottom, 0.0f);
+
+ GLfloat vertsPos[] = { topLeft[X], topLeft[Y], topLeft[Z],
+ topRight[X], topRight[Y], topRight[Z],
+ botLeft[X], botLeft[Y], botLeft[Z],
+ botRight[X], botRight[Y], botRight[Z],
+ };
+ glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsPos);
+ glEnableVertexAttribArray(0);
+
+
+ glDisable(GL_BLEND);
+
+ glUseProgram(mPgmAssets.projectedTexture);
+ GLint locCam = glGetUniformLocation(mPgmAssets.projectedTexture, "cameraMat");
+ glUniformMatrix4fv(locCam, 1, false, orthoMatrix.asArray());
+ GLint locProj = glGetUniformLocation(mPgmAssets.projectedTexture, "projectionMat");
+ glUniformMatrix4fv(locProj, 1, false, projectionMatrix.asArray());
+
+ GLuint texId;
+ if (cam.tex) {
+ texId = cam.tex->glId();
+ } else {
+ texId = mTexAssets.checkerBoard->glId();
+ }
+ printf("projected tex=%u\n", texId);
+ glBindTexture(GL_TEXTURE_2D, texId);
+
+ glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+ // glDrawArrays(GL_LINE_STRIP, 0, 4);
+
+
+ glDisableVertexAttribArray(0);
+}
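The GLSL in shader_projectedTex.h is not part of this excerpt, so as a rough CPU-side sketch of the projective-texturing idea the "projectionMat" uniform feeds (names and formulation are illustrative only, not the real shader): each ground-plane point is pushed through the camera's view-projection matrix, and its clip-space position is remapped from NDC [-1, 1] to texture coordinates [0, 1].

    // Illustrative sketch only; assumes <math/vec4.h> in addition to the headers above.
    static void groundPointToCameraUv(const android::mat4& projectionMatrix,   // P*V, as above
                                      const android::vec3& groundPoint,
                                      float* u, float* v) {
        // Project the world-space ground point into the camera's clip space
        const android::vec4 clip = projectionMatrix * android::vec4(groundPoint, 1.0f);
        // Perspective divide, then remap NDC [-1, 1] to texture space [0, 1]
        *u = 0.5f * (clip.x / clip.w) + 0.5f;
        *v = 0.5f * (clip.y / clip.w) + 0.5f;
        // Points with clip.w <= 0 lie behind the camera and should be discarded
    }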
diff --git a/evs/app/RenderTopView.h b/evs/app/RenderTopView.h
new file mode 100644
index 0000000..570718f
--- /dev/null
+++ b/evs/app/RenderTopView.h
@@ -0,0 +1,76 @@
+
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAR_EVS_APP_RENDERTOPVIEW_H
+#define CAR_EVS_APP_RENDERTOPVIEW_H
+
+
+#include "RenderBase.h"
+
+#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include "ConfigManager.h"
+#include "VideoTex.h"
+#include <math/mat4.h>
+
+
+using namespace ::android::hardware::automotive::evs::V1_0;
+
+
+/*
+ * Combines the views from all available cameras into one reprojected top down view.
+ */
+class RenderTopView: public RenderBase {
+public:
+ RenderTopView(sp<IEvsEnumerator> enumerator,
+ const std::vector<ConfigManager::CameraInfo>& camList,
+ const ConfigManager& config);
+
+ virtual bool activate() override;
+ virtual void deactivate() override;
+
+ virtual bool drawFrame(const BufferDesc& tgtBuffer);
+
+protected:
+ struct ActiveCamera {
+ const ConfigManager::CameraInfo& info;
+ std::unique_ptr<VideoTex> tex;
+
+ ActiveCamera(const ConfigManager::CameraInfo& c) : info(c) {};
+ };
+
+ void renderCarTopView();
+ void renderCameraOntoGroundPlane(const ActiveCamera& cam);
+
+ sp<IEvsEnumerator> mEnumerator;
+ const ConfigManager& mConfig;
+ std::vector<ActiveCamera> mActiveCameras;
+
+ struct {
+ std::unique_ptr<TexWrapper> checkerBoard;
+ std::unique_ptr<TexWrapper> carTopView;
+ } mTexAssets;
+
+ struct {
+ GLuint simpleTexture;
+ GLuint projectedTexture;
+ } mPgmAssets;
+
+ android::mat4 orthoMatrix;
+};
+
+
+#endif //CAR_EVS_APP_RENDERTOPVIEW_H
diff --git a/evs/app/config.json b/evs/app/config.json
index 7ba447d..dedac44 100644
--- a/evs/app/config.json
+++ b/evs/app/config.json
@@ -16,18 +16,18 @@
"cameras" : [
{
"cameraId" : "/dev/video45",
- "function" : "reverse",
+ "function" : "reverse,park",
"x" : 0.0,
"y" : -40.0,
"z" : 48,
"yaw" : 180,
- "pitch" : -10,
- "hfov" : 60,
- "vfov" : 42
+ "pitch" : -30,
+ "hfov" : 90,
+ "vfov" : 60
},
{
"cameraId" : "/dev/video1",
- "function" : "front",
+ "function" : "front,park",
"x" : 0.0,
"y" : 100.0,
"z" : 48,
@@ -37,13 +37,13 @@
"vfov" : 42
},
{
- "cameraId" : "backup",
- "function" : "reverse",
- "x" : 0.0,
- "y" : 100.0,
- "z" : 48,
- "yaw" : 0,
- "pitch" : -10,
+ "cameraId" : "/dev/video0",
+ "function" : "right,park",
+ "x" : 36.0,
+ "y" : 60.0,
+ "z" : 32,
+ "yaw" : -90,
+ "pitch" : -30,
"hfov" : 60,
"vfov" : 42
}
diff --git a/evs/app/shader_testColors.h b/evs/app/shader_testColors.h
deleted file mode 100644
index 20d73be..0000000
--- a/evs/app/shader_testColors.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SHADER_TESTCOLORS_H
-#define SHADER_TESTCOLORS_H
-
-const char vtxShader_testColors[] =
- "#version 300 es \n"
- "layout(location = 0) in vec4 pos; \n"
- "out vec2 uv; \n"
- "void main() \n"
- "{ \n"
- " gl_Position = pos; \n"
- " // using the screen space position as the UV coordinates\n"
- " uv = pos.xy * 0.5f + 0.5f; \n"
- "} \n";
-
-const char pixShader_testColors[] =
- "#version 300 es \n"
- "precision mediump float; \n"
- "uniform sampler2D tex; \n"
- "in vec2 uv; \n"
- "out vec4 color; \n"
- "void main() \n"
- "{ \n"
- " // R, G, B, A \n"
- " color = vec4(0.5, 1.0, 0.5, 1.0); \n"
- " color.r = uv.x; \n"
- " color.b = uv.y; \n"
- "} \n";
-
-#endif // SHADER_TESTCOLORS_H
\ No newline at end of file