Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2017 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "RenderTopView.h" |
| 18 | #include "VideoTex.h" |
| 19 | #include "glError.h" |
| 20 | #include "shader.h" |
| 21 | #include "shader_simpleTex.h" |
| 22 | #include "shader_projectedTex.h" |
| 23 | |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 24 | #include <math/mat4.h> |
| 25 | #include <math/vec3.h> |
Changyeon Jo | aab96aa | 2019-10-12 05:24:15 -0700 | [diff] [blame] | 26 | #include <android/hardware/camera/device/3.2/ICameraDevice.h> |
Changyeon Jo | ffdf3db | 2020-03-06 15:23:02 -0800 | [diff] [blame] | 27 | #include <android-base/logging.h> |
Changyeon Jo | aab96aa | 2019-10-12 05:24:15 -0700 | [diff] [blame] | 28 | |
| 29 | using ::android::hardware::camera::device::V3_2::Stream; |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 30 | |
| 31 | |
// Simple aliases to make geometric math using vectors more readable.
// These index the components of android::vec3/vec4 and ConfigManager position arrays.
static constexpr unsigned X = 0;
static constexpr unsigned Y = 1;
static constexpr unsigned Z = 2;
//static constexpr unsigned W = 3;
| 37 | |
| 38 | |
| 39 | // Since we assume no roll in these views, we can simplify the required math |
| 40 | static android::vec3 unitVectorFromPitchAndYaw(float pitch, float yaw) { |
| 41 | float sinPitch, cosPitch; |
| 42 | sincosf(pitch, &sinPitch, &cosPitch); |
| 43 | float sinYaw, cosYaw; |
| 44 | sincosf(yaw, &sinYaw, &cosYaw); |
| 45 | return android::vec3(cosPitch * -sinYaw, |
| 46 | cosPitch * cosYaw, |
| 47 | sinPitch); |
| 48 | } |
| 49 | |
| 50 | |
| 51 | // Helper function to set up a perspective matrix with independent horizontal and vertical |
| 52 | // angles of view. |
| 53 | static android::mat4 perspective(float hfov, float vfov, float near, float far) { |
| 54 | const float tanHalfFovX = tanf(hfov * 0.5f); |
| 55 | const float tanHalfFovY = tanf(vfov * 0.5f); |
| 56 | |
| 57 | android::mat4 p(0.0f); |
| 58 | p[0][0] = 1.0f / tanHalfFovX; |
| 59 | p[1][1] = 1.0f / tanHalfFovY; |
| 60 | p[2][2] = - (far + near) / (far - near); |
| 61 | p[2][3] = -1.0f; |
| 62 | p[3][2] = - (2.0f * far * near) / (far - near); |
| 63 | return p; |
| 64 | } |
| 65 | |
| 66 | |
// Helper function to set up a view matrix for a camera given its yaw & pitch & location.
// Yes, with a bit of work, we could use lookAt, but it does a lot of extra work
// internally that we can short cut.
static android::mat4 cameraLookMatrix(const ConfigManager::CameraInfo& cam) {
    float sinYaw, cosYaw;
    sincosf(cam.yaw, &sinYaw, &cosYaw);

    // Construct principal unit vectors
    android::vec3 vAt = unitVectorFromPitchAndYaw(cam.pitch, cam.yaw);  // forward ("at") axis
    android::vec3 vRt = android::vec3(cosYaw, sinYaw, 0.0f);            // right axis (no roll assumed)
    android::vec3 vUp = -cross(vAt, vRt);                               // up axis completes the basis
    android::vec3 eye = android::vec3(cam.position[X], cam.position[Y], cam.position[Z]);

    // Build the inverse camera transform directly: the rotation part is the basis
    // vectors laid out as rows (i.e. transposed), and the translation column is the
    // negated projection of the eye position onto each axis.  Note the mat4 is
    // indexed [column][row], so Result[c][r] below assigns row r of column c.
    android::mat4 Result(1.0f);
    Result[0][0] = vRt.x;
    Result[1][0] = vRt.y;
    Result[2][0] = vRt.z;
    Result[0][1] = vUp.x;
    Result[1][1] = vUp.y;
    Result[2][1] = vUp.z;
    // Camera looks down -Z in eye space, hence the negated "at" row
    Result[0][2] =-vAt.x;
    Result[1][2] =-vAt.y;
    Result[2][2] =-vAt.z;
    Result[3][0] =-dot(vRt, eye);
    Result[3][1] =-dot(vUp, eye);
    Result[3][2] = dot(vAt, eye);
    return Result;
}
| 95 | |
| 96 | |
| 97 | RenderTopView::RenderTopView(sp<IEvsEnumerator> enumerator, |
| 98 | const std::vector<ConfigManager::CameraInfo>& camList, |
| 99 | const ConfigManager& mConfig) : |
| 100 | mEnumerator(enumerator), |
| 101 | mConfig(mConfig) { |
| 102 | |
| 103 | // Copy the list of cameras we're to employ into our local storage. We'll create and |
| 104 | // associate a streaming video texture when we are activated. |
| 105 | mActiveCameras.reserve(camList.size()); |
| 106 | for (unsigned i=0; i<camList.size(); i++) { |
| 107 | mActiveCameras.emplace_back(camList[i]); |
| 108 | } |
| 109 | } |
| 110 | |
| 111 | |
// One-time setup performed when this render mode is selected: initializes GL,
// compiles the two shader programs, loads the static texture assets, and opens a
// streaming video texture for each configured camera.  Returns false if any
// required resource fails to initialize; a missing camera stream is tolerated
// (the checkerboard is used in its place at draw time).
bool RenderTopView::activate() {
    // Ensure GL is ready to go...
    if (!prepareGL()) {
        LOG(ERROR) << "Error initializing GL";
        return false;
    }

    // Load our shader programs
    mPgmAssets.simpleTexture = buildShaderProgram(vtxShader_simpleTexture,
                                                  pixShader_simpleTexture,
                                                  "simpleTexture");
    if (!mPgmAssets.simpleTexture) {
        LOG(ERROR) << "Failed to build shader program";
        return false;
    }
    mPgmAssets.projectedTexture = buildShaderProgram(vtxShader_projectedTexture,
                                                     pixShader_projectedTexture,
                                                     "projectedTexture");
    if (!mPgmAssets.projectedTexture) {
        LOG(ERROR) << "Failed to build shader program";
        return false;
    }


    // Load the labeled checkerboard texture image (fallback imagery for cameras
    // whose video stream could not be opened)
    mTexAssets.checkerBoard.reset(createTextureFromPng(
                                  "/system/etc/automotive/evs/LabeledChecker.png"));
    if (!mTexAssets.checkerBoard) {
        LOG(ERROR) << "Failed to load checkerboard texture";
        return false;
    }

    // Load the car image (drawn over the projected camera imagery)
    mTexAssets.carTopView.reset(createTextureFromPng(
                                "/system/etc/automotive/evs/CarFromTop.png"));
    if (!mTexAssets.carTopView) {
        LOG(ERROR) << "Failed to load carTopView texture";
        return false;
    }


    // Set up streaming video textures for our associated cameras
    for (auto&& cam: mActiveCameras) {
        cam.tex.reset(createVideoTexture(mEnumerator,
                                         cam.info.cameraId.c_str(),
                                         nullptr,
                                         sDisplay));
        if (!cam.tex) {
            LOG(ERROR) << "Failed to set up video texture for " << cam.info.cameraId
                       << " (" << cam.info.function << ")";
// TODO:  For production use, we may actually want to fail in this case, but not yet...
//            return false;
        }
    }

    return true;
}
| 169 | |
| 170 | |
| 171 | void RenderTopView::deactivate() { |
| 172 | // Release our video textures |
| 173 | // We can't hold onto it because some other Render object might need the same camera |
Haoxiang Li | 43ee1e8 | 2019-05-08 17:16:50 -0700 | [diff] [blame] | 174 | // TODO(b/131492626): investigate whether sharing video textures can save |
| 175 | // the time. |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 176 | for (auto&& cam: mActiveCameras) { |
| 177 | cam.tex = nullptr; |
| 178 | } |
| 179 | } |
| 180 | |
| 181 | |
| 182 | bool RenderTopView::drawFrame(const BufferDesc& tgtBuffer) { |
| 183 | // Tell GL to render to the given buffer |
| 184 | if (!attachRenderTarget(tgtBuffer)) { |
Changyeon Jo | ffdf3db | 2020-03-06 15:23:02 -0800 | [diff] [blame] | 185 | LOG(ERROR) << "Failed to attached render target"; |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 186 | return false; |
| 187 | } |
| 188 | |
| 189 | // Set up our top down projection matrix from car space (world units, Xfwd, Yright, Zup) |
| 190 | // to view space (-1 to 1) |
| 191 | const float top = mConfig.getDisplayTopLocation(); |
| 192 | const float bottom = mConfig.getDisplayBottomLocation(); |
| 193 | const float right = mConfig.getDisplayRightLocation(sAspectRatio); |
| 194 | const float left = mConfig.getDisplayLeftLocation(sAspectRatio); |
| 195 | |
| 196 | const float near = 10.0f; // arbitrary top of view volume |
| 197 | const float far = 0.0f; // ground plane is at zero |
| 198 | |
| 199 | // We can use a simple, unrotated ortho view since the screen and car space axis are |
| 200 | // naturally aligned in the top down view. |
| 201 | // TODO: Not sure if flipping top/bottom here is "correct" or a double reverse... |
| 202 | // orthoMatrix = android::mat4::ortho(left, right, bottom, top, near, far); |
| 203 | orthoMatrix = android::mat4::ortho(left, right, top, bottom, near, far); |
| 204 | |
| 205 | |
| 206 | // Refresh our video texture contents. We do it all at once in hopes of getting |
| 207 | // better coherence among images. This does not guarantee synchronization, of course... |
| 208 | for (auto&& cam: mActiveCameras) { |
| 209 | if (cam.tex) { |
| 210 | cam.tex->refresh(); |
| 211 | } |
| 212 | } |
| 213 | |
| 214 | // Iterate over all the cameras and project their images onto the ground plane |
| 215 | for (auto&& cam: mActiveCameras) { |
| 216 | renderCameraOntoGroundPlane(cam); |
| 217 | } |
| 218 | |
| 219 | // Draw the car image |
| 220 | renderCarTopView(); |
| 221 | |
Scott Randolph | 788109f | 2018-07-12 13:10:36 -0700 | [diff] [blame] | 222 | // Now that everythign is submitted, release our hold on the texture resource |
| 223 | detachRenderTarget(); |
| 224 | |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 225 | // Wait for the rendering to finish |
| 226 | glFinish(); |
min.yun | 0a07980 | 2018-04-25 16:12:37 +0900 | [diff] [blame] | 227 | detachRenderTarget(); |
Scott Randolph | 4a83b5d | 2017-05-16 17:42:19 -0700 | [diff] [blame] | 228 | return true; |
| 229 | } |
| 230 | |
| 231 | |
//
// Responsible for drawing the car's self image in the top down view.
// Draws in car model space (units of meters with origin at center of rear axle)
// NOTE: We probably want to eventually switch to using a VertexArray based model system.
//
void RenderTopView::renderCarTopView() {
    // Compute the corners of our image footprint in car space.
    // The config gives us the pixel rows of the car's front and rear within the image,
    // which lets us derive the meters-per-texel scale from the known car length.
    const float carLengthInTexels = mConfig.carGraphicRearPixel() - mConfig.carGraphicFrontPixel();
    const float carSpaceUnitsPerTexel = mConfig.getCarLength() / carLengthInTexels;
    const float textureHeightInCarSpace = mTexAssets.carTopView->height() * carSpaceUnitsPerTexel;
    const float textureAspectRatio = (float)mTexAssets.carTopView->width() /
                                     mTexAssets.carTopView->height();
    // Texels below the rear reference row extend behind the car
    const float pixelsBehindCarInImage = mTexAssets.carTopView->height() -
                                         mConfig.carGraphicRearPixel();
    const float textureExtentBehindCarInCarSpace = pixelsBehindCarInImage * carSpaceUnitsPerTexel;

    // Quad extents in car space: bottom/top along Y, left/right along X (centered on the car)
    const float btCS = mConfig.getRearLocation() - textureExtentBehindCarInCarSpace;
    const float tpCS = textureHeightInCarSpace + btCS;
    const float ltCS = 0.5f * textureHeightInCarSpace * textureAspectRatio;
    const float rtCS = -ltCS;

    GLfloat vertsCarPos[] = { ltCS, tpCS, 0.0f,   // left top in car space
                              rtCS, tpCS, 0.0f,   // right top
                              ltCS, btCS, 0.0f,   // left bottom
                              rtCS, btCS, 0.0f    // right bottom
    };
    // NOTE: We didn't flip the image in the texture, so V=0 is actually the top of the image
    GLfloat vertsCarTex[] = { 0.0f, 0.0f,   // left top
                              1.0f, 0.0f,   // right top
                              0.0f, 1.0f,   // left bottom
                              1.0f, 1.0f    // right bottom
    };
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsCarPos);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, vertsCarTex);
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(1);


    // Alpha-blend so the transparent regions of the car PNG show the camera imagery beneath
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    // Draw the quad with the simple (unprojected) texture shader using the ortho view
    glUseProgram(mPgmAssets.simpleTexture);
    GLint loc = glGetUniformLocation(mPgmAssets.simpleTexture, "cameraMat");
    glUniformMatrix4fv(loc, 1, false, orthoMatrix.asArray());
    glBindTexture(GL_TEXTURE_2D, mTexAssets.carTopView->glId());

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);


    glDisable(GL_BLEND);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
}
| 286 | |
| 287 | |
// NOTE: Might be worth reviewing the ideas at
// http://math.stackexchange.com/questions/1691895/inverse-of-perspective-matrix
// to see if that simplifies the math, although we'll still want to compute the actual ground
// interception points taking into account the pitchLimit as below.
//
// Projects one camera's image onto the ground plane by drawing a full-display quad
// and letting the projectedTexture shader look up texels through the camera's
// view+projection matrix.  Falls back to the checkerboard texture when the camera's
// video stream is unavailable.
void RenderTopView::renderCameraOntoGroundPlane(const ActiveCamera& cam) {
    // How far is the farthest any camera should even consider projecting its image?
    const float visibleSizeV = mConfig.getDisplayTopLocation() - mConfig.getDisplayBottomLocation();
    const float visibleSizeH = visibleSizeV * sAspectRatio;
    const float maxRange = (visibleSizeH > visibleSizeV) ? visibleSizeH : visibleSizeV;

    // Construct the projection matrix (View + Projection) associated with this sensor.
    // The near plane is the camera's height above the ground (position[Z]).
    // TODO: Consider just hard coding the far plane distance as it likely doesn't matter
    const android::mat4 V = cameraLookMatrix(cam.info);
    const android::mat4 P = perspective(cam.info.hfov, cam.info.vfov, cam.info.position[Z], maxRange);
    const android::mat4 projectionMatix = P*V;

    // Just draw the whole darn ground plane for now -- we're wasting fill rate, but so what?
    // A 2x optimization would be to draw only the 1/2 space of the window in the direction
    // the sensor is facing.  A more complex solution would be to construct the intersection
    // of the sensor volume with the ground plane and render only that geometry.
    const float top = mConfig.getDisplayTopLocation();
    const float bottom = mConfig.getDisplayBottomLocation();
    const float wsHeight = top - bottom;
    const float wsWidth = wsHeight * sAspectRatio;
    const float right =  wsWidth * 0.5f;
    const float left = -right;

    // Corners of the ground-plane quad in car space (Z=0 is the ground)
    const android::vec3 topLeft(left, top, 0.0f);
    const android::vec3 topRight(right, top, 0.0f);
    const android::vec3 botLeft(left, bottom, 0.0f);
    const android::vec3 botRight(right, bottom, 0.0f);

    GLfloat vertsPos[] = { topLeft[X],  topLeft[Y],  topLeft[Z],
                           topRight[X], topRight[Y], topRight[Z],
                           botLeft[X],  botLeft[Y],  botLeft[Z],
                           botRight[X], botRight[Y], botRight[Z],
    };
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsPos);
    glEnableVertexAttribArray(0);


    // Opaque draw -- the ground plane is the bottom layer of the composition
    glDisable(GL_BLEND);

    // cameraMat maps car space to the screen; projectionMat maps car space into the
    // sensor's image for the projective texture lookup
    glUseProgram(mPgmAssets.projectedTexture);
    GLint locCam = glGetUniformLocation(mPgmAssets.projectedTexture, "cameraMat");
    glUniformMatrix4fv(locCam, 1, false, orthoMatrix.asArray());
    GLint locProj = glGetUniformLocation(mPgmAssets.projectedTexture, "projectionMat");
    glUniformMatrix4fv(locProj, 1, false, projectionMatix.asArray());

    // Use the live video texture if we have one, else the labeled checkerboard
    GLuint texId;
    if (cam.tex) {
        texId = cam.tex->glId();
    } else {
        texId = mTexAssets.checkerBoard->glId();
    }
    glBindTexture(GL_TEXTURE_2D, texId);

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);


    glDisableVertexAttribArray(0);
}