| jvanverth@google.com | 8ffb704 | 2012-12-13 19:53:18 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright 2012 The Android Open Source Project |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #define LOG_TAG "PathRenderer" |
| 9 | #define LOG_NDEBUG 1 |
| 10 | #define ATRACE_TAG ATRACE_TAG_GRAPHICS |
| 11 | |
| 12 | #define VERTEX_DEBUG 0 |
| 13 | |
| 14 | #include <SkPath.h> |
| 15 | #include <SkPaint.h> |
| 16 | |
| 17 | #include <stdlib.h> |
| 18 | #include <stdint.h> |
| 19 | #include <sys/types.h> |
| 20 | |
| 21 | #include <utils/Log.h> |
| 22 | #include <utils/Trace.h> |
| 23 | |
| 24 | #include "PathRenderer.h" |
| 25 | #include "Matrix.h" |
| 26 | #include "Vector.h" |
| 27 | #include "Vertex.h" |
| 28 | |
| 29 | namespace android { |
| 30 | namespace uirenderer { |
| 31 | |
| 32 | #define THRESHOLD 0.5f |
| 33 | |
| 34 | SkRect PathRenderer::computePathBounds(const SkPath& path, const SkPaint* paint) { |
| 35 | SkRect bounds = path.getBounds(); |
| 36 | if (paint->getStyle() != SkPaint::kFill_Style) { |
| 37 | float outset = paint->getStrokeWidth() * 0.5f; |
| 38 | bounds.outset(outset, outset); |
| 39 | } |
| 40 | return bounds; |
| 41 | } |
| 42 | |
| 43 | void computeInverseScales(const mat4 *transform, float &inverseScaleX, float& inverseScaleY) { |
| 44 | if (CC_UNLIKELY(!transform->isPureTranslate())) { |
| 45 | float m00 = transform->data[Matrix4::kScaleX]; |
| 46 | float m01 = transform->data[Matrix4::kSkewY]; |
| 47 | float m10 = transform->data[Matrix4::kSkewX]; |
| 48 | float m11 = transform->data[Matrix4::kScaleY]; |
| 49 | float scaleX = sqrt(m00 * m00 + m01 * m01); |
| 50 | float scaleY = sqrt(m10 * m10 + m11 * m11); |
| 51 | inverseScaleX = (scaleX != 0) ? (1.0f / scaleX) : 1.0f; |
| 52 | inverseScaleY = (scaleY != 0) ? (1.0f / scaleY) : 1.0f; |
| 53 | } else { |
| 54 | inverseScaleX = 1.0f; |
| 55 | inverseScaleY = 1.0f; |
| 56 | } |
| 57 | } |
| 58 | |
| 59 | inline void copyVertex(Vertex* destPtr, const Vertex* srcPtr) { |
| 60 | Vertex::set(destPtr, srcPtr->position[0], srcPtr->position[1]); |
| 61 | } |
| 62 | |
| 63 | inline void copyAlphaVertex(AlphaVertex* destPtr, const AlphaVertex* srcPtr) { |
| 64 | AlphaVertex::set(destPtr, srcPtr->position[0], srcPtr->position[1], srcPtr->alpha); |
| 65 | } |
| 66 | |
| 67 | /** |
| 68 | * Produces a pseudo-normal for a vertex, given the normals of the two incoming lines. If the offset |
| 69 | * from each vertex in a perimeter is calculated, the resultant lines connecting the offset vertices |
| 70 | * will be offset by 1.0 |
| 71 | * |
| 72 | * Note that we can't add and normalize the two vectors, that would result in a rectangle having an |
| 73 | * offset of (sqrt(2)/2, sqrt(2)/2) at each corner, instead of (1, 1) |
| 74 | * |
| 75 | * NOTE: assumes angles between normals 90 degrees or less |
| 76 | */ |
| 77 | inline vec2 totalOffsetFromNormals(const vec2& normalA, const vec2& normalB) { |
| 78 | return (normalA + normalB) / (1 + fabs(normalA.dot(normalB))); |
| 79 | } |
| 80 | |
| 81 | inline void scaleOffsetForStrokeWidth(vec2& offset, float halfStrokeWidth, |
| 82 | float inverseScaleX, float inverseScaleY) { |
| 83 | if (halfStrokeWidth == 0.0f) { |
| 84 | // hairline - compensate for scale |
| 85 | offset.x *= 0.5f * inverseScaleX; |
| 86 | offset.y *= 0.5f * inverseScaleY; |
| 87 | } else { |
| 88 | offset *= halfStrokeWidth; |
| 89 | } |
| 90 | } |
| 91 | |
| 92 | void getFillVerticesFromPerimeter(const Vector<Vertex>& perimeter, VertexBuffer& vertexBuffer) { |
| 93 | Vertex* buffer = vertexBuffer.alloc<Vertex>(perimeter.size()); |
| 94 | |
| 95 | int currentIndex = 0; |
| 96 | // zig zag between all previous points on the inside of the hull to create a |
| 97 | // triangle strip that fills the hull |
| 98 | int srcAindex = 0; |
| 99 | int srcBindex = perimeter.size() - 1; |
| 100 | while (srcAindex <= srcBindex) { |
| 101 | copyVertex(&buffer[currentIndex++], &perimeter[srcAindex]); |
| 102 | if (srcAindex == srcBindex) break; |
| 103 | copyVertex(&buffer[currentIndex++], &perimeter[srcBindex]); |
| 104 | srcAindex++; |
| 105 | srcBindex--; |
| 106 | } |
| 107 | } |
| 108 | |
/**
 * Generates non-AA stroke geometry for a closed perimeter as one triangle strip: each perimeter
 * point emits an outer and an inner vertex offset along the miter of its two adjacent edge
 * normals, scaled for the stroke width. The first pair is repeated at the end to close the loop.
 */
void getStrokeVerticesFromPerimeter(const Vector<Vertex>& perimeter, float halfStrokeWidth,
        VertexBuffer& vertexBuffer, float inverseScaleX, float inverseScaleY) {
    // two vertices per perimeter point, plus two to wrap the strip back to its start
    Vertex* buffer = vertexBuffer.alloc<Vertex>(perimeter.size() * 2 + 2);

    int currentIndex = 0;
    const Vertex* last = &(perimeter[perimeter.size() - 1]);
    const Vertex* current = &(perimeter[0]);
    // normal of the edge arriving at 'current' (perpendicular to last->current)
    vec2 lastNormal(current->position[1] - last->position[1],
            last->position[0] - current->position[0]);
    lastNormal.normalize();
    for (unsigned int i = 0; i < perimeter.size(); i++) {
        // edge leaving 'current'; wraps to the first vertex on the final iteration
        const Vertex* next = &(perimeter[i + 1 >= perimeter.size() ? 0 : i + 1]);
        vec2 nextNormal(next->position[1] - current->position[1],
                current->position[0] - next->position[0]);
        nextNormal.normalize();

        // miter offset from the two adjacent edge normals, scaled for stroke width/hairline
        vec2 totalOffset = totalOffsetFromNormals(lastNormal, nextNormal);
        scaleOffsetForStrokeWidth(totalOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);

        // outer vertex
        Vertex::set(&buffer[currentIndex++],
                current->position[0] + totalOffset.x,
                current->position[1] + totalOffset.y);

        // inner vertex
        Vertex::set(&buffer[currentIndex++],
                current->position[0] - totalOffset.x,
                current->position[1] - totalOffset.y);

        last = current;
        current = next;
        lastNormal = nextNormal;
    }

    // wrap around to beginning
    copyVertex(&buffer[currentIndex++], &buffer[0]);
    copyVertex(&buffer[currentIndex++], &buffer[1]);
}
| 145 | |
/**
 * Generates non-AA stroke geometry for an open polyline as one triangle strip. Interior points
 * emit an outer/inner pair offset along the miter of their two adjacent edge normals; the first
 * and last points are offset along their single edge's normal only (butt-cap style ends).
 *
 * NOTE(review): assumes vertices.size() >= 2 — with a single vertex the loop body never runs and
 * 'lastNormal' is used default-constructed below; confirm callers guarantee this.
 */
void getStrokeVerticesFromUnclosedVertices(const Vector<Vertex>& vertices, float halfStrokeWidth,
        VertexBuffer& vertexBuffer, float inverseScaleX, float inverseScaleY) {
    // one outer/inner vertex pair per input point
    Vertex* buffer = vertexBuffer.alloc<Vertex>(vertices.size() * 2);

    int currentIndex = 0;
    const Vertex* current = &(vertices[0]);
    vec2 lastNormal;
    for (unsigned int i = 0; i < vertices.size() - 1; i++) {
        // normal of the edge leaving 'current'
        const Vertex* next = &(vertices[i + 1]);
        vec2 nextNormal(next->position[1] - current->position[1],
                current->position[0] - next->position[0]);
        nextNormal.normalize();

        vec2 totalOffset;
        if (i == 0) {
            // first point has no incoming edge: offset along the leading edge's normal alone
            totalOffset = nextNormal;
        } else {
            totalOffset = totalOffsetFromNormals(lastNormal, nextNormal);
        }
        scaleOffsetForStrokeWidth(totalOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);

        // outer vertex
        Vertex::set(&buffer[currentIndex++],
                current->position[0] + totalOffset.x,
                current->position[1] + totalOffset.y);

        // inner vertex
        Vertex::set(&buffer[currentIndex++],
                current->position[0] - totalOffset.x,
                current->position[1] - totalOffset.y);

        current = next;
        lastNormal = nextNormal;
    }

    // last point: offset along the trailing edge's normal only
    vec2 totalOffset = lastNormal;
    scaleOffsetForStrokeWidth(totalOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);

    Vertex::set(&buffer[currentIndex++],
            current->position[0] + totalOffset.x,
            current->position[1] + totalOffset.y);
    Vertex::set(&buffer[currentIndex++],
            current->position[0] - totalOffset.x,
            current->position[1] - totalOffset.y);
#if VERTEX_DEBUG
    for (unsigned int i = 0; i < vertexBuffer.getSize(); i++) {
        ALOGD("point at %f %f", buffer[i].position[0], buffer[i].position[1]);
    }
#endif
}
| 194 | |
/**
 * Generates AA fill geometry for a closed convex perimeter as one triangle strip: first an AA
 * fringe around the perimeter (pairs of alpha=0 outer / alpha=1 inner vertices, each half a
 * pixel from the original point), wrapped back to its start, then a zig-zag over the inner
 * (alpha=1) vertices to fill the hull. Total: 2 fringe vertices per point + 2 wrap + 1 fill
 * vertex per point = perimeter.size() * 3 + 2.
 */
void getFillVerticesFromPerimeterAA(const Vector<Vertex>& perimeter, VertexBuffer& vertexBuffer,
        float inverseScaleX, float inverseScaleY) {
    AlphaVertex* buffer = vertexBuffer.alloc<AlphaVertex>(perimeter.size() * 3 + 2);

    // generate alpha points - fill Alpha vertex gaps in between each point with
    // alpha 0 vertex, offset by a scaled normal.
    int currentIndex = 0;
    const Vertex* last = &(perimeter[perimeter.size() - 1]);
    const Vertex* current = &(perimeter[0]);
    // normal of the edge arriving at 'current'
    vec2 lastNormal(current->position[1] - last->position[1],
            last->position[0] - current->position[0]);
    lastNormal.normalize();
    for (unsigned int i = 0; i < perimeter.size(); i++) {
        // edge leaving 'current'; wraps to the first vertex on the final iteration
        const Vertex* next = &(perimeter[i + 1 >= perimeter.size() ? 0 : i + 1]);
        vec2 nextNormal(next->position[1] - current->position[1],
                current->position[0] - next->position[0]);
        nextNormal.normalize();

        // AA point offset from original point is that point's normal, such that each side is offset
        // by .5 pixels
        vec2 totalOffset = totalOffsetFromNormals(lastNormal, nextNormal);
        totalOffset.x *= 0.5f * inverseScaleX;
        totalOffset.y *= 0.5f * inverseScaleY;

        // outer (transparent) fringe vertex at even index, inner (opaque) at odd index —
        // the fill pass below relies on inner vertices living at srcIndex * 2 + 1
        AlphaVertex::set(&buffer[currentIndex++],
                current->position[0] + totalOffset.x,
                current->position[1] + totalOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[currentIndex++],
                current->position[0] - totalOffset.x,
                current->position[1] - totalOffset.y,
                1.0f);

        last = current;
        current = next;
        lastNormal = nextNormal;
    }

    // wrap around to beginning
    copyAlphaVertex(&buffer[currentIndex++], &buffer[0]);
    copyAlphaVertex(&buffer[currentIndex++], &buffer[1]);

    // zig zag between all previous points on the inside of the hull to create a
    // triangle strip that fills the hull, repeating the first inner point to
    // create degenerate tris to start inside path
    int srcAindex = 0;
    int srcBindex = perimeter.size() - 1;
    while (srcAindex <= srcBindex) {
        copyAlphaVertex(&buffer[currentIndex++], &buffer[srcAindex * 2 + 1]);
        if (srcAindex == srcBindex) break;
        copyAlphaVertex(&buffer[currentIndex++], &buffer[srcBindex * 2 + 1]);
        srcAindex++;
        srcBindex--;
    }

#if VERTEX_DEBUG
    for (unsigned int i = 0; i < vertexBuffer.getSize(); i++) {
        ALOGD("point at %f %f, alpha %f", buffer[i].position[0], buffer[i].position[1], buffer[i].alpha);
    }
#endif
}
| 256 | |
| 257 | |
/**
 * Generates AA stroke geometry for an open polyline, with AA'd caps at both ends.
 *
 * The output is a single triangle strip built from sub-strips connected by degenerate triangles:
 * two AA fringes whose alpha ramps from 0 at the outside to maxAlpha, plus the solid stroke
 * core. Strokes thinner than a pixel post-transform are drawn as hairlines with proportionally
 * reduced alpha instead.
 */
void getStrokeVerticesFromUnclosedVerticesAA(const Vector<Vertex>& vertices, float halfStrokeWidth,
        VertexBuffer& vertexBuffer, float inverseScaleX, float inverseScaleY) {
    AlphaVertex* buffer = vertexBuffer.alloc<AlphaVertex>(6 * vertices.size() + 2);

    // avoid lines smaller than hairline since they break triangle based sampling. instead reducing
    // alpha value (TODO: support different X/Y scale)
    float maxAlpha = 1.0f;
    if (halfStrokeWidth != 0 && inverseScaleX == inverseScaleY &&
            halfStrokeWidth * inverseScaleX < 0.5f) {
        maxAlpha *= (2 * halfStrokeWidth) / inverseScaleX;
        halfStrokeWidth = 0.0f;
    }

    // there is no outer/inner here, using them for consistency with below approach
    // buffer layout: [0..1] start cap, [2..offset+1] first AA fringe, [offset+2..offset+5] end
    // cap, [offset+6..2*offset+5] second AA fringe (written in reverse), [2*offset+6..2*offset+11]
    // cap wrap plus degenerate bridge, [2*offset+12..] stroke core, final two slots wrap the core
    int offset = 2 * (vertices.size() - 2);
    int currentAAOuterIndex = 2;
    int currentAAInnerIndex = 2 * offset + 5; // reversed
    int currentStrokeIndex = currentAAInnerIndex + 7;

    const Vertex* last = &(vertices[0]);
    const Vertex* current = &(vertices[1]);
    vec2 lastNormal(current->position[1] - last->position[1],
            last->position[0] - current->position[0]);
    lastNormal.normalize();

    {
        // start cap
        vec2 totalOffset = lastNormal;
        vec2 AAOffset = totalOffset;
        AAOffset.x *= 0.5f * inverseScaleX;
        AAOffset.y *= 0.5f * inverseScaleY;

        // inner/outer edges of the AA fringe: stroke boundary +/- half a pixel
        vec2 innerOffset = totalOffset;
        scaleOffsetForStrokeWidth(innerOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);
        vec2 outerOffset = innerOffset + AAOffset;
        innerOffset -= AAOffset;

        // TODO: support square cap by changing this offset to incorporate halfStrokeWidth
        // capAAOffset extends the fringe past the endpoint, along the line direction
        vec2 capAAOffset(AAOffset.y, -AAOffset.x);
        AlphaVertex::set(&buffer[0],
                last->position[0] + outerOffset.x + capAAOffset.x,
                last->position[1] + outerOffset.y + capAAOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[1],
                last->position[0] + innerOffset.x - capAAOffset.x,
                last->position[1] + innerOffset.y - capAAOffset.y,
                maxAlpha);

        AlphaVertex::set(&buffer[2 * offset + 6],
                last->position[0] - outerOffset.x + capAAOffset.x,
                last->position[1] - outerOffset.y + capAAOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[2 * offset + 7],
                last->position[0] - innerOffset.x - capAAOffset.x,
                last->position[1] - innerOffset.y - capAAOffset.y,
                maxAlpha);
        copyAlphaVertex(&buffer[2 * offset + 8], &buffer[0]);
        copyAlphaVertex(&buffer[2 * offset + 9], &buffer[1]);
        copyAlphaVertex(&buffer[2 * offset + 10], &buffer[1]); // degenerate tris (the only two!)
        copyAlphaVertex(&buffer[2 * offset + 11], &buffer[2 * offset + 7]);
    }

    // interior points: emit fringe + core pairs offset along the miter of adjacent edge normals
    for (unsigned int i = 1; i < vertices.size() - 1; i++) {
        const Vertex* next = &(vertices[i + 1]);
        vec2 nextNormal(next->position[1] - current->position[1],
                current->position[0] - next->position[0]);
        nextNormal.normalize();

        vec2 totalOffset = totalOffsetFromNormals(lastNormal, nextNormal);
        vec2 AAOffset = totalOffset;
        AAOffset.x *= 0.5f * inverseScaleX;
        AAOffset.y *= 0.5f * inverseScaleY;

        vec2 innerOffset = totalOffset;
        scaleOffsetForStrokeWidth(innerOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);
        vec2 outerOffset = innerOffset + AAOffset;
        innerOffset -= AAOffset;

        // first fringe: transparent outside, opaque at the stroke edge
        AlphaVertex::set(&buffer[currentAAOuterIndex++],
                current->position[0] + outerOffset.x,
                current->position[1] + outerOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[currentAAOuterIndex++],
                current->position[0] + innerOffset.x,
                current->position[1] + innerOffset.y,
                maxAlpha);

        // solid core spans between the two inner (opaque) edges
        AlphaVertex::set(&buffer[currentStrokeIndex++],
                current->position[0] + innerOffset.x,
                current->position[1] + innerOffset.y,
                maxAlpha);
        AlphaVertex::set(&buffer[currentStrokeIndex++],
                current->position[0] - innerOffset.x,
                current->position[1] - innerOffset.y,
                maxAlpha);

        // second fringe, filled back-to-front so the strip winds continuously
        AlphaVertex::set(&buffer[currentAAInnerIndex--],
                current->position[0] - innerOffset.x,
                current->position[1] - innerOffset.y,
                maxAlpha);
        AlphaVertex::set(&buffer[currentAAInnerIndex--],
                current->position[0] - outerOffset.x,
                current->position[1] - outerOffset.y,
                0.0f);

        last = current;
        current = next;
        lastNormal = nextNormal;
    }

    {
        // end cap
        vec2 totalOffset = lastNormal;
        vec2 AAOffset = totalOffset;
        AAOffset.x *= 0.5f * inverseScaleX;
        AAOffset.y *= 0.5f * inverseScaleY;

        vec2 innerOffset = totalOffset;
        scaleOffsetForStrokeWidth(innerOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);
        vec2 outerOffset = innerOffset + AAOffset;
        innerOffset -= AAOffset;

        // TODO: support square cap by changing this offset to incorporate halfStrokeWidth
        // opposite sign from the start cap: extends past the other end of the line
        vec2 capAAOffset(-AAOffset.y, AAOffset.x);

        AlphaVertex::set(&buffer[offset + 2],
                current->position[0] + outerOffset.x + capAAOffset.x,
                current->position[1] + outerOffset.y + capAAOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[offset + 3],
                current->position[0] + innerOffset.x - capAAOffset.x,
                current->position[1] + innerOffset.y - capAAOffset.y,
                maxAlpha);

        AlphaVertex::set(&buffer[offset + 4],
                current->position[0] - outerOffset.x + capAAOffset.x,
                current->position[1] - outerOffset.y + capAAOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[offset + 5],
                current->position[0] - innerOffset.x - capAAOffset.x,
                current->position[1] - innerOffset.y - capAAOffset.y,
                maxAlpha);

        // close the stroke core back onto the end cap's opaque vertices
        copyAlphaVertex(&buffer[vertexBuffer.getSize() - 2], &buffer[offset + 3]);
        copyAlphaVertex(&buffer[vertexBuffer.getSize() - 1], &buffer[offset + 5]);
    }

#if VERTEX_DEBUG
    for (unsigned int i = 0; i < vertexBuffer.getSize(); i++) {
        ALOGD("point at %f %f, alpha %f", buffer[i].position[0], buffer[i].position[1], buffer[i].alpha);
    }
#endif
}
| 411 | |
| 412 | |
/**
 * Generates AA stroke geometry for a closed perimeter as one triangle strip composed of three
 * sub-strips: outer AA fringe (alpha 0 -> maxAlpha), solid stroke core, and inner AA fringe
 * (maxAlpha -> 0). Each sub-strip wraps back to its own start, with degenerate triangles
 * bridging consecutive sub-strips. Strokes thinner than a pixel post-transform are drawn as
 * hairlines with proportionally reduced alpha instead.
 */
void getStrokeVerticesFromPerimeterAA(const Vector<Vertex>& perimeter, float halfStrokeWidth,
        VertexBuffer& vertexBuffer, float inverseScaleX, float inverseScaleY) {
    AlphaVertex* buffer = vertexBuffer.alloc<AlphaVertex>(6 * perimeter.size() + 8);

    // avoid lines smaller than hairline since they break triangle based sampling. instead reducing
    // alpha value (TODO: support different X/Y scale)
    float maxAlpha = 1.0f;
    if (halfStrokeWidth != 0 && inverseScaleX == inverseScaleY &&
            halfStrokeWidth * inverseScaleX < 0.5f) {
        maxAlpha *= (2 * halfStrokeWidth) / inverseScaleX;
        halfStrokeWidth = 0.0f;
    }

    // each sub-strip spans 'offset' slots: 2 vertices per perimeter point + 3 wrap/bridge copies
    int offset = 2 * perimeter.size() + 3;
    int currentAAOuterIndex = 0;
    int currentStrokeIndex = offset;
    int currentAAInnerIndex = offset * 2;

    const Vertex* last = &(perimeter[perimeter.size() - 1]);
    const Vertex* current = &(perimeter[0]);
    // normal of the edge arriving at 'current'
    vec2 lastNormal(current->position[1] - last->position[1],
            last->position[0] - current->position[0]);
    lastNormal.normalize();
    for (unsigned int i = 0; i < perimeter.size(); i++) {
        // edge leaving 'current'; wraps to the first vertex on the final iteration
        const Vertex* next = &(perimeter[i + 1 >= perimeter.size() ? 0 : i + 1]);
        vec2 nextNormal(next->position[1] - current->position[1],
                current->position[0] - next->position[0]);
        nextNormal.normalize();

        vec2 totalOffset = totalOffsetFromNormals(lastNormal, nextNormal);
        vec2 AAOffset = totalOffset;
        AAOffset.x *= 0.5f * inverseScaleX;
        AAOffset.y *= 0.5f * inverseScaleY;

        // inner/outer edges of each fringe: stroke boundary -/+ half a pixel
        vec2 innerOffset = totalOffset;
        scaleOffsetForStrokeWidth(innerOffset, halfStrokeWidth, inverseScaleX, inverseScaleY);
        vec2 outerOffset = innerOffset + AAOffset;
        innerOffset -= AAOffset;

        // outer AA fringe: transparent outside, opaque at the stroke edge
        AlphaVertex::set(&buffer[currentAAOuterIndex++],
                current->position[0] + outerOffset.x,
                current->position[1] + outerOffset.y,
                0.0f);
        AlphaVertex::set(&buffer[currentAAOuterIndex++],
                current->position[0] + innerOffset.x,
                current->position[1] + innerOffset.y,
                maxAlpha);

        // solid stroke core between the opaque edges of the two fringes
        AlphaVertex::set(&buffer[currentStrokeIndex++],
                current->position[0] + innerOffset.x,
                current->position[1] + innerOffset.y,
                maxAlpha);
        AlphaVertex::set(&buffer[currentStrokeIndex++],
                current->position[0] - innerOffset.x,
                current->position[1] - innerOffset.y,
                maxAlpha);

        // inner AA fringe: opaque at the stroke edge, transparent toward the path interior
        AlphaVertex::set(&buffer[currentAAInnerIndex++],
                current->position[0] - innerOffset.x,
                current->position[1] - innerOffset.y,
                maxAlpha);
        AlphaVertex::set(&buffer[currentAAInnerIndex++],
                current->position[0] - outerOffset.x,
                current->position[1] - outerOffset.y,
                0.0f);

        last = current;
        current = next;
        lastNormal = nextNormal;
    }

    // wrap each strip around to beginning, creating degenerate tris to bridge strips
    copyAlphaVertex(&buffer[currentAAOuterIndex++], &buffer[0]);
    copyAlphaVertex(&buffer[currentAAOuterIndex++], &buffer[1]);
    copyAlphaVertex(&buffer[currentAAOuterIndex++], &buffer[1]);

    copyAlphaVertex(&buffer[currentStrokeIndex++], &buffer[offset]);
    copyAlphaVertex(&buffer[currentStrokeIndex++], &buffer[offset + 1]);
    copyAlphaVertex(&buffer[currentStrokeIndex++], &buffer[offset + 1]);

    copyAlphaVertex(&buffer[currentAAInnerIndex++], &buffer[2 * offset]);
    copyAlphaVertex(&buffer[currentAAInnerIndex++], &buffer[2 * offset + 1]);
    // don't need to create last degenerate tri

#if VERTEX_DEBUG
    for (unsigned int i = 0; i < vertexBuffer.getSize(); i++) {
        ALOGD("point at %f %f, alpha %f", buffer[i].position[0], buffer[i].position[1], buffer[i].alpha);
    }
#endif
}
| 503 | |
/**
 * Tessellates a convex path into vertexBuffer as fill or stroke geometry, AA or non-AA,
 * according to the paint. The perimeter is first flattened into line segments (bezier
 * subdivision thresholds compensate for the transform's scale), then expanded by one of the
 * getXXXVertices helpers above. Leaves vertexBuffer untouched if the path produces no vertices.
 */
void PathRenderer::convexPathVertices(const SkPath &path, const SkPaint* paint,
        const mat4 *transform, VertexBuffer& vertexBuffer) {
    ATRACE_CALL();

    SkPaint::Style style = paint->getStyle();
    bool isAA = paint->isAntiAlias();

    float inverseScaleX, inverseScaleY;
    computeInverseScales(transform, inverseScaleX, inverseScaleY);

    Vector<Vertex> tempVertices;
    float threshInvScaleX = inverseScaleX;
    float threshInvScaleY = inverseScaleY;
    if (style == SkPaint::kStroke_Style) {
        // alter the bezier recursion threshold values we calculate in order to compensate for
        // expansion done after the path vertices are found
        SkRect bounds = path.getBounds();
        if (!bounds.isEmpty()) {
            threshInvScaleX *= bounds.width() / (bounds.width() + paint->getStrokeWidth());
            threshInvScaleY *= bounds.height() / (bounds.height() + paint->getStrokeWidth());
        }
    }

    // force close if we're filling the path, since fill path expects closed perimeter.
    bool forceClose = style != SkPaint::kStroke_Style;
    bool wasClosed = convexPathPerimeterVertices(path, forceClose, threshInvScaleX * threshInvScaleX,
            threshInvScaleY * threshInvScaleY, tempVertices);

    if (!tempVertices.size()) {
        // path was empty, return without allocating vertex buffer
        return;
    }

#if VERTEX_DEBUG
    for (unsigned int i = 0; i < tempVertices.size(); i++) {
        ALOGD("orig path: point at %f %f", tempVertices[i].position[0], tempVertices[i].position[1]);
    }
#endif

    // dispatch to the right tessellator: stroke vs fill, AA vs non-AA, closed vs open
    if (style == SkPaint::kStroke_Style) {
        float halfStrokeWidth = paint->getStrokeWidth() * 0.5f;
        if (!isAA) {
            if (wasClosed) {
                getStrokeVerticesFromPerimeter(tempVertices, halfStrokeWidth, vertexBuffer,
                        inverseScaleX, inverseScaleY);
            } else {
                getStrokeVerticesFromUnclosedVertices(tempVertices, halfStrokeWidth, vertexBuffer,
                        inverseScaleX, inverseScaleY);
            }

        } else {
            if (wasClosed) {
                getStrokeVerticesFromPerimeterAA(tempVertices, halfStrokeWidth, vertexBuffer,
                        inverseScaleX, inverseScaleY);
            } else {
                getStrokeVerticesFromUnclosedVerticesAA(tempVertices, halfStrokeWidth, vertexBuffer,
                        inverseScaleX, inverseScaleY);
            }
        }
    } else {
        // For kStrokeAndFill style, the path should be adjusted externally, as it will be treated as a fill here.
        if (!isAA) {
            getFillVerticesFromPerimeter(tempVertices, vertexBuffer);
        } else {
            getFillVerticesFromPerimeterAA(tempVertices, vertexBuffer, inverseScaleX, inverseScaleY);
        }
    }
}
| 572 | |
| 573 | |
| 574 | void pushToVector(Vector<Vertex>& vertices, float x, float y) { |
| 575 | // TODO: make this not yuck |
| 576 | vertices.push(); |
| 577 | Vertex* newVertex = &(vertices.editArray()[vertices.size() - 1]); |
| 578 | Vertex::set(newVertex, x, y); |
| 579 | } |
| 580 | |
| 581 | bool PathRenderer::convexPathPerimeterVertices(const SkPath& path, bool forceClose, |
| 582 | float sqrInvScaleX, float sqrInvScaleY, Vector<Vertex>& outputVertices) { |
| 583 | ATRACE_CALL(); |
| 584 | |
| 585 | // TODO: to support joins other than sharp miter, join vertices should be labelled in the |
| 586 | // perimeter, or resolved into more vertices. Reconsider forceClose-ing in that case. |
| 587 | SkPath::Iter iter(path, forceClose); |
| 588 | SkPoint pts[4]; |
| 589 | SkPath::Verb v; |
| 590 | Vertex* newVertex = 0; |
| 591 | while (SkPath::kDone_Verb != (v = iter.next(pts))) { |
| 592 | switch (v) { |
| 593 | case SkPath::kMove_Verb: |
| 594 | pushToVector(outputVertices, pts[0].x(), pts[0].y()); |
| 595 | ALOGV("Move to pos %f %f", pts[0].x(), pts[0].y()); |
| 596 | break; |
| 597 | case SkPath::kClose_Verb: |
| 598 | ALOGV("Close at pos %f %f", pts[0].x(), pts[0].y()); |
| 599 | break; |
| 600 | case SkPath::kLine_Verb: |
| 601 | ALOGV("kLine_Verb %f %f -> %f %f", |
| 602 | pts[0].x(), pts[0].y(), |
| 603 | pts[1].x(), pts[1].y()); |
| 604 | |
| 605 | pushToVector(outputVertices, pts[1].x(), pts[1].y()); |
| 606 | break; |
| 607 | case SkPath::kQuad_Verb: |
| 608 | ALOGV("kQuad_Verb"); |
| 609 | recursiveQuadraticBezierVertices( |
| 610 | pts[0].x(), pts[0].y(), |
| 611 | pts[2].x(), pts[2].y(), |
| 612 | pts[1].x(), pts[1].y(), |
| 613 | sqrInvScaleX, sqrInvScaleY, outputVertices); |
| 614 | break; |
| 615 | case SkPath::kCubic_Verb: |
| 616 | ALOGV("kCubic_Verb"); |
| 617 | recursiveCubicBezierVertices( |
| 618 | pts[0].x(), pts[0].y(), |
| 619 | pts[1].x(), pts[1].y(), |
| 620 | pts[3].x(), pts[3].y(), |
| 621 | pts[2].x(), pts[2].y(), |
| 622 | sqrInvScaleX, sqrInvScaleY, outputVertices); |
| 623 | break; |
| 624 | default: |
| 625 | break; |
| 626 | } |
| 627 | } |
| 628 | |
| 629 | int size = outputVertices.size(); |
| 630 | if (size >= 2 && outputVertices[0].position[0] == outputVertices[size - 1].position[0] && |
| 631 | outputVertices[0].position[1] == outputVertices[size - 1].position[1]) { |
| 632 | outputVertices.pop(); |
| 633 | return true; |
| 634 | } |
| 635 | return false; |
| 636 | } |
| 637 | |
/**
 * Recursively subdivides a cubic bezier at its midpoint (de Casteljau) until each sub-segment is
 * flat enough, then appends the sub-segment endpoint to outputVertices. The start point (p1) is
 * never pushed here — it is expected to already be in the list from the preceding verb.
 *
 * Note the argument order: each endpoint is paired with its adjacent control point
 * (p1, c1, p2, c2); see the caller in convexPathPerimeterVertices.
 */
void PathRenderer::recursiveCubicBezierVertices(
        float p1x, float p1y, float c1x, float c1y,
        float p2x, float p2y, float c2x, float c2y,
        float sqrInvScaleX, float sqrInvScaleY, Vector<Vertex>& outputVertices) {
    float dx = p2x - p1x;
    float dy = p2y - p1y;
    // flatness estimate: cross products give each control point's distance from the chord,
    // scaled by the chord length
    float d1 = fabs((c1x - p2x) * dy - (c1y - p2y) * dx);
    float d2 = fabs((c2x - p2x) * dy - (c2y - p2y) * dx);
    float d = d1 + d2;

    // multiplying by sqrInvScaleY/X equivalent to multiplying in dimensional scale factors

    if (d * d < THRESHOLD * THRESHOLD * (dx * dx * sqrInvScaleY + dy * dy * sqrInvScaleX)) {
        // below thresh, draw line by adding endpoint
        pushToVector(outputVertices, p2x, p2y);
    } else {
        // midpoints of the control polygon edges
        float p1c1x = (p1x + c1x) * 0.5f;
        float p1c1y = (p1y + c1y) * 0.5f;
        float p2c2x = (p2x + c2x) * 0.5f;
        float p2c2y = (p2y + c2y) * 0.5f;

        float c1c2x = (c1x + c2x) * 0.5f;
        float c1c2y = (c1y + c2y) * 0.5f;

        // second-level midpoints
        float p1c1c2x = (p1c1x + c1c2x) * 0.5f;
        float p1c1c2y = (p1c1y + c1c2y) * 0.5f;

        float p2c1c2x = (p2c2x + c1c2x) * 0.5f;
        float p2c1c2y = (p2c2y + c1c2y) * 0.5f;

        // point on the curve at t = 0.5, splitting it into two sub-cubics
        float mx = (p1c1c2x + p2c1c2x) * 0.5f;
        float my = (p1c1c2y + p2c1c2y) * 0.5f;

        recursiveCubicBezierVertices(
                p1x, p1y, p1c1x, p1c1y,
                mx, my, p1c1c2x, p1c1c2y,
                sqrInvScaleX, sqrInvScaleY, outputVertices);
        recursiveCubicBezierVertices(
                mx, my, p2c1c2x, p2c1c2y,
                p2x, p2y, p2c2x, p2c2y,
                sqrInvScaleX, sqrInvScaleY, outputVertices);
    }
}
| 681 | |
| 682 | void PathRenderer::recursiveQuadraticBezierVertices( |
| 683 | float ax, float ay, |
| 684 | float bx, float by, |
| 685 | float cx, float cy, |
| 686 | float sqrInvScaleX, float sqrInvScaleY, Vector<Vertex>& outputVertices) { |
| 687 | float dx = bx - ax; |
| 688 | float dy = by - ay; |
| 689 | float d = (cx - bx) * dy - (cy - by) * dx; |
| 690 | |
| 691 | if (d * d < THRESHOLD * THRESHOLD * (dx * dx * sqrInvScaleY + dy * dy * sqrInvScaleX)) { |
| 692 | // below thresh, draw line by adding endpoint |
| 693 | pushToVector(outputVertices, bx, by); |
| 694 | } else { |
| 695 | float acx = (ax + cx) * 0.5f; |
| 696 | float bcx = (bx + cx) * 0.5f; |
| 697 | float acy = (ay + cy) * 0.5f; |
| 698 | float bcy = (by + cy) * 0.5f; |
| 699 | |
| 700 | // midpoint |
| 701 | float mx = (acx + bcx) * 0.5f; |
| 702 | float my = (acy + bcy) * 0.5f; |
| 703 | |
| 704 | recursiveQuadraticBezierVertices(ax, ay, mx, my, acx, acy, |
| 705 | sqrInvScaleX, sqrInvScaleY, outputVertices); |
| 706 | recursiveQuadraticBezierVertices(mx, my, bx, by, bcx, bcy, |
| 707 | sqrInvScaleX, sqrInvScaleY, outputVertices); |
| 708 | } |
| 709 | } |
| 710 | |
| 711 | }; // namespace uirenderer |
| 712 | }; // namespace android |