/*
    Copyright 2010 Google Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
 */

#include "GrContext.h"
#include "GrTextureCache.h"
#include "GrTextStrike.h"
#include "GrMemory.h"
#include "GrPathIter.h"
#include "GrClipIterator.h"
#include "GrIndexBuffer.h"

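// When DEFER_TEXT_RENDERING is enabled, getTextTarget() hands out the
// deferred text draw buffer (fTextDrawBuffer) instead of the GPU, and the
// buffered draws are played back by flushText() before other draws reach the
// GPU.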
#define DEFER_TEXT_RENDERING 1

static const size_t MAX_TEXTURE_CACHE_COUNT = 128;
static const size_t MAX_TEXTURE_CACHE_BYTES = 8 * 1024 * 1024;

#if DEFER_TEXT_RENDERING
    static const uint32_t POOL_VB_SIZE = 2048 *
                                         GrDrawTarget::VertexSize(
                                            GrDrawTarget::kTextFormat_VertexLayoutBit |
                                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0));
    static const uint32_t NUM_POOL_VBS = 8;
#else
    static const uint32_t POOL_VB_SIZE = 0;
    static const uint32_t NUM_POOL_VBS = 0;
#endif

GrContext* GrContext::Create(GrGpu::Engine engine,
                             GrGpu::Platform3DContext context3D) {
    GrContext* ctx = NULL;
    GrGpu* fGpu = GrGpu::Create(engine, context3D);
    if (NULL != fGpu) {
        ctx = new GrContext(fGpu);
        fGpu->unref();
    }
    return ctx;
}

GrContext* GrContext::CreateGLShaderContext() {
    return GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
}

GrContext::~GrContext() {
    fGpu->unref();
    delete fTextureCache;
    delete fFontCache;
}

void GrContext::abandonAllTextures() {
    fTextureCache->deleteAll(GrTextureCache::kAbandonTexture_DeleteMode);
    fFontCache->abandonAll();
}

GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
                                              const GrSamplerState& sampler) {
    finalizeTextureKey(key, sampler);
    return fTextureCache->findAndLock(*key);
}

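// Nearest-neighbor image stretch on the CPU, used as a fallback when a
// stretched power-of-two copy cannot be rendered on the GPU. Source
// coordinates step in 16.16 fixed point: dx/dy are the source increments per
// destination pixel, and starting at dx>>1 / dy>>1 samples each destination
// pixel at its center. No filtering is applied.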
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

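// finalizeTextureKey() marks a key as "special" when the texture is
// non-power-of-two but the sampler needs a repeating wrap mode and the GPU
// lacks full NPOT support. For special keys the source data is first cached
// as a clamp-only texture and then stretched to the next power-of-two size,
// preferably by drawing into a texture render target, otherwise via the CPU
// stretchImage() fallback above.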
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
                                                const GrSamplerState& sampler,
                                                const GrGpu::TextureDesc& desc,
                                                void* srcData, size_t rowBytes) {
    GrAssert(key->width() == desc.fWidth);
    GrAssert(key->height() == desc.fHeight);

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    GrTextureEntry* entry = NULL;
    bool special = finalizeTextureKey(key, sampler);
    if (special) {
        GrTextureEntry* clampEntry;
        GrTextureKey clampKey(*key);
        clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());

        if (NULL == clampEntry) {
            clampEntry = createAndLockTexture(&clampKey,
                                              GrSamplerState::ClampNoFilter(),
                                              desc, srcData, rowBytes);
            GrAssert(NULL != clampEntry);
            if (NULL == clampEntry) {
                return NULL;
            }
        }
        GrTexture* clampTexture = clampEntry->texture();
        GrGpu::TextureDesc rtDesc = desc;
        rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
                         GrGpu::kNoPathRendering_TextureFlag;
        rtDesc.fWidth  = GrNextPow2(GrMax<int>(desc.fWidth,
                                               fGpu->minRenderTargetWidth()));
        rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
                                               fGpu->minRenderTargetHeight()));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            GrGpu::AutoStateRestore asr(fGpu);
            fGpu->setRenderTarget(texture->asRenderTarget());
            fGpu->setTexture(0, clampEntry->texture());
            fGpu->setStencilPass(GrGpu::kNone_StencilPass);
            fGpu->setTextureMatrix(0, GrMatrix::I());
            fGpu->setViewMatrix(GrMatrix::I());
            fGpu->setAlpha(0xff);
            fGpu->setBlendFunc(GrGpu::kOne_BlendCoeff, GrGpu::kZero_BlendCoeff);
            fGpu->disableState(GrGpu::kDither_StateBit |
                               GrGpu::kClip_StateBit |
                               GrGpu::kAntialias_StateBit);
            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
                                          GrSamplerState::kClamp_WrapMode,
                                          sampler.isFilter());
            fGpu->setSamplerState(0, stretchSampler);

            static const GrVertexLayout layout =
                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                verts[0].setIRectFan(0, 0,
                                     texture->contentWidth(),
                                     texture->contentHeight(),
                                     2*sizeof(GrPoint));
                GrScalar tw = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentWidth() /
                                              clampTexture->allocWidth());
                GrScalar th = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentHeight() /
                                              clampTexture->allocHeight());
                verts[1].setRectFan(0, 0, tw, th, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                     0, 4);
                entry = fTextureCache->createAndLock(*key, texture);
            }
            texture->removeRenderTarget();
        } else {
            // TODO: our CPU stretch doesn't filter, yet we cache separate
            // stretched textures for the filtered and unfiltered sampler
            // states. Either implement a filtered stretch blit on the CPU or
            // only create one stretched copy when the FBO path fails.

            rtDesc.fFlags = 0;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrTexture::BytesPerPixel(desc.fFormat);
            GrAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            GrAssert(NULL != texture);
            entry = fTextureCache->createAndLock(*key, texture);
        }
        fTextureCache->unlock(clampEntry);

    } else {
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry = fTextureCache->createAndLock(*key, texture);
        } else {
            entry = NULL;
        }
    }
    return entry;
}

void GrContext::unlockTexture(GrTextureEntry* entry) {
    fTextureCache->unlock(entry);
}

void GrContext::detachCachedTexture(GrTextureEntry* entry) {
    fTextureCache->detach(entry);
}

void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
    fTextureCache->reattachAndUnlock(entry);
}

GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}

GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
                                                      int width, int height) {
    return fGpu->createPlatformRenderTarget(platformRenderTarget,
                                            width, height);
}

bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
                                          int width, int height) {
    if (!fGpu->supports8BitPalette()) {
        return false;
    }

    bool needsRepeat = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
                       sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    switch (fGpu->npotTextureSupport()) {
        case GrGpu::kNone_NPOTTextureType:
            return isPow2;
        case GrGpu::kNoRepeat_NPOTTextureType:
            return isPow2 || !needsRepeat;
        case GrGpu::kNonRendertarget_NPOTTextureType:
        case GrGpu::kFull_NPOTTextureType:
            return true;
    }
    // should never get here
    GrAssert(!"Bad enum from fGpu->npotTextureSupport");
    return false;
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::eraseColor(GrColor color) {
    fGpu->eraseColor(color);
}

void GrContext::drawFull(bool useTexture) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r(fGpu->getClip().getBounds());
    GrMatrix inverse;
    if (fGpu->getViewInverse(&inverse)) {
        inverse.mapRect(&r);
    } else {
        GrPrintf("---- fGpu->getViewInverse failed\n");
    }

    this->fillRect(r, useTexture);
}

/* create a triangle strip that strokes the specified rect. There are 8
   unique vertices, but we repeat the last 2 to close up. Alternatively we
   could use an index array and send only 8 verts, but it is not clear that
   would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], const GrRect& rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

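// Draws the rect as a filled fan when width < 0, as a hairline line strip
// when width == 0, and as a stroked triangle strip (via setStrokeRectStrip)
// when width > 0.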
void GrContext::drawRect(const GrRect& rect, bool useTexture, GrScalar width) {
    GrVertexLayout layout = useTexture ?
                            GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0) :
                            0;

    static const int worstCaseVertCount = 10;
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, worstCaseVertCount, 0);
    if (!geo.succeeded()) {
        return;
    }

    this->flushText();

    int vertCount;
    GrGpu::PrimitiveType primType;
    GrPoint* vertex = geo.positions();

    if (width >= 0) {
        if (width > 0) {
            vertCount = 10;
            primType = GrGpu::kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = GrGpu::kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }
    } else {
        vertCount = 4;
        primType = GrGpu::kTriangleFan_PrimitiveType;
        vertex->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
    }

    fGpu->drawNonIndexed(primType, 0, vertCount);
}

////////////////////////////////////////////////////////////////////////////////

#define NEW_EVAL        1   // Use adaptive path tessellation
#define STENCIL_OFF     0   // Always disable stencil (even when needed)
#define CPU_TRANSFORM   0   // Transform path verts on CPU

#if NEW_EVAL

#define EVAL_TOL GR_Scalar1

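// The point-count helpers below bound how many line segments are needed to
// approximate a curve within the tolerance: each subdivision roughly quarters
// the curve's maximum distance d from its chord, so log4(d/tol) subdivision
// levels suffice, giving 2^(log4(d/tol)) = sqrt(d/tol) segments. For example,
// d = 16*tol yields sqrt(16) = 4 segments, rounded up to the next power of
// two.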
static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) {
    GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
    // TODO: fixed-point sqrt
    if (d < tol) {
        return 1;
    } else {
        // Each time we subdivide, d should be cut in 4. So we need to
        // subdivide x = log4(d/tol) times. x subdivisions create 2^(x)
        // points.
        // 2^(log4(d/tol)) = sqrt(d/tol)
        d = ceilf(sqrtf(d/tol));
        return GrNextPow2((uint32_t)d);
    }
}

static uint32_t generate_quadratic_points(const GrPoint& p0,
                                          const GrPoint& p1,
                                          const GrPoint& p2,
                                          GrScalar tolSqd,
                                          GrPoint** points,
                                          uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
        (*points)[0] = p2;
        *points += 1;
        return 1;
    }

    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
    };
    GrPoint r(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY));

    pointsLeft >>= 1;
    uint32_t a = generate_quadratic_points(p0, q[0], r, tolSqd, points, pointsLeft);
    uint32_t b = generate_quadratic_points(r, q[1], p2, tolSqd, points, pointsLeft);
    return a + b;
}

static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) {
    GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
                       points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
    d = sqrtf(d);
    if (d < tol) {
        return 1;
    } else {
        d = ceilf(sqrtf(d/tol));
        return GrNextPow2((uint32_t)d);
    }
}

static uint32_t generate_cubic_points(const GrPoint& p0,
                                      const GrPoint& p1,
                                      const GrPoint& p2,
                                      const GrPoint& p3,
                                      GrScalar tolSqd,
                                      GrPoint** points,
                                      uint32_t pointsLeft) {
    if (pointsLeft < 2 ||
        (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
         p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
        (*points)[0] = p3;
        *points += 1;
        return 1;
    }
    GrPoint q[] = {
        GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
        GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
        GrPoint(GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY))
    };
    GrPoint r[] = {
        GrPoint(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY)),
        GrPoint(GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY))
    };
    GrPoint s(GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY));
    pointsLeft >>= 1;
    uint32_t a = generate_cubic_points(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
    uint32_t b = generate_cubic_points(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
    return a + b;
}

#else // !NEW_EVAL

static GrScalar gr_eval_quad(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[0] - 2 * coord[2] + coord[4];
    GrScalar B = 2 * (coord[2] - coord[0]);
    GrScalar C = coord[0];

    return GrMul(GrMul(A, t) + B, t) + C;
}

static void gr_eval_quad_at(const GrPoint src[3], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);
    pt->set(gr_eval_quad(&src[0].fX, t), gr_eval_quad(&src[0].fY, t));
}

static GrScalar gr_eval_cubic(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[6] - coord[0] + 3 * (coord[2] - coord[4]);
    GrScalar B = 3 * (coord[0] - 2 * coord[2] + coord[4]);
    GrScalar C = 3 * (coord[2] - coord[0]);
    GrScalar D = coord[0];

    return GrMul(GrMul(GrMul(A, t) + B, t) + C, t) + D;
}

static void gr_eval_cubic_at(const GrPoint src[4], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);

    pt->set(gr_eval_cubic(&src[0].fX, t), gr_eval_cubic(&src[0].fY, t));
}

#endif // !NEW_EVAL

static int worst_case_point_count(GrPathIter* path,
                                  int* subpaths,
                                  const GrMatrix& matrix,
                                  GrScalar tol) {
    int pointCount = 0;
    *subpaths = 1;

    bool first = true;

    GrPathIter::Command cmd;

    GrPoint pts[4];
    while ((cmd = path->next(pts)) != GrPathIter::kEnd_Command) {

        switch (cmd) {
            case GrPathIter::kLine_Command:
                pointCount += 1;
                break;
            case GrPathIter::kQuadratic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 3);
                pointCount += quadratic_point_count(pts, tol);
#else
                pointCount += 9;
#endif
                break;
            case GrPathIter::kCubic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 4);
                pointCount += cubic_point_count(pts, tol);
#else
                pointCount += 17;
#endif
                break;
            case GrPathIter::kMove_Command:
                pointCount += 1;
                if (!first) {
                    ++(*subpaths);
                }
                break;
            default:
                break;
        }
        first = false;
    }
    return pointCount;
}

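// Returns true if the path can be color-filled in a single draw without
// stencil passes: convex paths and non-overlapping convex pieces cover each
// pixel at most once, so even-odd and winding fills reduce to a plain fill.
// For winding fill, same-winding convex pieces also qualify when blending can
// be disabled and dithering is off, since pixels hit twice just rewrite the
// same color.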
static inline bool single_pass_path(const GrPathIter& path,
                                    GrContext::PathFills fill,
                                    bool useTex,
                                    const GrGpu& gpu) {
#if STENCIL_OFF
    return true;
#else
    if (GrContext::kEvenOdd_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint;
    } else if (GrContext::kWinding_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint ||
               (hint == GrPathIter::kSameWindingConvexPieces_ConvexHint &&
                gpu.canDisableBlend() && !gpu.isDitherState());

    }
    return false;
#endif
}

void GrContext::drawPath(GrPathIter* path, PathFills fill,
                         bool useTexture, const GrPoint* translate) {

    flushText();

    GrGpu::AutoStateRestore asr(fGpu);

#if NEW_EVAL
    GrMatrix viewM;
    fGpu->getViewMatrix(&viewM);
    // In order to tessellate the path we get a bound on how much the matrix
    // can stretch when mapping to screen coordinates.
    GrScalar stretch = viewM.getMaxStretch();
    bool useStretch = stretch > 0;
    GrScalar tol = EVAL_TOL;
    if (!useStretch) {
        // TODO: deal with perspective in some better way.
        tol /= 10;
    } else {
        // TODO: fixed point divide
        GrScalar sinv = 1 / stretch;
        tol = GrMul(tol, sinv);
        viewM = GrMatrix::I();
    }
    GrScalar tolSqd = GrMul(tol, tol);
#else
    // pass to worst_case... but won't be used.
    static const GrScalar tol = -1;
#endif

    int subpathCnt;
    int maxPts = worst_case_point_count(path,
                                        &subpathCnt,
#if CPU_TRANSFORM
                                        cpuMatrix,
#else
                                        GrMatrix::I(),
#endif
                                        tol);
    GrVertexLayout layout = 0;
    if (useTexture) {
        layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    }
    // add 4 to hold the bounding rect
    GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, maxPts + 4, 0);

    GrPoint* base = (GrPoint*) arg.vertices();
    GrPoint* vert = base;
    GrPoint* subpathBase = base;

    GrAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);

    path->rewind();

    // TODO: use primitive restart if available rather than multiple draws
    GrGpu::PrimitiveType type;
    int passCount = 0;
    GrGpu::StencilPass passes[3];
    bool reverse = false;

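    // Pick the passes: hairlines and single-pass fills write color directly.
    // Otherwise the path is first rasterized into the stencil buffer (the
    // winding rule needs one or two stencil passes depending on GPU support)
    // and a final color pass fills the stenciled region. For inverse fills the
    // color pass covers the whole render target with the stencil sense
    // reversed.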
    if (kHairLine_PathFill == fill) {
        type = GrGpu::kLineStrip_PrimitiveType;
        passCount = 1;
        passes[0] = GrGpu::kNone_StencilPass;
    } else {
        type = GrGpu::kTriangleFan_PrimitiveType;
        if (single_pass_path(*path, fill, useTexture, *fGpu)) {
            passCount = 1;
            passes[0] = GrGpu::kNone_StencilPass;
        } else {
            switch (fill) {
                case kInverseEvenOdd_PathFill:
                    reverse = true;
                    // fallthrough
                case kEvenOdd_PathFill:
                    passCount = 2;
                    passes[0] = GrGpu::kEvenOddStencil_StencilPass;
                    passes[1] = GrGpu::kEvenOddColor_StencilPass;
                    break;

                case kInverseWinding_PathFill:
                    reverse = true;
                    // fallthrough
                case kWinding_PathFill:
                    passes[0] = GrGpu::kWindingStencil1_StencilPass;
                    if (fGpu->supportsSingleStencilPassWinding()) {
                        passes[1] = GrGpu::kWindingColor_StencilPass;
                        passCount = 2;
                    } else {
                        passes[1] = GrGpu::kWindingStencil2_StencilPass;
                        passes[2] = GrGpu::kWindingColor_StencilPass;
                        passCount = 3;
                    }
                    break;
                default:
                    GrAssert(!"Unknown path fill!");
                    return;
            }
        }
    }
    fGpu->setReverseFill(reverse);
#if CPU_TRANSFORM
    GrMatrix cpuMatrix;
    fGpu->getViewMatrix(&cpuMatrix);
    fGpu->setViewMatrix(GrMatrix::I());
#endif

    GrPoint pts[4];

    bool first = true;
    int subpath = 0;

    for (;;) {
        GrPathIter::Command cmd = path->next(pts);
#if CPU_TRANSFORM
        int numPts = GrPathIter::NumCommandPoints(cmd);
        cpuMatrix.mapPoints(pts, pts, numPts);
#endif
        switch (cmd) {
            case GrPathIter::kMove_Command:
                if (!first) {
                    subpathVertCount[subpath] = vert-subpathBase;
                    subpathBase = vert;
                    ++subpath;
                }
                *vert = pts[0];
                vert++;
                break;
            case GrPathIter::kLine_Command:
                *vert = pts[1];
                vert++;
                break;
            case GrPathIter::kQuadratic_Command: {
#if NEW_EVAL

                generate_quadratic_points(pts[0], pts[1], pts[2],
                                          tolSqd, &vert,
                                          quadratic_point_count(pts, tol));
#else
                const int n = 8;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_quad_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[2].fX, pts[2].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kCubic_Command: {
#if NEW_EVAL
                generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
                                      tolSqd, &vert,
                                      cubic_point_count(pts, tol));
#else
                const int n = 16;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_cubic_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[3].fX, pts[3].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kClose_Command:
                break;
            case GrPathIter::kEnd_Command:
                subpathVertCount[subpath] = vert-subpathBase;
                ++subpath; // this could be debug-only
                goto FINISHED;
        }
        first = false;
    }
FINISHED:
    GrAssert(subpath == subpathCnt);
    GrAssert((vert - base) <= maxPts);

    if (translate) {
        int count = vert - base;
        for (int i = 0; i < count; i++) {
            base[i].offset(translate->fX, translate->fY);
        }
    }

    // arbitrary path complexity cutoff
    bool useBounds = fill != kHairLine_PathFill &&
                     (reverse || (vert - base) > 8);
    GrPoint* boundsVerts = base + maxPts;
    if (useBounds) {
        GrRect bounds;
        if (reverse) {
            GrAssert(NULL != fGpu->currentRenderTarget());
            // draw over the whole world.
            bounds.setLTRB(0, 0,
                           GrIntToScalar(fGpu->currentRenderTarget()->width()),
                           GrIntToScalar(fGpu->currentRenderTarget()->height()));
        } else {
            bounds.setBounds((GrPoint*)base, vert - base);
        }
        boundsVerts[0].setRectFan(bounds.fLeft, bounds.fTop, bounds.fRight,
                                  bounds.fBottom);
    }

    for (int p = 0; p < passCount; ++p) {
        fGpu->setStencilPass(passes[p]);
        if (useBounds && (GrGpu::kEvenOddColor_StencilPass == passes[p] ||
                          GrGpu::kWindingColor_StencilPass == passes[p])) {
            fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                 maxPts, 4);
        } else {
            int baseVertex = 0;
            for (int sp = 0; sp < subpathCnt; ++sp) {
                fGpu->drawNonIndexed(type,
                                     baseVertex,
                                     subpathVertCount[sp]);
                baseVertex += subpathVertCount[sp];
            }
        }
    }
}

void GrContext::flush(bool flushRenderTarget) {
    flushText();
    if (flushRenderTarget) {
        fGpu->forceRenderTargetFlush();
    }
}

void GrContext::flushText() {
    fTextDrawBuffer.playback(fGpu);
    fTextDrawBuffer.reset();
}

bool GrContext::readPixels(int left, int top, int width, int height,
                           GrTexture::PixelConfig config, void* buffer) {
    this->flush(true);
    return fGpu->readPixels(left, top, width, height, config, buffer);
}

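// writePixels has no direct upload path here: the pixels are wrapped in a
// temporary texture and drawn into the current render target as an
// axis-aligned rectangle at (left, top) with blending disabled.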
void GrContext::writePixels(int left, int top, int width, int height,
                            GrTexture::PixelConfig config, const void* buffer,
                            size_t stride) {
    const GrGpu::TextureDesc desc = {
        0, GrGpu::kNone_AALevel, width, height, config
    };
    GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
    if (NULL == texture) {
        return;
    }

    this->flush(true);

    GrAutoUnref aur(texture);
    GrDrawTarget::AutoStateRestore asr(fGpu);

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    fGpu->setViewMatrix(matrix);
    matrix.setScale(GR_Scalar1 / texture->allocWidth(),
                    GR_Scalar1 / texture->allocHeight());
    fGpu->setTextureMatrix(0, matrix);

    fGpu->disableState(GrDrawTarget::kClip_StateBit);
    fGpu->setAlpha(0xFF);
    fGpu->setBlendFunc(GrDrawTarget::kOne_BlendCoeff,
                       GrDrawTarget::kZero_BlendCoeff);
    fGpu->setTexture(0, texture);
    fGpu->setSamplerState(0, GrSamplerState::ClampNoFilter());

    this->fillRect(GrRect(0, 0, GrIntToScalar(width), GrIntToScalar(height)),
                   true);
}

////////////////////////////////////////////////////////////////////////////////


/* -------------------------------------------------------
 * Mimicking the GrGpu interface for now
 * TODO: define appropriate higher-level API for context
 */

void GrContext::resetContext() {
    fGpu->resetContext();
}

GrVertexBuffer* GrContext::createVertexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createVertexBuffer(size, dynamic);
}

GrIndexBuffer* GrContext::createIndexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createIndexBuffer(size, dynamic);
}

void GrContext::setTexture(int stage, GrTexture* texture) {
    fGpu->setTexture(stage, texture);
}

void GrContext::setRenderTarget(GrRenderTarget* target) {
    flushText();
    fGpu->setRenderTarget(target);
}

GrRenderTarget* GrContext::currentRenderTarget() const {
    return fGpu->currentRenderTarget();
}

void GrContext::setDefaultRenderTargetSize(uint32_t width, uint32_t height) {
    fGpu->setDefaultRenderTargetSize(width, height);
}

void GrContext::setSamplerState(int stage, const GrSamplerState& samplerState) {
    fGpu->setSamplerState(stage, samplerState);
}

void GrContext::setTextureMatrix(int stage, const GrMatrix& m) {
    fGpu->setTextureMatrix(stage, m);
}

void GrContext::getViewMatrix(GrMatrix* m) const {
    fGpu->getViewMatrix(m);
}

void GrContext::setViewMatrix(const GrMatrix& m) {
    fGpu->setViewMatrix(m);
}

bool GrContext::reserveAndLockGeometry(GrVertexLayout vertexLayout,
                                       uint32_t vertexCount,
                                       uint32_t indexCount,
                                       void** vertices,
                                       void** indices) {
    return fGpu->reserveAndLockGeometry(vertexLayout,
                                        vertexCount,
                                        indexCount,
                                        vertices,
                                        indices);
}

void GrContext::drawIndexed(GrGpu::PrimitiveType type,
                            uint32_t startVertex,
                            uint32_t startIndex,
                            uint32_t vertexCount,
                            uint32_t indexCount) {
    flushText();
    fGpu->drawIndexed(type,
                      startVertex,
                      startIndex,
                      vertexCount,
                      indexCount);
}

void GrContext::drawNonIndexed(GrGpu::PrimitiveType type,
                               uint32_t startVertex,
                               uint32_t vertexCount) {
    flushText();
    fGpu->drawNonIndexed(type,
                         startVertex,
                         vertexCount);
}

void GrContext::setVertexSourceToArray(const void* array,
                                       GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToArray(array, vertexLayout);
}

void GrContext::setIndexSourceToArray(const void* array) {
    fGpu->setIndexSourceToArray(array);
}

void GrContext::setVertexSourceToBuffer(GrVertexBuffer* buffer,
                                        GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToBuffer(buffer, vertexLayout);
}

void GrContext::setIndexSourceToBuffer(GrIndexBuffer* buffer) {
    fGpu->setIndexSourceToBuffer(buffer);
}

void GrContext::releaseReservedGeometry() {
    fGpu->releaseReservedGeometry();
}

void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->enableState(GrDrawTarget::kClip_StateBit);
}

void GrContext::setAlpha(uint8_t alpha) {
    fGpu->setAlpha(alpha);
}

void GrContext::setColor(GrColor color) {
    fGpu->setColor(color);
}

static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

void GrContext::setAntiAlias(bool aa) {
    if (aa) {
        fGpu->enableState(GrGpu::kAntialias_StateBit);
    } else {
        fGpu->disableState(GrGpu::kAntialias_StateBit);
    }
}

void GrContext::setDither(bool dither) {
    // hack for now, since iPad dither is hella-slow
    dither = false;

    if (dither) {
        fGpu->enableState(GrGpu::kDither_StateBit);
    } else {
        fGpu->disableState(GrGpu::kDither_StateBit);
    }
}

void GrContext::setPointSize(float size) {
    fGpu->setPointSize(size);
}

void GrContext::setBlendFunc(GrGpu::BlendCoeff srcCoef,
                             GrGpu::BlendCoeff dstCoef) {
    fGpu->setBlendFunc(srcCoef, dstCoef);
}

void GrContext::resetStats() {
    fGpu->resetStats();
}

const GrGpu::Stats& GrContext::getStats() const {
    return fGpu->getStats();
}

void GrContext::printStats() const {
    fGpu->printStats();
}

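// The vertex buffer pool that backs deferred text rendering is only useful
// when the GPU supports buffer locking; otherwise the pool is created empty
// and the text draw buffer is given no pool to allocate from.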
GrContext::GrContext(GrGpu* gpu) :
    fVBAllocPool(gpu,
                 gpu->supportsBufferLocking() ? POOL_VB_SIZE : 0,
                 gpu->supportsBufferLocking() ? NUM_POOL_VBS : 0),
    fTextDrawBuffer(gpu->supportsBufferLocking() ? &fVBAllocPool : NULL) {
    fGpu = gpu;
    fGpu->ref();
    fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
                                       MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);
}

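// Folds sampler-dependent state into the texture key. When NPOT support is
// absent or excludes repeat wrap modes, a non-power-of-two texture that needs
// a repeating wrap mode gets a "special" key (bit 0), and bit 1 records
// whether the stretched copy should be filtered, so filtered and unfiltered
// versions are cached separately. Returns true for special keys.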
bool GrContext::finalizeTextureKey(GrTextureKey* key,
                                   const GrSamplerState& sampler) const {
    uint32_t bits = 0;
    uint16_t width = key->width();
    uint16_t height = key->height();
    if (fGpu->npotTextureSupport() < GrGpu::kNonRendertarget_NPOTTextureType) {
        if ((sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
             sampler.getWrapY() != GrSamplerState::kClamp_WrapMode) &&
            (!GrIsPow2(width) || !GrIsPow2(height))) {
            bits |= 1;
            bits |= sampler.isFilter() ? 2 : 0;
        }
    }
    key->finalize(bits);
    return 0 != bits;
}

GrDrawTarget* GrContext::getTextTarget() {
#if DEFER_TEXT_RENDERING
    fTextDrawBuffer.initializeDrawStateAndClip(*fGpu);
    return &fTextDrawBuffer;
#else
    return fGpu;
#endif
}

const GrIndexBuffer* GrContext::quadIndexBuffer() const {
    return fGpu->quadIndexBuffer();
}

int GrContext::maxQuadsInIndexBuffer() const {
    return fGpu->maxQuadsInIndexBuffer();
}