blob: 942afa302185d7396541b05d9d97a41171f20e19 [file] [log] [blame]
reed@google.com873cb1e2010-12-23 15:00:45 +00001/*
2 Copyright 2010 Google Inc.
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 */
16
reed@google.comac10a2d2010-12-22 21:39:39 +000017
18#include "GrContext.h"
19#include "GrTextureCache.h"
20#include "GrTextStrike.h"
21#include "GrMemory.h"
22#include "GrPathIter.h"
23#include "GrClipIterator.h"
24#include "GrIndexBuffer.h"
25
// When 1, text draws go into fTextDrawBuffer and are replayed at flushText()
// time instead of being issued to the GPU immediately.
#define DEFER_TEXT_RENDERING 1

// Texture cache budget: maximum entry count and maximum total bytes.
static const size_t MAX_TEXTURE_CACHE_COUNT = 128;
static const size_t MAX_TEXTURE_CACHE_BYTES = 8 * 1024 * 1024;

#if DEFER_TEXT_RENDERING
    // Vertex-buffer pool backing the deferred text buffer: NUM_POOL_VBS
    // buffers, each sized for 2048 vertices in the text vertex layout.
    static const uint32_t POOL_VB_SIZE = 2048 *
                 GrDrawTarget::VertexSize(GrDrawTarget::kTextFormat_VertexLayoutBit);
    static const uint32_t NUM_POOL_VBS = 8;
#else
    // No deferred text, no pool storage needed.
    static const uint32_t POOL_VB_SIZE = 0;
    static const uint32_t NUM_POOL_VBS = 0;

#endif
40
41GrContext* GrContext::Create(GrGpu::Engine engine,
42 GrGpu::Platform3DContext context3D) {
43 GrContext* ctx = NULL;
44 GrGpu* fGpu = GrGpu::Create(engine, context3D);
45 if (NULL != fGpu) {
46 ctx = new GrContext(fGpu);
47 fGpu->unref();
48 }
49 return ctx;
50}
51
// Convenience factory for the OpenGL-shaders backend.
GrContext* GrContext::CreateGLShaderContext() {
    return GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
}

GrContext::~GrContext() {
    fGpu->unref();          // release the ref taken in the constructor
    delete fTextureCache;
    delete fFontCache;
}

// Drops every cached texture and glyph via the "abandon" paths -- used when
// the underlying 3D context is no longer valid.
void GrContext::abandonAllTextures() {
    fTextureCache->deleteAll(GrTextureCache::kAbandonTexture_DeleteMode);
    fFontCache->abandonAll();
}

// Finalizes the key for this sampler (see finalizeTextureKey) and returns the
// locked cache entry for it, or NULL if no matching texture is cached.
GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
                                              const GrSamplerState& sampler) {
    finalizeTextureKey(key, sampler);
    return fTextureCache->findAndLock(*key);
}
72
73static void stretchImage(void* dst,
74 int dstW,
75 int dstH,
76 void* src,
77 int srcW,
78 int srcH,
79 int bpp) {
80 GrFixed dx = (srcW << 16) / dstW;
81 GrFixed dy = (srcH << 16) / dstH;
82
83 GrFixed y = dy >> 1;
84
85 int dstXLimit = dstW*bpp;
86 for (int j = 0; j < dstH; ++j) {
87 GrFixed x = dx >> 1;
88 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
89 void* dstRow = (uint8_t*)dst + j*dstW*bpp;
90 for (int i = 0; i < dstXLimit; i += bpp) {
91 memcpy((uint8_t*) dstRow + i,
92 (uint8_t*) srcRow + (x>>16)*bpp,
93 bpp);
94 x += dx;
95 }
96 y += dy;
97 }
98}
99
/*
 * Creates a texture for (key, desc), uploads srcData, and returns a locked
 * cache entry (NULL on failure).
 *
 * If finalizeTextureKey reports the key is "special" (NPOT texture sampled
 * with a wrapping mode on hardware lacking full NPOT support), the data is
 * first cached as a plain clamp-sampled texture and then stretched into a
 * power-of-two texture: via a GPU blit when a render-target texture can be
 * created, otherwise via the CPU nearest-neighbor stretchImage().
 */
GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
                                                const GrSamplerState& sampler,
                                                const GrGpu::TextureDesc& desc,
                                                void* srcData, size_t rowBytes) {
    GrAssert(key->width() == desc.fWidth);
    GrAssert(key->height() == desc.fHeight);

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    GrTextureEntry* entry = NULL;
    bool special = finalizeTextureKey(key, sampler);
    if (special) {
        // Find or create the unstretched, clamp-sampled source texture first.
        GrTextureEntry* clampEntry;
        GrTextureKey clampKey(*key);
        clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());

        if (NULL == clampEntry) {
            clampEntry = createAndLockTexture(&clampKey,
                                              GrSamplerState::ClampNoFilter(),
                                              desc, srcData, rowBytes);
            GrAssert(NULL != clampEntry);
            if (NULL == clampEntry) {
                return NULL;
            }
        }
        GrTexture* clampTexture = clampEntry->texture();
        // Stretched target: power-of-two dimensions, clamped up to the gpu's
        // minimum render-target size so it can serve as the blit target.
        GrGpu::TextureDesc rtDesc = desc;
        rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
                         GrGpu::kNoPathRendering_TextureFlag;
        rtDesc.fWidth = GrNextPow2(GrMax<int>(desc.fWidth,
                                              fGpu->minRenderTargetWidth()));
        rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
                                               fGpu->minRenderTargetHeight()));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            // GPU path: draw the clamp texture into the new texture's render
            // target with an identity transform and src-over-nothing blend.
            GrGpu::AutoStateRestore asr(fGpu);
            fGpu->setRenderTarget(texture->asRenderTarget());
            fGpu->setTexture(clampEntry->texture());
            fGpu->setStencilPass(GrGpu::kNone_StencilPass);
            fGpu->setTextureMatrix(GrMatrix::I());
            fGpu->setViewMatrix(GrMatrix::I());
            fGpu->setAlpha(0xff);
            fGpu->setBlendFunc(GrGpu::kOne_BlendCoeff, GrGpu::kZero_BlendCoeff);
            fGpu->disableState(GrGpu::kDither_StateBit |
                               GrGpu::kClip_StateBit |
                               GrGpu::kAntialias_StateBit);
            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
                                          GrSamplerState::kClamp_WrapMode,
                                          sampler.isFilter());
            fGpu->setSamplerState(stretchSampler);

            static const GrVertexLayout layout =
                            GrDrawTarget::kSeparateTexCoord_VertexLayoutBit;
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                // Interleaved position/texcoord pairs: stride 2 GrPoints.
                verts[0].setIRectFan(0, 0,
                                     texture->contentWidth(),
                                     texture->contentHeight(),
                                     2*sizeof(GrPoint));
                // Tex coords cover only the content portion of the clamp
                // texture's (possibly larger) allocation.
                GrScalar tw = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentWidth() /
                                              clampTexture->allocWidth());
                GrScalar th = GrFixedToScalar(GR_Fixed1 *
                                              clampTexture->contentHeight() /
                                              clampTexture->allocHeight());
                verts[1].setRectFan(0, 0, tw, th, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                     0, 4);
                entry = fTextureCache->createAndLock(*key, texture);
            }
            // The render target was only needed for the blit.
            texture->removeRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = 0;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrTexture::BytesPerPixel(desc.fFormat);
            GrAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            // NOTE(review): unlike the non-special path, texture is passed to
            // createAndLock without a NULL check -- verify createAndLock
            // tolerates NULL if creation fails here.
            GrAssert(NULL != texture);
            entry = fTextureCache->createAndLock(*key, texture);
        }
        fTextureCache->unlock(clampEntry);

    } else {
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry = fTextureCache->createAndLock(*key, texture);
        } else {
            entry = NULL;
        }
    }
    return entry;
}
214
// Releases the lock taken by findAndLockTexture/createAndLockTexture.
void GrContext::unlockTexture(GrTextureEntry* entry) {
    fTextureCache->unlock(entry);
}

// Removes the entry from the cache while the caller keeps using it;
// pair with reattachAndUnlockCachedTexture below.
void GrContext::detachCachedTexture(GrTextureEntry* entry) {
    fTextureCache->detach(entry);
}

void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
    fTextureCache->reattachAndUnlock(entry);
}

// Creates a texture that bypasses the cache entirely; the caller owns the
// returned texture's ref.
GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}

// Wraps a platform-specific render-target handle in a GrRenderTarget.
GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
                                                      int width, int height) {
    return fGpu->createPlatformRenderTarget(platformRenderTarget,
                                            width, height);
}
238
239bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
240 int width, int height) {
241 if (!fGpu->supports8BitPalette()) {
242 return false;
243 }
244
245 bool needsRepeat = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
246 sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
247 bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
248
249 switch (fGpu->npotTextureSupport()) {
250 case GrGpu::kNone_NPOTTextureType:
251 return isPow2;
252 case GrGpu::kNoRepeat_NPOTTextureType:
253 return isPow2 || !needsRepeat;
254 case GrGpu::kNonRendertarget_NPOTTextureType:
255 case GrGpu::kFull_NPOTTextureType:
256 return true;
257 }
258 // should never get here
259 GrAssert(!"Bad enum from fGpu->npotTextureSupport");
260 return false;
261}
262
263////////////////////////////////////////////////////////////////////////////////
264
// Clears the current render target to the given color.
void GrContext::eraseColor(GrColor color) {
    fGpu->eraseColor(color);
}

// Fills the whole clip area. The clip bounds are mapped back through the
// inverse view matrix so the rect lands on the visible region once the view
// transform is applied again by fillRect.
void GrContext::drawFull(bool useTexture) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r(fGpu->getClip().getBounds());
    GrMatrix inverse;
    if (fGpu->getViewInverse(&inverse)) {
        inverse.mapRect(&r);
    } else {
        // non-invertible view: fall through and draw the clip-space rect as-is
        GrPrintf("---- fGpu->getViewInverse failed\n");
    }

    this->fillRect(r, useTexture);
}
282
283/* create a triangle strip that strokes the specified triangle. There are 8
284 unique vertices, but we repreat the last 2 to close up. Alternatively we
285 could use an indices array, and then only send 8 verts, but not sure that
286 would be faster.
287 */
288static void setStrokeRectStrip(GrPoint verts[10], const GrRect& rect,
289 GrScalar width) {
290 const GrScalar rad = GrScalarHalf(width);
291
292 verts[0].set(rect.fLeft + rad, rect.fTop + rad);
293 verts[1].set(rect.fLeft - rad, rect.fTop - rad);
294 verts[2].set(rect.fRight - rad, rect.fTop + rad);
295 verts[3].set(rect.fRight + rad, rect.fTop - rad);
296 verts[4].set(rect.fRight - rad, rect.fBottom - rad);
297 verts[5].set(rect.fRight + rad, rect.fBottom + rad);
298 verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
299 verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
300 verts[8] = verts[0];
301 verts[9] = verts[1];
302}
303
// Draws a rect: filled when width < 0, stroked when width > 0, hairline when
// width == 0. When useTexture is set, positions double as texture coords.
void GrContext::drawRect(const GrRect& rect, bool useTexture, GrScalar width) {
    GrVertexLayout layout = useTexture ?
                            GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit :
                            0;

    // 10 verts is the stroke case; fill (4) and hairline (5) need fewer.
    static const int worstCaseVertCount = 10;
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, worstCaseVertCount, 0);
    if (!geo.succeeded()) {
        return;
    }

    // NOTE(review): text is flushed after geometry is reserved -- confirm
    // that reserving geometry cannot disturb the buffered text playback.
    this->flushText();

    int vertCount;
    GrGpu::PrimitiveType primType;
    GrPoint* vertex = geo.positions();

    if (width >= 0) {
        if (width > 0) {
            // stroke: 10-vertex triangle strip around the rect edge
            vertCount = 10;
            primType = GrGpu::kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline: closed 5-point line strip
            vertCount = 5;
            primType = GrGpu::kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }
    } else {
        // fill: 4-vertex triangle fan
        vertCount = 4;
        primType = GrGpu::kTriangleFan_PrimitiveType;
        vertex->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
    }

    fGpu->drawNonIndexed(primType, 0, vertCount);
}
344
345////////////////////////////////////////////////////////////////////////////////
346
// Path tesselation configuration:
#define NEW_EVAL 1       // Use adaptive path tesselation
#define STENCIL_OFF 0    // Always disable stencil (even when needed)
#define CPU_TRANSFORM 0  // Transform path verts on CPU

#if NEW_EVAL

// Screen-space error tolerance used by the adaptive curve subdivision.
#define EVAL_TOL GR_Scalar1
354
355static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) {
356 GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
357 // TODO: fixed points sqrt
358 if (d < tol) {
359 return 1;
360 } else {
361 // Each time we subdivide, d should be cut in 4. So we need to
362 // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x)
363 // points.
364 // 2^(log4(x)) = sqrt(x);
365 d = ceilf(sqrtf(d/tol));
366 return GrNextPow2((uint32_t)d);
367 }
368}
369
370static uint32_t generate_quadratic_points(const GrPoint& p0,
371 const GrPoint& p1,
372 const GrPoint& p2,
373 GrScalar tolSqd,
374 GrPoint** points,
375 uint32_t pointsLeft) {
376 if (pointsLeft < 2 ||
377 (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
378 (*points)[0] = p2;
379 *points += 1;
380 return 1;
381 }
382
383 GrPoint q[] = {
384 GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
385 GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
386 };
387 GrPoint r(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY));
388
389 pointsLeft >>= 1;
390 uint32_t a = generate_quadratic_points(p0, q[0], r, tolSqd, points, pointsLeft);
391 uint32_t b = generate_quadratic_points(r, q[1], p2, tolSqd, points, pointsLeft);
392 return a + b;
393}
394
395static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) {
396 GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
397 points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
398 d = sqrtf(d);
399 if (d < tol) {
400 return 1;
401 } else {
402 d = ceilf(sqrtf(d/tol));
403 return GrNextPow2((uint32_t)d);
404 }
405}
406
407static uint32_t generate_cubic_points(const GrPoint& p0,
408 const GrPoint& p1,
409 const GrPoint& p2,
410 const GrPoint& p3,
411 GrScalar tolSqd,
412 GrPoint** points,
413 uint32_t pointsLeft) {
414 if (pointsLeft < 2 ||
415 (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
416 p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
417 (*points)[0] = p3;
418 *points += 1;
419 return 1;
420 }
421 GrPoint q[] = {
422 GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
423 GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
424 GrPoint(GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY))
425 };
426 GrPoint r[] = {
427 GrPoint(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY)),
428 GrPoint(GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY))
429 };
430 GrPoint s(GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY));
431 pointsLeft >>= 1;
432 uint32_t a = generate_cubic_points(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
433 uint32_t b = generate_cubic_points(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
434 return a + b;
435}
436
437#else // !NEW_EVAL
438
// ---- Fixed-step curve evaluation (compiled only when NEW_EVAL is 0) ----

// Evaluates one coordinate of a quadratic Bezier at parameter t.
// `coord` indexes every other GrScalar (x or y of a GrPoint[3]).
static GrScalar gr_eval_quad(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[0] - 2 * coord[2] + coord[4];
    GrScalar B = 2 * (coord[2] - coord[0]);
    GrScalar C = coord[0];

    return GrMul(GrMul(A, t) + B, t) + C;
}

// Evaluates the quadratic Bezier `src` at t into *pt.
static void gr_eval_quad_at(const GrPoint src[3], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);
    pt->set(gr_eval_quad(&src[0].fX, t), gr_eval_quad(&src[0].fY, t));
}

// Evaluates one coordinate of a cubic Bezier at parameter t
// (same every-other-GrScalar indexing as gr_eval_quad).
static GrScalar gr_eval_cubic(const GrScalar coord[], GrScalar t) {
    GrScalar A = coord[6] - coord[0] + 3 * (coord[2] - coord[4]);
    GrScalar B = 3 * (coord[0] - 2 * coord[2] + coord[4]);
    GrScalar C = 3 * (coord[2] - coord[0]);
    GrScalar D = coord[0];

    return GrMul(GrMul(GrMul(A, t) + B, t) + C, t) + D;
}

// Evaluates the cubic Bezier `src` at t into *pt.
static void gr_eval_cubic_at(const GrPoint src[4], GrScalar t, GrPoint* pt) {
    GrAssert(src);
    GrAssert(pt);
    GrAssert(t >= 0 && t <= GR_Scalar1);

    pt->set(gr_eval_cubic(&src[0].fX, t), gr_eval_cubic(&src[0].fY, t));
}
470
471#endif // !NEW_EVAL
472
/*
 * Iterates the path once and returns an upper bound on the vertex count the
 * tesselator will emit; also reports the number of subpaths via *subpaths.
 * `matrix` is applied to curve control points before measuring (identity
 * unless CPU_TRANSFORM) and `tol` is the subdivision tolerance.
 * Consumes the iterator -- the caller must rewind() before re-iterating.
 */
static int worst_case_point_count(GrPathIter* path,
                                  int* subpaths,
                                  const GrMatrix& matrix,
                                  GrScalar tol) {
    int pointCount = 0;
    *subpaths = 1;

    bool first = true;

    GrPathIter::Command cmd;

    GrPoint pts[4];
    while ((cmd = path->next(pts)) != GrPathIter::kEnd_Command) {

        switch (cmd) {
            case GrPathIter::kLine_Command:
                pointCount += 1;
                break;
            case GrPathIter::kQuadratic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 3);
                pointCount += quadratic_point_count(pts, tol);
#else
                pointCount += 9;
#endif
                break;
            case GrPathIter::kCubic_Command:
#if NEW_EVAL
                matrix.mapPoints(pts, pts, 4);
                pointCount += cubic_point_count(pts, tol);
#else
                pointCount += 17;
#endif
                break;
            case GrPathIter::kMove_Command:
                pointCount += 1;
                // every move after the first starts a new subpath
                if (!first) {
                    ++(*subpaths);
                }
                break;
            default:
                break;
        }
        first = false;
    }
    return pointCount;
}
520
/*
 * Returns true when the path can be drawn directly in one color pass, with
 * no stencil passes, based on the iterator's convexity hint. Convex paths
 * (or non-overlapping convex pieces) qualify for both fill rules; for
 * winding fill, same-winding convex pieces also qualify when blending can
 * be disabled and dither is off. `useTex` is currently unused.
 */
static inline bool single_pass_path(const GrPathIter& path,
                                    GrContext::PathFills fill,
                                    bool useTex,
                                    const GrGpu& gpu) {
#if STENCIL_OFF
    return true;
#else
    if (GrContext::kEvenOdd_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint;
    } else if (GrContext::kWinding_PathFill == fill) {
        GrPathIter::ConvexHint hint = path.hint();
        return hint == GrPathIter::kConvex_ConvexHint ||
               hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint ||
               (hint == GrPathIter::kSameWindingConvexPieces_ConvexHint &&
                gpu.canDisableBlend() && !gpu.isDitherState());

    }
    // inverse fills and hairlines never take the single-pass shortcut here
    return false;
#endif
}
543
/*
 * Tesselates and draws a path. Curves are flattened into line/fan vertices
 * (adaptively when NEW_EVAL), each subpath becomes one primitive, and the
 * fill rule determines the stencil pass sequence: a single color pass for
 * hairlines and provably convex fills, otherwise stencil pass(es) followed
 * by a color pass over a bounding rect. `translate` optionally offsets all
 * generated vertices.
 */
void GrContext::drawPath(GrPathIter* path, PathFills fill,
                         bool useTexture, const GrPoint* translate) {

    flushText();

    GrGpu::AutoStateRestore asr(fGpu);

#if NEW_EVAL
    GrMatrix viewM;
    fGpu->getViewMatrix(&viewM);
    // In order to tesselate the path we get a bound on how much the matrix can
    // stretch when mapping to screen coordinates.
    GrScalar stretch = viewM.getMaxStretch();
    bool useStretch = stretch > 0;
    GrScalar tol = EVAL_TOL;
    if (!useStretch) {
        // TODO: deal with perspective in some better way.
        tol /= 10;
    } else {
        // TODO: fixed point divide
        GrScalar sinv = 1 / stretch;
        tol = GrMul(tol, sinv);
        viewM = GrMatrix::I();
    }
    GrScalar tolSqd = GrMul(tol, tol);
#else
    // pass to worst_case... but won't be used.
    static const GrScalar tol = -1;
#endif

    // First pass over the path: bound the vertex count and count subpaths.
    int subpathCnt;
    int maxPts = worst_case_point_count(path,
                                        &subpathCnt,
#if CPU_TRANSFORM
                                        cpuMatrix,
#else
                                        GrMatrix::I(),
#endif
                                        tol);
    GrVertexLayout layout = 0;
    if (useTexture) {
        layout = GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit;
    }
    // add 4 to hold the bounding rect
    GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, maxPts + 4, 0);

    // NOTE(review): arg.succeeded() is not checked before using vertices()
    // (drawRect above does check) -- confirm reservation cannot fail here.
    GrPoint* base = (GrPoint*) arg.vertices();
    GrPoint* vert = base;
    GrPoint* subpathBase = base;

    GrAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);

    // Second pass: the iterator is rewound and actually tesselated.
    path->rewind();

    // TODO: use primitve restart if available rather than multiple draws
    GrGpu::PrimitiveType type;
    int passCount = 0;
    GrGpu::StencilPass passes[3];
    bool reverse = false;

    if (kHairLine_PathFill == fill) {
        type = GrGpu::kLineStrip_PrimitiveType;
        passCount = 1;
        passes[0] = GrGpu::kNone_StencilPass;
    } else {
        type = GrGpu::kTriangleFan_PrimitiveType;
        if (single_pass_path(*path, fill, useTexture, *fGpu)) {
            passCount = 1;
            passes[0] = GrGpu::kNone_StencilPass;
        } else {
            // stencil-then-color sequences per fill rule; inverse fills
            // reuse the non-inverse sequence with reverse-fill set.
            switch (fill) {
                case kInverseEvenOdd_PathFill:
                    reverse = true;
                    // fallthrough
                case kEvenOdd_PathFill:
                    passCount = 2;
                    passes[0] = GrGpu::kEvenOddStencil_StencilPass;
                    passes[1] = GrGpu::kEvenOddColor_StencilPass;
                    break;

                case kInverseWinding_PathFill:
                    reverse = true;
                    // fallthrough
                case kWinding_PathFill:
                    passes[0] = GrGpu::kWindingStencil1_StencilPass;
                    if (fGpu->supportsSingleStencilPassWinding()) {
                        passes[1] = GrGpu::kWindingColor_StencilPass;
                        passCount = 2;
                    } else {
                        passes[1] = GrGpu::kWindingStencil2_StencilPass;
                        passes[2] = GrGpu::kWindingColor_StencilPass;
                        passCount = 3;
                    }
                    break;
                default:
                    GrAssert(!"Unknown path fill!");
                    return;
            }
        }
    }
    fGpu->setReverseFill(reverse);
#if CPU_TRANSFORM
    GrMatrix cpuMatrix;
    fGpu->getViewMatrix(&cpuMatrix);
    fGpu->setViewMatrix(GrMatrix::I());
#endif

    GrPoint pts[4];

    bool first = true;
    int subpath = 0;

    for (;;) {
        GrPathIter::Command cmd = path->next(pts);
#if CPU_TRANSFORM
        int numPts = GrPathIter::NumCommandPoints(cmd);
        cpuMatrix.mapPoints(pts, pts, numPts);
#endif
        switch (cmd) {
            case GrPathIter::kMove_Command:
                // close out the previous subpath's vertex count
                if (!first) {
                    subpathVertCount[subpath] = vert-subpathBase;
                    subpathBase = vert;
                    ++subpath;
                }
                *vert = pts[0];
                vert++;
                break;
            case GrPathIter::kLine_Command:
                *vert = pts[1];
                vert++;
                break;
            case GrPathIter::kQuadratic_Command: {
#if NEW_EVAL

                generate_quadratic_points(pts[0], pts[1], pts[2],
                                          tolSqd, &vert,
                                          quadratic_point_count(pts, tol));
#else
                // fixed 8-step evaluation
                const int n = 8;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_quad_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[2].fX, pts[2].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kCubic_Command: {
#if NEW_EVAL
                generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
                                      tolSqd, &vert,
                                      cubic_point_count(pts, tol));
#else
                // fixed 16-step evaluation
                const int n = 16;
                const GrScalar dt = GR_Scalar1 / n;
                GrScalar t = dt;
                for (int i = 1; i < n; i++) {
                    gr_eval_cubic_at(pts, t, (GrPoint*)vert);
                    t += dt;
                    vert++;
                }
                vert->set(pts[3].fX, pts[3].fY);
                vert++;
#endif
                break;
            }
            case GrPathIter::kClose_Command:
                break;
            case GrPathIter::kEnd_Command:
                subpathVertCount[subpath] = vert-subpathBase;
                ++subpath; // this could be only in debug
                goto FINISHED;
        }
        first = false;
    }
FINISHED:
    GrAssert(subpath == subpathCnt);
    GrAssert((vert - base) <= maxPts);

    if (translate) {
        int count = vert - base;
        for (int i = 0; i < count; i++) {
            base[i].offset(translate->fX, translate->fY);
        }
    }

    // arbitrary path complexity cutoff
    bool useBounds = fill != kHairLine_PathFill &&
                     (reverse || (vert - base) > 8);
    // the 4 extra reserved verts at the end hold the bounding rect fan
    GrPoint* boundsVerts = base + maxPts;
    if (useBounds) {
        GrRect bounds;
        if (reverse) {
            GrAssert(NULL != fGpu->currentRenderTarget());
            // draw over the whole world.
            bounds.setLTRB(0, 0,
                           GrIntToScalar(fGpu->currentRenderTarget()->width()),
                           GrIntToScalar(fGpu->currentRenderTarget()->height()));
        } else {
            bounds.setBounds((GrPoint*)base, vert - base);
        }
        boundsVerts[0].setRectFan(bounds.fLeft, bounds.fTop, bounds.fRight,
                                  bounds.fBottom);
    }

    for (int p = 0; p < passCount; ++p) {
        fGpu->setStencilPass(passes[p]);
        if (useBounds && (GrGpu::kEvenOddColor_StencilPass == passes[p] ||
                          GrGpu::kWindingColor_StencilPass == passes[p])) {
            // color pass: one quad over the (stenciled) bounds
            fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
                                 maxPts, 4);
        } else {
            // stencil or direct pass: one primitive per subpath
            int baseVertex = 0;
            for (int sp = 0; sp < subpathCnt; ++sp) {
                fGpu->drawNonIndexed(type,
                                     baseVertex,
                                     subpathVertCount[sp]);
                baseVertex += subpathVertCount[sp];
            }
        }
    }
}
771
// Plays back deferred text; optionally also forces the gpu to flush its
// render target.
void GrContext::flush(bool flushRenderTarget) {
    flushText();
    if (flushRenderTarget) {
        fGpu->forceRenderTargetFlush();
    }
}

// Replays any buffered text drawing into the gpu, then empties the buffer.
void GrContext::flushText() {
    fTextDrawBuffer.playback(fGpu);
    fTextDrawBuffer.reset();
}

bool GrContext::readPixels(int left, int top, int width, int height,
                           GrTexture::PixelConfig config, void* buffer) {
    // Full flush (including render target) so the read sees all drawing.
    this->flush(true);
    return fGpu->readPixels(left, top, width, height, config, buffer);
}
789
// Writes a pixel block by uploading it into a temporary texture and drawing
// that texture as a quad at (left, top). Draw state is restored on exit.
void GrContext::writePixels(int left, int top, int width, int height,
                            GrTexture::PixelConfig config, const void* buffer,
                            size_t stride) {
    const GrGpu::TextureDesc desc = {
        0, GrGpu::kNone_AALevel, width, height, config
    };
    GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
    if (NULL == texture) {
        return;   // silently drop the write if the upload texture fails
    }

    this->flush(true);

    GrAutoUnref aur(texture);                  // release texture ref on exit
    GrDrawTarget::AutoStateRestore asr(fGpu);  // restore gpu state on exit

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    fGpu->setViewMatrix(matrix);
    // Texture matrix scales the quad's coords down to the texture's
    // (possibly larger) allocated dimensions.
    matrix.setScale(GR_Scalar1 / texture->allocWidth(),
                    GR_Scalar1 / texture->allocHeight());
    fGpu->setTextureMatrix(matrix);

    // opaque, unclipped, unfiltered 1:1 copy
    fGpu->disableState(GrDrawTarget::kClip_StateBit);
    fGpu->setAlpha(0xFF);
    fGpu->setBlendFunc(GrDrawTarget::kOne_BlendCoeff,
                       GrDrawTarget::kZero_BlendCoeff);
    fGpu->setTexture(texture);
    fGpu->setSamplerState(GrSamplerState::ClampNoFilter());

    this->fillRect(GrRect(0, 0, GrIntToScalar(width), GrIntToScalar(height)),
                   true);
}
823
824////////////////////////////////////////////////////////////////////////////////
825
826
827/* -------------------------------------------------------
828 * Mimicking the GrGpu interface for now
829 * TODO: define appropriate higher-level API for context
830 */
831
// Thin pass-throughs to the owned GrGpu (see the block comment above).

void GrContext::resetContext() {
    fGpu->resetContext();
}

GrVertexBuffer* GrContext::createVertexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createVertexBuffer(size, dynamic);
}

GrIndexBuffer* GrContext::createIndexBuffer(uint32_t size, bool dynamic) {
    return fGpu->createIndexBuffer(size, dynamic);
}

void GrContext::setTexture(GrTexture* texture) {
    fGpu->setTexture(texture);
}

// Text is flushed before switching targets -- presumably so buffered text
// lands in the render target it was recorded against.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    flushText();
    fGpu->setRenderTarget(target);
}

GrRenderTarget* GrContext::currentRenderTarget() const {
    return fGpu->currentRenderTarget();
}

void GrContext::setDefaultRenderTargetSize(uint32_t width, uint32_t height) {
    fGpu->setDefaultRenderTargetSize(width, height);
}

void GrContext::setSamplerState(const GrSamplerState& samplerState) {
    fGpu->setSamplerState(samplerState);
}

void GrContext::setTextureMatrix(const GrMatrix& m) {
    fGpu->setTextureMatrix(m);
}

void GrContext::getViewMatrix(GrMatrix* m) const {
    fGpu->getViewMatrix(m);
}

void GrContext::setViewMatrix(const GrMatrix& m) {
    fGpu->setViewMatrix(m);
}
876
// Geometry reservation pass-through; returns false if the gpu cannot
// reserve the requested vertex/index space.
bool GrContext::reserveAndLockGeometry(GrVertexLayout vertexLayout,
                                       uint32_t vertexCount,
                                       uint32_t indexCount,
                                       void** vertices,
                                       void** indices) {
    return fGpu->reserveAndLockGeometry(vertexLayout,
                                        vertexCount,
                                        indexCount,
                                        vertices,
                                        indices);
}

// Draw entry points flush buffered text first so draws stay ordered
// relative to deferred text rendering.
void GrContext::drawIndexed(GrGpu::PrimitiveType type,
                            uint32_t startVertex,
                            uint32_t startIndex,
                            uint32_t vertexCount,
                            uint32_t indexCount) {
    flushText();
    fGpu->drawIndexed(type,
                      startVertex,
                      startIndex,
                      vertexCount,
                      indexCount);
}

void GrContext::drawNonIndexed(GrGpu::PrimitiveType type,
                               uint32_t startVertex,
                               uint32_t vertexCount) {
    flushText();
    fGpu->drawNonIndexed(type,
                         startVertex,
                         vertexCount);
}

void GrContext::setVertexSourceToArray(const void* array,
                                       GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToArray(array, vertexLayout);
}

void GrContext::setIndexSourceToArray(const void* array) {
    fGpu->setIndexSourceToArray(array);
}

void GrContext::setVertexSourceToBuffer(GrVertexBuffer* buffer,
                                        GrVertexLayout vertexLayout) {
    fGpu->setVertexSourceToBuffer(buffer, vertexLayout);
}

void GrContext::setIndexSourceToBuffer(GrIndexBuffer* buffer) {
    fGpu->setIndexSourceToBuffer(buffer);
}

void GrContext::releaseReservedGeometry() {
    fGpu->releaseReservedGeometry();
}
932
// Sets the clip and turns clipping on in the gpu state.
void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->enableState(GrDrawTarget::kClip_StateBit);
}

void GrContext::setAlpha(uint8_t alpha) {
    fGpu->setAlpha(alpha);
}

void GrContext::setColor(GrColor color) {
    fGpu->setColor(color);
}
945
/*
 * Returns `bits` with bit `shift` set when `pred` is nonzero, cleared
 * otherwise. The mask is built in intptr_t width: the previous `1 << shift`
 * was a 32-bit int shift, which is undefined/truncating for shift >= 31 on
 * 64-bit builds. (Currently unused in this file -- possibly kept for
 * callers elsewhere; verify before removing.)
 */
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = static_cast<intptr_t>(1) << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}
955
void GrContext::setAntiAlias(bool aa) {
    if (aa) {
        fGpu->enableState(GrGpu::kAntialias_StateBit);
    } else {
        fGpu->disableState(GrGpu::kAntialias_StateBit);
    }
}

// NOTE: dither is deliberately force-disabled below; the enable branch is
// currently unreachable.
void GrContext::setDither(bool dither) {
    // hack for now, since iPad dither is hella-slow
    dither = false;

    if (dither) {
        fGpu->enableState(GrGpu::kDither_StateBit);
    } else {
        fGpu->disableState(GrGpu::kDither_StateBit);
    }
}

void GrContext::setPointSize(float size) {
    fGpu->setPointSize(size);
}

void GrContext::setBlendFunc(GrGpu::BlendCoeff srcCoef,
                             GrGpu::BlendCoeff dstCoef) {
    fGpu->setBlendFunc(srcCoef, dstCoef);
}

// Stats pass-throughs.
void GrContext::resetStats() {
    fGpu->resetStats();
}

const GrGpu::Stats& GrContext::getStats() const {
    return fGpu->getStats();
}

void GrContext::printStats() const {
    fGpu->printStats();
}
995
// Takes its own ref on gpu. The vertex-buffer pool and deferred text buffer
// are only given real storage when the gpu supports buffer locking;
// otherwise they are constructed with zero size / no pool.
GrContext::GrContext(GrGpu* gpu) :
    fVBAllocPool(gpu,
                 gpu->supportsBufferLocking() ? POOL_VB_SIZE : 0,
                 gpu->supportsBufferLocking() ? NUM_POOL_VBS : 0),
    fTextDrawBuffer(gpu->supportsBufferLocking() ? &fVBAllocPool : NULL) {
    fGpu = gpu;
    fGpu->ref();
    fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
                                       MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);
}
1007
// Folds sampler-dependent bits into the texture cache key. Returns true when
// the key describes a "special" texture: one that must be stretched to
// power-of-two because the hardware cannot wrap-sample NPOT textures.
// Bit 0 marks the stretched variant; bit 1 records whether it is filtered.
bool GrContext::finalizeTextureKey(GrTextureKey* key,
                                   const GrSamplerState& sampler) const {
    uint32_t bits = 0;
    uint16_t width = key->width();
    uint16_t height = key->height();
    if (fGpu->npotTextureSupport() < GrGpu::kNonRendertarget_NPOTTextureType) {
        // Stretching is only needed when the sampler wraps AND the texture
        // is not already power-of-two in both dimensions.
        if ((sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
             sampler.getWrapY() != GrSamplerState::kClamp_WrapMode) &&
            (!GrIsPow2(width) || !GrIsPow2(height))) {
            bits |= 1;
            bits |= sampler.isFilter() ? 2 : 0;
        }
    }
    key->finalize(bits);
    return 0 != bits;
}
1024
// Returns the target text drawing should use: the deferred draw buffer
// (seeded with the gpu's current state and clip) when DEFER_TEXT_RENDERING
// is on, otherwise the gpu itself.
GrDrawTarget* GrContext::getTextTarget() {
#if DEFER_TEXT_RENDERING
    fTextDrawBuffer.initializeDrawStateAndClip(*fGpu);
    return &fTextDrawBuffer;
#else
    return fGpu;
#endif
}

const GrIndexBuffer* GrContext::quadIndexBuffer() const {
    return fGpu->quadIndexBuffer();
}

int GrContext::maxQuadsInIndexBuffer() const {
    return fGpu->maxQuadsInIndexBuffer();
}
1041
1042
1043
reed@google.com873cb1e2010-12-23 15:00:45 +00001044