/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrPathRange.h"
#include "SkPath.h"

enum {
    kPathsPerGroup = 16 // Paths get tracked in groups of 16 for lazy loading.
};

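// Lazy-loading constructor: keeps a reference to the PathGenerator so that
// paths in the range can be generated on demand the first time they are drawn.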
GrPathRange::GrPathRange(GrGpu* gpu,
                         PathGenerator* pathGenerator,
                         const SkStrokeRec& stroke)
    : INHERITED(gpu, kIsWrapped),
      fPathGenerator(SkRef(pathGenerator)),
      fNumPaths(fPathGenerator->getNumPaths()),
      fStroke(stroke) {
    const int numGroups = (fNumPaths + kPathsPerGroup - 1) / kPathsPerGroup;
    fGeneratedPaths.reset((numGroups + 7) / 8); // 1 bit per path group.
    memset(&fGeneratedPaths.front(), 0, fGeneratedPaths.count());
}

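// Constructor for a range with no PathGenerator. willDrawPaths() is a no-op
// in this case, so every path is presumably initialized up front by whoever
// created the range.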
GrPathRange::GrPathRange(GrGpu* gpu,
                         int numPaths,
                         const SkStrokeRec& stroke)
    : INHERITED(gpu, kIsWrapped),
      fNumPaths(numPaths),
      fStroke(stroke) {
}

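// Called before the given paths are drawn. Dispatches on the runtime index
// type to the template below, which lazily generates any paths that have not
// been initialized yet.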
void GrPathRange::willDrawPaths(const void* indices, PathIndexType indexType, int count) const {
    if (!fPathGenerator) {
        return;
    }

    switch (indexType) {
        case kU8_PathIndexType: return this->willDrawPaths<uint8_t>(indices, count);
        case kU16_PathIndexType: return this->willDrawPaths<uint16_t>(indices, count);
        case kU32_PathIndexType: return this->willDrawPaths<uint32_t>(indices, count);
        default: SkFAIL("Unknown path index type");
    }
}

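// Walks the index array and generates any paths that are not loaded yet.
// Loaded state is tracked with one bit per group of kPathsPerGroup paths, so
// a miss on any path loads its entire group.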
template<typename IndexType> void GrPathRange::willDrawPaths(const void* indices, int count) const {
    SkASSERT(fPathGenerator);

    const IndexType* indexArray = reinterpret_cast<const IndexType*>(indices);
    bool didLoadPaths = false;

    for (int i = 0; i < count; ++i) {
        SkASSERT(indexArray[i] < static_cast<uint32_t>(fNumPaths));

        const int groupIndex = indexArray[i] / kPathsPerGroup;
        const int groupByte = groupIndex / 8;
        const uint8_t groupBit = 1 << (groupIndex % 8);

        const bool hasPath = SkToBool(fGeneratedPaths[groupByte] & groupBit);
        if (!hasPath) {
            // We track which paths are loaded in groups of kPathsPerGroup. To
            // mark a path as loaded we need to load the entire group.
            const int groupFirstPath = groupIndex * kPathsPerGroup;
            const int groupLastPath = SkTMin(groupFirstPath + kPathsPerGroup, fNumPaths) - 1;

            SkPath path;
            for (int pathIdx = groupFirstPath; pathIdx <= groupLastPath; ++pathIdx) {
                fPathGenerator->generatePath(pathIdx, &path);
                this->onInitPath(pathIdx, path);
            }

            fGeneratedPaths[groupByte] |= groupBit;
            didLoadPaths = true;
        }
    }

    if (didLoadPaths) {
        this->didChangeGpuMemorySize();
    }
}