blob: c5d5492d4fd18643c97a96ecf3008355dd0f48b3 [file] [log] [blame]
Chris Craik5ea17242016-01-11 14:07:59 -08001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Chris Craikf158b492016-01-12 14:45:08 -080017#include "LayerBuilder.h"
Chris Craik5ea17242016-01-11 14:07:59 -080018
19#include "BakedOpState.h"
20#include "RenderNode.h"
21#include "utils/PaintUtils.h"
22#include "utils/TraceUtils.h"
23
24#include <utils/TypeHelpers.h>
25
26namespace android {
27namespace uirenderer {
28
29class BatchBase {
30public:
31 BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
32 : mBatchId(batchId)
33 , mMerging(merging) {
34 mBounds = op->computedState.clippedBounds;
35 mOps.push_back(op);
36 }
37
38 bool intersects(const Rect& rect) const {
39 if (!rect.intersects(mBounds)) return false;
40
41 for (const BakedOpState* op : mOps) {
42 if (rect.intersects(op->computedState.clippedBounds)) {
43 return true;
44 }
45 }
46 return false;
47 }
48
49 batchid_t getBatchId() const { return mBatchId; }
50 bool isMerging() const { return mMerging; }
51
52 const std::vector<BakedOpState*>& getOps() const { return mOps; }
53
54 void dump() const {
55 ALOGD(" Batch %p, id %d, merging %d, count %d, bounds " RECT_STRING,
Chris Craikb250a832016-01-11 19:28:17 -080056 this, mBatchId, mMerging, (int) mOps.size(), RECT_ARGS(mBounds));
Chris Craik5ea17242016-01-11 14:07:59 -080057 }
58protected:
59 batchid_t mBatchId;
60 Rect mBounds;
61 std::vector<BakedOpState*> mOps;
62 bool mMerging;
63};
64
65class OpBatch : public BatchBase {
66public:
Chris Craik5ea17242016-01-11 14:07:59 -080067 OpBatch(batchid_t batchId, BakedOpState* op)
68 : BatchBase(batchId, op, false) {
69 }
70
71 void batchOp(BakedOpState* op) {
72 mBounds.unionWith(op->computedState.clippedBounds);
73 mOps.push_back(op);
74 }
75};
76
// Merging batch: member ops may be issued as a single multiDraw, so admission
// (canMergeWith) must guarantee the ops are visually interchangeable when drawn together.
class MergingOpBatch : public BatchBase {
public:
    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true)
            , mClipSideFlags(op->computedState.clipSideFlags) {
    }

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. Positive bounds delta means new bounds fit in old.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }

    // True if the paint carries none of the attributes that affect merged drawing
    // (full alpha, no color filter, no shader) — i.e. it is interchangeable with
    // a null paint for merge purposes.
    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255
                && paint.getColorFilter() == nullptr
                && paint.getShader() == nullptr;
    }

    // True if two paints are interchangeable for a merged multiDraw.
    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        // Note: don't check color, since all currently mergeable ops can merge across colors
        return a.getAlpha() == b.getAlpha()
                && a.getColorFilter() == b.getColorFilter()
                && a.getShader() == b.getShader();
    }

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If true, the op's multiDraw must be guaranteed to handle both ops simultaneously, so it is
     * important to consider all paint attributes used in the draw calls in deciding both a) if an
     * op tries to merge at all, and b) if the op can merge with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations being
     * dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch = getBatchId() == OpBatchType::Text
                || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        // Compare the candidate (lhs) against the batch's first op (rhs); all ops
        // already in the batch passed the same checks, so one representative suffices.
        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;

        // Local masks prevent merge, since they're potentially in different coordinate spaces
        if (lhs->computedState.localProjectionPathMask
                || rhs->computedState.localProjectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if a op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta)) return false;
        }

        // Paint comparison: identical pointers trivially merge; a null paint merges
        // with a "default" paint (see paintIsDefault); otherwise compare attributes.
        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }

    // Adds an op that has already passed canMergeWith().
    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        // Because a new op must have passed canMergeWith(), we know it's passed the clipping compat
        // check, and doesn't extend past a side of the clip that's in use by the merged batch.
        // Therefore it's safe to simply always merge flags, and use the bounds as the clip rect.
        mClipSideFlags |= op->computedState.clipSideFlags;
    }

    int getClipSideFlags() const { return mClipSideFlags; }
    const Rect& getClipRect() const { return mBounds; }

private:
    // OR of the clip-side flags of every merged op; with mBounds, defines the merged clip.
    int mClipSideFlags;
};
199
// Constructs a builder for one layer's worth of deferred ops.
// renderNode is null for the root/fbo0 layer; when non-null, the builder targets
// that node's offscreen buffer. repaintClip mirrors repaintRect so deferred layer
// clears (see flushLayerClears) have a stable ClipRect to reference.
LayerBuilder::LayerBuilder(uint32_t width, uint32_t height,
        const Rect& repaintRect, const BeginLayerOp* beginLayerOp, RenderNode* renderNode)
        : width(width)
        , height(height)
        , repaintRect(repaintRect)
        , repaintClip(repaintRect)
        , offscreenBuffer(renderNode ? renderNode->getLayer() : nullptr)
        , beginLayerOp(beginLayerOp)
        , renderNode(renderNode) {}
// iterate back toward target to see if anything drawn since should overlap the new op
// if no target, merging ops still iterate to find similar batch to insert after
//
// On return: *insertBatchIndex is the position a NEW batch should be inserted at
// (so it draws after every batch it must not reorder against), and *targetBatch is
// nulled if an intervening batch overlaps the op (the op can't join the target
// without changing draw order).
void LayerBuilder::locateInsertIndex(int batchId, const Rect& clippedBounds,
        BatchBase** targetBatch, size_t* insertBatchIndex) const {
    for (int i = mBatches.size() - 1; i >= 0; i--) {
        BatchBase* overBatch = mBatches[i];

        // reached the batch we hoped to merge/batch into — nothing since it overlaps
        if (overBatch == *targetBatch) break;

        // TODO: also consider shader shared between batch types
        if (batchId == overBatch->getBatchId()) {
            *insertBatchIndex = i + 1;
            if (!*targetBatch) break; // found insert position, quit
        }

        if (overBatch->intersects(clippedBounds)) {
            // NOTE: it may be possible to optimize for special cases where two operations
            // of the same batch/paint could swap order, such as with a non-mergeable
            // (clipped) and a mergeable text operation
            *targetBatch = nullptr;
            break;
        }
    }
}
234
Chris Craikf158b492016-01-12 14:45:08 -0800235void LayerBuilder::deferLayerClear(const Rect& rect) {
Chris Craik5ea17242016-01-11 14:07:59 -0800236 mClearRects.push_back(rect);
237}
238
Chris Craik80d2ade2016-03-28 12:54:07 -0700239void LayerBuilder::onDeferOp(LinearAllocator& allocator, const BakedOpState* bakedState) {
240 if (bakedState->op->opId != RecordedOpId::CopyToLayerOp) {
241 // First non-CopyToLayer, so stop stashing up layer clears for unclipped save layers,
242 // and issue them together in one draw.
243 flushLayerClears(allocator);
244
245 if (CC_UNLIKELY(activeUnclippedSaveLayers.empty()
246 && bakedState->computedState.opaqueOverClippedBounds
Chris Craik9cd1bbe2016-04-14 16:08:25 -0700247 && bakedState->computedState.clippedBounds.contains(repaintRect)
248 && !Properties::debugOverdraw)) {
Chris Craik80d2ade2016-03-28 12:54:07 -0700249 // discard all deferred drawing ops, since new one will occlude them
250 clear();
251 }
252 }
253}
254
// Issues all rects queued by deferLayerClear() as one clear-blended SimpleRectsOp,
// deferred like any other unmergeable op. No-op when nothing is queued.
void LayerBuilder::flushLayerClears(LinearAllocator& allocator) {
    if (CC_UNLIKELY(!mClearRects.empty())) {
        const int vertCount = mClearRects.size() * 4;
        // put the verts in the frame allocator, since
        // 1) SimpleRectsOps needs verts, not rects
        // 2) even if mClearRects stored verts, std::vectors will move their contents
        Vertex* const verts = (Vertex*) allocator.create_trivial_array<Vertex>(vertCount);

        // Emit 4 corner verts per rect while accumulating the union bounds.
        Vertex* currentVert = verts;
        Rect bounds = mClearRects[0];
        for (auto&& rect : mClearRects) {
            bounds.unionWith(rect);
            Vertex::set(currentVert++, rect.left, rect.top);
            Vertex::set(currentVert++, rect.right, rect.top);
            Vertex::set(currentVert++, rect.left, rect.bottom);
            Vertex::set(currentVert++, rect.right, rect.bottom);
        }
        mClearRects.clear(); // discard rects before drawing so this method isn't reentrant

        // One or more unclipped saveLayers have been enqueued, with deferred clears.
        // Flush all of these clears with a single draw
        SkPaint* paint = allocator.create<SkPaint>();
        paint->setBlendMode(SkBlendMode::kClear);
        SimpleRectsOp* op = allocator.create_trivial<SimpleRectsOp>(bounds,
                Matrix4::identity(), nullptr, paint,
                verts, vertCount);
        // Clip against the full repaint area; deferUnmergeableOp re-enters the normal
        // deferral path (safe, since mClearRects was cleared above).
        BakedOpState* bakedState = BakedOpState::directConstruct(allocator,
                &repaintClip, bounds, *op);
        deferUnmergeableOp(allocator, bakedState, OpBatchType::Vertices);
    }
}
286
Chris Craikf158b492016-01-12 14:45:08 -0800287void LayerBuilder::deferUnmergeableOp(LinearAllocator& allocator,
Chris Craik5ea17242016-01-11 14:07:59 -0800288 BakedOpState* op, batchid_t batchId) {
Chris Craik80d2ade2016-03-28 12:54:07 -0700289 onDeferOp(allocator, op);
Chris Craik5ea17242016-01-11 14:07:59 -0800290 OpBatch* targetBatch = mBatchLookup[batchId];
291
292 size_t insertBatchIndex = mBatches.size();
293 if (targetBatch) {
294 locateInsertIndex(batchId, op->computedState.clippedBounds,
295 (BatchBase**)(&targetBatch), &insertBatchIndex);
296 }
297
298 if (targetBatch) {
299 targetBatch->batchOp(op);
300 } else {
301 // new non-merging batch
John Reck7df9ff22016-02-10 16:08:08 -0800302 targetBatch = allocator.create<OpBatch>(batchId, op);
Chris Craik5ea17242016-01-11 14:07:59 -0800303 mBatchLookup[batchId] = targetBatch;
304 mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
305 }
306}
307
// Defers an op that is a candidate for multiDraw merging: merged into an existing
// MergingOpBatch with the same (batchId, mergeId) when compatible, otherwise a new
// merging batch is created and registered for future merges.
void LayerBuilder::deferMergeableOp(LinearAllocator& allocator,
        BakedOpState* op, batchid_t batchId, mergeid_t mergeId) {
    // May flush deferred clears or wipe all batches — must precede the lookups below.
    onDeferOp(allocator, op);
    MergingOpBatch* targetBatch = nullptr;

    // Try to merge with any existing batch with same mergeId
    auto getResult = mMergingBatchLookup[batchId].find(mergeId);
    if (getResult != mMergingBatchLookup[batchId].end()) {
        targetBatch = getResult->second;
        if (!targetBatch->canMergeWith(op)) {
            targetBatch = nullptr;
        }
    }

    // Even with no merge target, iterate to find where a new batch belongs in draw order.
    size_t insertBatchIndex = mBatches.size();
    locateInsertIndex(batchId, op->computedState.clippedBounds,
            (BatchBase**)(&targetBatch), &insertBatchIndex);

    if (targetBatch) {
        targetBatch->mergeOp(op);
    } else {
        // new merging batch; replaces any previous entry for this mergeId in the lookup
        targetBatch = allocator.create<MergingOpBatch>(batchId, op);
        mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));

        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}
336
// Replays every deferred batch in order, dispatching merged batches (>1 op) to
// mergedReceivers[opId] as a single MergedBakedOpList, and everything else op-by-op
// to unmergedReceivers[opId]. Wrapped in a systrace section either way.
void LayerBuilder::replayBakedOpsImpl(void* arg,
        BakedOpReceiver* unmergedReceivers, MergedOpReceiver* mergedReceivers) const {
    if (renderNode) {
        ATRACE_FORMAT_BEGIN("Issue HW Layer DisplayList %s %ux%u",
                renderNode->getName(), width, height);
    } else {
        ATRACE_BEGIN("flush drawing commands");
    }

    for (const BatchBase* batch : mBatches) {
        size_t size = batch->getOps().size();
        if (size > 1 && batch->isMerging()) {
            // All ops in a merging batch share an opId, so the first op's id picks the receiver.
            int opId = batch->getOps()[0]->op->opId;
            // Safe downcast: isMerging() is only true for MergingOpBatch.
            const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
            MergedBakedOpList data = {
                batch->getOps().data(),
                size,
                mergingBatch->getClipSideFlags(),
                mergingBatch->getClipRect()
            };
            mergedReceivers[opId](arg, data);
        } else {
            // single-op merging batches fall through to the unmerged path
            for (const BakedOpState* op : batch->getOps()) {
                unmergedReceivers[op->op->opId](arg, *op);
            }
        }
    }
    ATRACE_END();  // closes whichever trace section was begun above
}
366
Chris Craik80d2ade2016-03-28 12:54:07 -0700367void LayerBuilder::clear() {
368 mBatches.clear();
369 for (int i = 0; i < OpBatchType::Count; i++) {
370 mBatchLookup[i] = nullptr;
371 mMergingBatchLookup[i].clear();
372 }
373}
374
Chris Craikf158b492016-01-12 14:45:08 -0800375void LayerBuilder::dump() const {
Chris Craik02806282016-03-11 19:16:21 -0800376 ALOGD("LayerBuilder %p, %ux%u buffer %p, blo %p, rn %p (%s)",
377 this, width, height, offscreenBuffer, beginLayerOp,
378 renderNode, renderNode ? renderNode->getName() : "-");
Chris Craik5ea17242016-01-11 14:07:59 -0800379 for (const BatchBase* batch : mBatches) {
380 batch->dump();
381 }
382}
383
384} // namespace uirenderer
385} // namespace android