blob: 72a428f1c70c78c3be415b519b0218e45c101910 [file] [log] [blame]
John Reckcec24ae2013-11-05 13:27:50 -08001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "RenderThread.h"

#include "CanvasContext.h"
#include "EglManager.h"
#include "OpenGLReadback.h"
#include "RenderProxy.h"
#include "VulkanManager.h"
#include "hwui/Bitmap.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaOpenGLReadback.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "renderstate/RenderState.h"
#include "renderthread/OpenGLPipeline.h"
#include "utils/FatVector.h"

#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <sys/resource.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>

#include <cstdio>
Chris Craik65fe5ee2015-01-26 18:06:29 -080039
John Reckcec24ae2013-11-05 13:27:50 -080040namespace android {
John Reckcec24ae2013-11-05 13:27:50 -080041namespace uirenderer {
42namespace renderthread {
43
// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
// the frame callbacks at the vsync that was just received (see
// drainDisplayEventQueue()).
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);
Chris Craikd41c4d82015-01-05 15:51:13 -080052TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}
John Reck4f02bf42014-01-03 18:09:17 -080053
54RenderTask* TaskQueue::next() {
55 RenderTask* ret = mHead;
56 if (ret) {
57 mHead = ret->mNext;
58 if (!mHead) {
Chris Craikd41c4d82015-01-05 15:51:13 -080059 mTail = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080060 }
Chris Craikd41c4d82015-01-05 15:51:13 -080061 ret->mNext = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080062 }
63 return ret;
64}
65
// Returns the head of the queue (the next task that would be popped)
// without removing it; nullptr when the queue is empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}
69
// Inserts the task into the queue, keeping the list sorted by mRunAt
// (earliest first). Tasks with equal mRunAt keep FIFO order because the
// scan below uses <=.
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            // Walk from the head: 'previous' ends up as the last task that
            // runs no later than this one, 'next' as its successor.
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // New earliest task: it becomes the head.
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    // Inserted after the last node; it is the new tail.
                    mTail = task;
                }
            }
        }
    } else {
        // Empty queue: the task is both head and tail.
        mTail = mHead = task;
    }
}
103
John Recka5dda642014-05-22 15:43:54 -0700104void TaskQueue::queueAtFront(RenderTask* task) {
John Reck2f944482017-03-27 14:34:28 -0700105 LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
John Recka5dda642014-05-22 15:43:54 -0700106 if (mTail) {
107 task->mNext = mHead;
108 mHead = task;
109 } else {
110 mTail = mHead = task;
111 }
112}
113
// Unlinks the given task from the queue. Fatals if the task is not queued.
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    // (a task with no mNext that isn't the tail cannot be in the list).
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            // Removed the last node: its predecessor becomes the new tail.
            mTail = previous;
        }
    }
}
135
// RenderTask that forwards to RenderThread::dispatchFrameCallbacks().
// A single instance (mFrameCallbackTask) is re-queued with a small delay by
// drainDisplayEventQueue() whenever a vsync needs callbacks dispatched.
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;  // non-owning; the RenderThread outlives us
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};
146
// Tracks whether getInstance() has ever run, so hasInstance() can answer
// without constructing the singleton as a side effect.
static bool gHasRenderThreadInstance = false;

bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

// Lazily creates and returns the process-wide RenderThread singleton.
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}
161
// Constructs and immediately starts the render thread. Heavy, thread-local
// setup (EGL, RenderState, vsync receiver) is deferred to initThreadLocals(),
// which runs on the new thread itself from threadLoop().
// NOTE(review): Thread(true) presumably makes this a Java-callable thread —
// confirm against the utils/Thread ctor.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");  // starts the thread; threadLoop() takes over from here
}
176
// The RenderThread is a process-lifetime singleton (see getInstance());
// reaching this destructor is a programming error, so abort loudly.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}
180
John Recke45b1fd2014-04-15 09:50:16 -0700181void RenderThread::initializeDisplayEventReceiver() {
182 LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
183 mDisplayEventReceiver = new DisplayEventReceiver();
184 status_t status = mDisplayEventReceiver->initCheck();
185 LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
186 "failed with status: %d", status);
187
188 // Register the FD
189 mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
190 Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
191}
192
// One-time setup run on the render thread itself (first thing in
// threadLoop()): queries display info, derives the frame interval, and
// constructs the per-thread managers. Order matters: the EGL/RenderState/
// Vulkan managers take a reference to *this and the CacheManager needs
// mDisplayInfo populated first.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // NOTE(review): assumes mDisplayInfo.fps > 0; a zero fps would produce a
    // bogus frame interval — confirm getDisplayInfo guarantees this.
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mVkManager = new VulkanManager(*this);
    mCacheManager = new CacheManager(mDisplayInfo);
}
206
207void RenderThread::dumpGraphicsMemory(int fd) {
John Reck34781b22017-07-05 16:39:36 -0700208 globalProfileData()->dump(fd);
Derek Sollenbergerf9e45d12017-06-01 13:07:39 -0400209
210 String8 cachesOutput;
211 String8 pipeline;
212 auto renderType = Properties::getRenderPipelineType();
213 switch (renderType) {
214 case RenderPipelineType::OpenGL: {
215 if (Caches::hasInstance()) {
216 cachesOutput.appendFormat("Caches:\n");
217 Caches::getInstance().dumpMemoryUsage(cachesOutput);
218 } else {
219 cachesOutput.appendFormat("No caches instance.");
220 }
221 pipeline.appendFormat("FrameBuilder");
222 break;
223 }
224 case RenderPipelineType::SkiaGL: {
225 mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
226 pipeline.appendFormat("Skia (OpenGL)");
227 break;
228 }
229 case RenderPipelineType::SkiaVulkan: {
230 mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
231 pipeline.appendFormat("Skia (Vulkan)");
232 break;
233 }
234 default:
235 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
236 break;
237 }
238
239 FILE *file = fdopen(fd, "a");
240 fprintf(file, "\n%s\n", cachesOutput.string());
241 fprintf(file, "\nPipeline=%s\n", pipeline.string());
242 fflush(file);
John Reck3b202512014-06-23 13:13:08 -0700243}
244
Derek Sollenbergerc4fbada2016-11-07 16:05:41 -0500245Readback& RenderThread::readback() {
246
247 if (!mReadback) {
248 auto renderType = Properties::getRenderPipelineType();
249 switch (renderType) {
250 case RenderPipelineType::OpenGL:
251 mReadback = new OpenGLReadbackImpl(*this);
252 break;
253 case RenderPipelineType::SkiaGL:
254 case RenderPipelineType::SkiaVulkan:
255 // It works to use the OpenGL pipeline for Vulkan but this is not
256 // ideal as it causes us to create an OpenGL context in addition
257 // to the Vulkan one.
258 mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
259 break;
260 default:
261 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
262 break;
263 }
264 }
265
266 return *mReadback;
267}
268
Derek Sollenbergerf9e45d12017-06-01 13:07:39 -0400269void RenderThread::setGrContext(GrContext* context) {
270 mCacheManager->reset(context);
271 if (mGrContext.get()) {
272 mGrContext->releaseResourcesAndAbandonContext();
273 }
274 mGrContext.reset(context);
275}
276
John Recke45b1fd2014-04-15 09:50:16 -0700277int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
278 if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
279 ALOGE("Display event receiver pipe was closed or an error occurred. "
280 "events=0x%x", events);
281 return 0; // remove the callback
282 }
283
284 if (!(events & Looper::EVENT_INPUT)) {
285 ALOGW("Received spurious callback for unhandled poll event. "
286 "events=0x%x", events);
287 return 1; // keep the callback
288 }
289
290 reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();
291
292 return 1; // keep the callback
293}
294
295static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
296 DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
297 nsecs_t latest = 0;
298 ssize_t n;
299 while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
300 for (ssize_t i = 0; i < n; i++) {
301 const DisplayEventReceiver::Event& ev = buf[i];
302 switch (ev.header.type) {
303 case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
304 latest = ev.header.timestamp;
305 break;
306 }
307 }
308 }
309 if (n < 0) {
310 ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
311 }
312 return latest;
313}
314
// Drains pending display events and, if a fresh vsync arrived, schedules
// mFrameCallbackTask to run shortly after the vsync timestamp.
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        // We consumed the outstanding vsync request.
        mVsyncRequested = false;
        // Only queue the dispatch task once per frame; vsyncReceived() decides
        // whether this timestamp actually advances the frame clock.
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            // Delay slightly so the UI thread has a chance to hand us a new
            // frame before the callbacks replay (see DISPATCH_FRAME_CALLBACKS_DELAY).
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
328
329void RenderThread::dispatchFrameCallbacks() {
John Recka5dda642014-05-22 15:43:54 -0700330 ATRACE_CALL();
John Recke45b1fd2014-04-15 09:50:16 -0700331 mFrameCallbackTaskPending = false;
332
333 std::set<IFrameCallback*> callbacks;
334 mFrameCallbacks.swap(callbacks);
335
John Recka733f892014-12-19 11:37:21 -0800336 if (callbacks.size()) {
337 // Assume one of them will probably animate again so preemptively
338 // request the next vsync in case it occurs mid-frame
339 requestVsync();
340 for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
341 (*it)->doFrame();
342 }
John Recke45b1fd2014-04-15 09:50:16 -0700343 }
344}
345
John Recka5dda642014-05-22 15:43:54 -0700346void RenderThread::requestVsync() {
347 if (!mVsyncRequested) {
348 mVsyncRequested = true;
349 status_t status = mDisplayEventReceiver->requestNextVsync();
350 LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
351 "requestNextVsync failed with status: %d", status);
352 }
353}
354
// Main loop of the render thread: polls the Looper (which also delivers
// display events), runs due tasks, and keeps frame-callback/vsync scheduling
// consistent. Never exits; the trailing 'return false' is unreachable.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    // -1 means block in pollOnce() until an fd event or an explicit wake().
    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        // Convert the next task's due time into a poll timeout.
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        // Promote callbacks registered via postFrameCallback() into the
        // active set and make sure a vsync is on its way for them.
        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}
408
409void RenderThread::queue(RenderTask* task) {
410 AutoMutex _lock(mLock);
John Reck4f02bf42014-01-03 18:09:17 -0800411 mQueue.queue(task);
412 if (mNextWakeup && task->mRunAt < mNextWakeup) {
413 mNextWakeup = 0;
John Reckcec24ae2013-11-05 13:27:50 -0800414 mLooper->wake();
415 }
416}
417
// Queues the task and blocks the calling thread until the render thread has
// executed it.
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    // Wait in a loop so spurious wakeups are harmless; hasRun() is the
    // predicate set by the render thread when syncTask completes.
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}
432
John Recka5dda642014-05-22 15:43:54 -0700433void RenderThread::queueAtFront(RenderTask* task) {
434 AutoMutex _lock(mLock);
435 mQueue.queueAtFront(task);
436 mLooper->wake();
437}
438
John Recka733f892014-12-19 11:37:21 -0800439void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
440 task->mRunAt = runAtNs;
John Reck4f02bf42014-01-03 18:09:17 -0800441 queue(task);
442}
443
444void RenderThread::remove(RenderTask* task) {
John Reckcec24ae2013-11-05 13:27:50 -0800445 AutoMutex _lock(mLock);
John Reck4f02bf42014-01-03 18:09:17 -0800446 mQueue.remove(task);
447}
448
John Recke45b1fd2014-04-15 09:50:16 -0700449void RenderThread::postFrameCallback(IFrameCallback* callback) {
John Recka5dda642014-05-22 15:43:54 -0700450 mPendingRegistrationFrameCallbacks.insert(callback);
John Recke45b1fd2014-04-15 09:50:16 -0700451}
452
John Reck01a5ea32014-12-03 13:01:07 -0800453bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
454 size_t erased;
455 erased = mFrameCallbacks.erase(callback);
456 erased |= mPendingRegistrationFrameCallbacks.erase(callback);
457 return erased;
John Recka5dda642014-05-22 15:43:54 -0700458}
459
460void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
461 if (mFrameCallbacks.erase(callback)) {
462 mPendingRegistrationFrameCallbacks.insert(callback);
463 }
John Recke45b1fd2014-04-15 09:50:16 -0700464}
465
// Pops the next task that is due now, or returns nullptr if none is due.
// *nextWakeup (if non-null) receives the time the caller should sleep until:
// the head task's mRunAt, or LLONG_MAX when the queue is empty.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        // Nothing queued: sleep until explicitly woken.
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Head task is not due yet; report its due time and return nothing.
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}
485
Stan Iliev7bc3bc62017-05-24 13:28:36 -0400486sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
487 auto renderType = Properties::getRenderPipelineType();
488 switch (renderType) {
489 case RenderPipelineType::OpenGL:
490 return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
491 case RenderPipelineType::SkiaGL:
492 return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
493 case RenderPipelineType::SkiaVulkan:
494 return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
495 default:
496 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
497 break;
498 }
499 return nullptr;
500}
501
John Reckcec24ae2013-11-05 13:27:50 -0800502} /* namespace renderthread */
503} /* namespace uirenderer */
504} /* namespace android */