blob: 05545839702354cfdb4483cb583eed8b5a4b865c [file] [log] [blame]
John Reckcec24ae2013-11-05 13:27:50 -08001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "RenderThread.h"

#include "CanvasContext.h"
#include "EglManager.h"
#include "OpenGLReadback.h"
#include "RenderProxy.h"
#include "VulkanManager.h"
#include "hwui/Bitmap.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaOpenGLReadback.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "renderstate/RenderState.h"
#include "renderthread/OpenGLPipeline.h"
#include "utils/FatVector.h"

#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <sys/resource.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>

#include <atomic>
Chris Craik65fe5ee2015-01-26 18:06:29 -080039
John Reckcec24ae2013-11-05 13:27:50 -080040namespace android {
John Reckcec24ae2013-11-05 13:27:50 -080041namespace uirenderer {
42namespace renderthread {
43
John Recke45b1fd2014-04-15 09:50:16 -070044// Number of events to read at a time from the DisplayEventReceiver pipe.
45// The value should be large enough that we can quickly drain the pipe
46// using just a few large reads.
47static const size_t EVENT_BUFFER_SIZE = 100;
48
49// Slight delay to give the UI time to push us a new frame before we replay
John Recka733f892014-12-19 11:37:21 -080050static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);
John Recke45b1fd2014-04-15 09:50:16 -070051
Chris Craikd41c4d82015-01-05 15:51:13 -080052TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}
John Reck4f02bf42014-01-03 18:09:17 -080053
54RenderTask* TaskQueue::next() {
55 RenderTask* ret = mHead;
56 if (ret) {
57 mHead = ret->mNext;
58 if (!mHead) {
Chris Craikd41c4d82015-01-05 15:51:13 -080059 mTail = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080060 }
Chris Craikd41c4d82015-01-05 15:51:13 -080061 ret->mNext = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080062 }
63 return ret;
64}
65
// Returns the task at the head of the queue without removing it, or nullptr
// when the queue is empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}
69
70void TaskQueue::queue(RenderTask* task) {
71 // Since the RenderTask itself forms the linked list it is not allowed
72 // to have the same task queued twice
73 LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
74 if (mTail) {
75 // Fast path if we can just append
76 if (mTail->mRunAt <= task->mRunAt) {
77 mTail->mNext = task;
78 mTail = task;
79 } else {
80 // Need to find the proper insertion point
Chris Craikd41c4d82015-01-05 15:51:13 -080081 RenderTask* previous = nullptr;
John Reck4f02bf42014-01-03 18:09:17 -080082 RenderTask* next = mHead;
83 while (next && next->mRunAt <= task->mRunAt) {
84 previous = next;
85 next = next->mNext;
86 }
87 if (!previous) {
88 task->mNext = mHead;
89 mHead = task;
90 } else {
91 previous->mNext = task;
92 if (next) {
93 task->mNext = next;
94 } else {
95 mTail = task;
96 }
97 }
98 }
99 } else {
100 mTail = mHead = task;
101 }
102}
103
John Recka5dda642014-05-22 15:43:54 -0700104void TaskQueue::queueAtFront(RenderTask* task) {
John Reck2f944482017-03-27 14:34:28 -0700105 LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
John Recka5dda642014-05-22 15:43:54 -0700106 if (mTail) {
107 task->mNext = mHead;
108 mHead = task;
109 } else {
110 mTail = mHead = task;
111 }
112}
113
John Reck4f02bf42014-01-03 18:09:17 -0800114void TaskQueue::remove(RenderTask* task) {
115 // TaskQueue is strict here to enforce that users are keeping track of
116 // their RenderTasks due to how their memory is managed
117 LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
118 "Cannot remove a task that isn't in the queue!");
119
120 // If task is the head we can just call next() to pop it off
121 // Otherwise we need to scan through to find the task before it
122 if (peek() == task) {
123 next();
124 } else {
125 RenderTask* previous = mHead;
126 while (previous->mNext != task) {
127 previous = previous->mNext;
128 }
129 previous->mNext = task->mNext;
130 if (mTail == task) {
131 mTail = previous;
132 }
133 }
134}
135
John Recke45b1fd2014-04-15 09:50:16 -0700136class DispatchFrameCallbacks : public RenderTask {
137private:
138 RenderThread* mRenderThread;
139public:
Chih-Hung Hsiehc6baf562016-04-27 11:29:23 -0700140 explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}
John Recke45b1fd2014-04-15 09:50:16 -0700141
Chris Craikd41c4d82015-01-05 15:51:13 -0800142 virtual void run() override {
John Recke45b1fd2014-04-15 09:50:16 -0700143 mRenderThread->dispatchFrameCallbacks();
144 }
145};
146
// Set once getInstance() has created the singleton. hasInstance() may be
// called from arbitrary threads, so this must be atomic to avoid a data race
// with the write in getInstance().
static std::atomic<bool> gHasRenderThreadInstance(false);
148
// Returns true once getInstance() has created the singleton render thread.
// May be called from any thread.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}
152
// Returns the process-wide RenderThread singleton, creating (and starting)
// it on first use.
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    // The instance is deliberately leaked for the lifetime of the process.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}
161
// Constructs and immediately starts the render thread. run() is deliberately
// the last statement so that every member is initialized before threadLoop()
// can observe it from the new thread.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    // Load debug/pipeline properties before the thread starts using them.
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}
176
// The render thread lives for the whole process; reaching this destructor is
// a programming error, so abort loudly rather than tear down mid-use state.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}
180
// Creates the vsync event receiver and registers its fd with the looper so
// that displayEventReceiverCallback() fires when display events arrive.
// Must only be called once (fatal otherwise).
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}
192
// One-time setup executed on the render thread itself (first thing in
// threadLoop), before the first pollOnce. Derives the frame interval from the
// main display's refresh rate and constructs the per-thread managers.
// NOTE(review): members are constructed in this order; RenderState presumably
// depends on EglManager existing first — confirm before reordering.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // e.g. 60fps -> ~16.67ms frame interval, in nanoseconds.
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}
206
Derek Sollenbergerc4fbada2016-11-07 16:05:41 -0500207Readback& RenderThread::readback() {
208
209 if (!mReadback) {
210 auto renderType = Properties::getRenderPipelineType();
211 switch (renderType) {
212 case RenderPipelineType::OpenGL:
213 mReadback = new OpenGLReadbackImpl(*this);
214 break;
215 case RenderPipelineType::SkiaGL:
216 case RenderPipelineType::SkiaVulkan:
217 // It works to use the OpenGL pipeline for Vulkan but this is not
218 // ideal as it causes us to create an OpenGL context in addition
219 // to the Vulkan one.
220 mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
221 break;
222 default:
223 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
224 break;
225 }
226 }
227
228 return *mReadback;
229}
230
John Recke45b1fd2014-04-15 09:50:16 -0700231int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
232 if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
233 ALOGE("Display event receiver pipe was closed or an error occurred. "
234 "events=0x%x", events);
235 return 0; // remove the callback
236 }
237
238 if (!(events & Looper::EVENT_INPUT)) {
239 ALOGW("Received spurious callback for unhandled poll event. "
240 "events=0x%x", events);
241 return 1; // keep the callback
242 }
243
244 reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();
245
246 return 1; // keep the callback
247}
248
249static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
250 DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
251 nsecs_t latest = 0;
252 ssize_t n;
253 while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
254 for (ssize_t i = 0; i < n; i++) {
255 const DisplayEventReceiver::Event& ev = buf[i];
256 switch (ev.header.type) {
257 case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
258 latest = ev.header.timestamp;
259 break;
260 }
261 }
262 }
263 if (n < 0) {
264 ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
265 }
266 return latest;
267}
268
// Consumes all buffered display events. If a fresh vsync arrived and the
// TimeLord accepts its timestamp as a new frame, schedules the (single)
// frame-callback dispatch task to run shortly after the vsync.
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        // The outstanding vsync request has been satisfied.
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            // Delay slightly past the vsync so the UI thread has a chance to
            // push us a new frame first (see DISPATCH_FRAME_CALLBACKS_DELAY).
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
282
283void RenderThread::dispatchFrameCallbacks() {
John Recka5dda642014-05-22 15:43:54 -0700284 ATRACE_CALL();
John Recke45b1fd2014-04-15 09:50:16 -0700285 mFrameCallbackTaskPending = false;
286
287 std::set<IFrameCallback*> callbacks;
288 mFrameCallbacks.swap(callbacks);
289
John Recka733f892014-12-19 11:37:21 -0800290 if (callbacks.size()) {
291 // Assume one of them will probably animate again so preemptively
292 // request the next vsync in case it occurs mid-frame
293 requestVsync();
294 for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
295 (*it)->doFrame();
296 }
John Recke45b1fd2014-04-15 09:50:16 -0700297 }
298}
299
John Recka5dda642014-05-22 15:43:54 -0700300void RenderThread::requestVsync() {
301 if (!mVsyncRequested) {
302 mVsyncRequested = true;
303 status_t status = mDisplayEventReceiver->requestNextVsync();
304 LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
305 "requestNextVsync failed with status: %d", status);
306 }
307}
308
// Main loop of the render thread: poll the looper (which also delivers
// display events), drain and run all due tasks, then compute the next poll
// timeout from the earliest pending task. Never returns normally.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;  // -1 blocks in pollOnce until an event arrives
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        // Translate the next task deadline into a poll timeout; LLONG_MAX
        // means the queue is empty, so block indefinitely.
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;  // deadline already passed; poll without waiting
            }
        }

        // Promote callbacks registered since the last frame into the active
        // set and make sure a vsync is armed so they actually run.
        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}
362
// Thread-safe: enqueues a task (sorted by its mRunAt) and wakes the render
// thread if this task is due before the currently scheduled wakeup.
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    // mNextWakeup == 0 is a sentinel meaning a wake has already been issued
    // and not yet consumed by nextTask(), so no second wake is needed.
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}
371
// Queues |task| and blocks the calling thread until the render thread has
// finished running it.
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    // Loop to guard against spurious wakeups; hasRun() flips once the
    // wrapped task has executed and signaled the condition.
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}
386
// Thread-safe: places |task| ahead of everything else in the queue and
// unconditionally wakes the render thread so it runs as soon as possible.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}
392
// Schedules |task| to run at |runAtNs|, which nextTask() compares against
// systemTime(SYSTEM_TIME_MONOTONIC).
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}
397
// Thread-safe removal of a queued (not yet run) task. Fatal if the task is
// not currently in the queue (see TaskQueue::remove).
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}
402
// Registers |callback| to run on an upcoming frame; threadLoop merges the
// pending set into the active set and arms a vsync.
// NOTE(review): no lock is taken here, unlike queue()/remove() — presumably
// this must only be called on the render thread itself; confirm callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}
406
John Reck01a5ea32014-12-03 13:01:07 -0800407bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
408 size_t erased;
409 erased = mFrameCallbacks.erase(callback);
410 erased |= mPendingRegistrationFrameCallbacks.erase(callback);
411 return erased;
John Recka5dda642014-05-22 15:43:54 -0700412}
413
414void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
415 if (mFrameCallbacks.erase(callback)) {
416 mPendingRegistrationFrameCallbacks.insert(callback);
417 }
John Recke45b1fd2014-04-15 09:50:16 -0700418}
419
// Returns the next runnable task, or nullptr when the queue is empty or the
// head task is not due yet. Always records the time the thread must next wake
// (LLONG_MAX when idle) in mNextWakeup and, if non-null, *nextWakeup.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Head task is scheduled in the future; leave it queued.
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}
439
Stan Iliev7bc3bc62017-05-24 13:28:36 -0400440sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
441 auto renderType = Properties::getRenderPipelineType();
442 switch (renderType) {
443 case RenderPipelineType::OpenGL:
444 return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
445 case RenderPipelineType::SkiaGL:
446 return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
447 case RenderPipelineType::SkiaVulkan:
448 return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
449 default:
450 LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
451 break;
452 }
453 return nullptr;
454}
455
John Reckcec24ae2013-11-05 13:27:50 -0800456} /* namespace renderthread */
457} /* namespace uirenderer */
458} /* namespace android */