blob: aeaf5776bf4e04cb162d31283b37323212892c94 [file] [log] [blame]
herbc782b2a2015-06-29 13:46:55 -07001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "SkSharedMutex.h"
9
herbb906daf2015-09-29 09:37:59 -070010#include "SkAtomics.h"
herbc782b2a2015-06-29 13:46:55 -070011#include "SkTypes.h"
herbe4c17352015-09-29 14:38:01 -070012#include "SkSemaphore.h"
herbc782b2a2015-06-29 13:46:55 -070013
herbdec1afc2015-06-30 14:12:16 -070014#if defined(THREAD_SANITIZER)
15
16/* Report that a lock has been created at address "lock". */
17#define ANNOTATE_RWLOCK_CREATE(lock) \
18 AnnotateRWLockCreate(__FILE__, __LINE__, lock)
19
20/* Report that the lock at address "lock" is about to be destroyed. */
21#define ANNOTATE_RWLOCK_DESTROY(lock) \
22 AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
23
24/* Report that the lock at address "lock" has been acquired.
25 is_w=1 for writer lock, is_w=0 for reader lock. */
26#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
27 AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
28
29/* Report that the lock at address "lock" is about to be released. */
30#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
31 AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
32
33#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
34# ifdef __GNUC__
35# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
36# else
37/* TODO(glider): for Windows support we may want to change this macro in order
38 to prepend __declspec(selectany) to the annotations' declarations. */
39# error weak annotations are not supported for your compiler
40# endif
41#else
42# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
43#endif
44
45extern "C" {
46void AnnotateRWLockCreate(
47 const char *file, int line,
48 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
49void AnnotateRWLockDestroy(
50 const char *file, int line,
51 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
52void AnnotateRWLockAcquired(
53 const char *file, int line,
54 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
55void AnnotateRWLockReleased(
56 const char *file, int line,
57 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
58}
59
60#else
61
62#define ANNOTATE_RWLOCK_CREATE(lock)
63#define ANNOTATE_RWLOCK_DESTROY(lock)
64#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
65#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
66
67#endif
68
herb966e3d32015-09-18 07:00:48 -070069#ifdef SK_DEBUG
herbc782b2a2015-06-29 13:46:55 -070070
herbe4c17352015-09-29 14:38:01 -070071 #include "SkThreadID.h"
herb966e3d32015-09-18 07:00:48 -070072 #include "SkTDArray.h"
herbc782b2a2015-06-29 13:46:55 -070073
herb966e3d32015-09-18 07:00:48 -070074 class SkSharedMutex::ThreadIDSet {
75 public:
76 // Returns true if threadID is in the set.
herbe4c17352015-09-29 14:38:01 -070077 bool find(SkThreadID threadID) const {
herb966e3d32015-09-18 07:00:48 -070078 for (auto& t : fThreadIDs) {
79 if (t == threadID) return true;
80 }
81 return false;
82 }
83
84 // Returns true if did not already exist.
herbe4c17352015-09-29 14:38:01 -070085 bool tryAdd(SkThreadID threadID) {
herb966e3d32015-09-18 07:00:48 -070086 for (auto& t : fThreadIDs) {
87 if (t == threadID) return false;
88 }
89 fThreadIDs.append(1, &threadID);
90 return true;
91 }
92 // Returns true if already exists in Set.
herbe4c17352015-09-29 14:38:01 -070093 bool tryRemove(SkThreadID threadID) {
herb966e3d32015-09-18 07:00:48 -070094 for (int i = 0; i < fThreadIDs.count(); ++i) {
95 if (fThreadIDs[i] == threadID) {
96 fThreadIDs.remove(i);
97 return true;
98 }
99 }
100 return false;
101 }
102
103 void swap(ThreadIDSet& other) {
104 fThreadIDs.swap(other.fThreadIDs);
105 }
106
107 int count() const {
108 return fThreadIDs.count();
109 }
110
111 private:
herbe4c17352015-09-29 14:38:01 -0700112 SkTDArray<SkThreadID> fThreadIDs;
herb966e3d32015-09-18 07:00:48 -0700113 };
114
    // All three bookkeeping sets start empty: no current shared holders, no
    // exclusive waiters, no shared waiters. The sets are heap-allocated so the
    // debug-only ThreadIDSet type can stay private to this file (the members
    // are presumably smart pointers in SkSharedMutex.h — fWaitingShared.swap()
    // is used in release() below).
    SkSharedMutex::SkSharedMutex()
        : fCurrentShared(new ThreadIDSet)
        , fWaitingExclusive(new ThreadIDSet)
        , fWaitingShared(new ThreadIDSet){
        ANNOTATE_RWLOCK_CREATE(this);
    }
herbc782b2a2015-06-29 13:46:55 -0700121
herb966e3d32015-09-18 07:00:48 -0700122 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
herbdec1afc2015-06-30 14:12:16 -0700123
herb966e3d32015-09-18 07:00:48 -0700124 void SkSharedMutex::acquire() {
herbe4c17352015-09-29 14:38:01 -0700125 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700126 int currentSharedCount;
127 int waitingExclusiveCount;
128 {
129 SkAutoMutexAcquire l(&fMu);
herbc782b2a2015-06-29 13:46:55 -0700130
herb966e3d32015-09-18 07:00:48 -0700131 if (!fWaitingExclusive->tryAdd(threadID)) {
132 SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
133 }
herbc782b2a2015-06-29 13:46:55 -0700134
herb966e3d32015-09-18 07:00:48 -0700135 currentSharedCount = fCurrentShared->count();
136 waitingExclusiveCount = fWaitingExclusive->count();
137 }
herbc782b2a2015-06-29 13:46:55 -0700138
herb966e3d32015-09-18 07:00:48 -0700139 if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
140 fExclusiveQueue.wait();
141 }
142
143 ANNOTATE_RWLOCK_ACQUIRED(this, 1);
144 }
145
146 // Implementation Detail:
147 // The shared threads need two seperate queues to keep the threads that were added after the
148 // exclusive lock separate from the threads added before.
149 void SkSharedMutex::release() {
150 ANNOTATE_RWLOCK_RELEASED(this, 1);
herbe4c17352015-09-29 14:38:01 -0700151 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700152 int sharedWaitingCount;
153 int exclusiveWaitingCount;
154 int sharedQueueSelect;
155 {
156 SkAutoMutexAcquire l(&fMu);
157 SkASSERT(0 == fCurrentShared->count());
158 if (!fWaitingExclusive->tryRemove(threadID)) {
159 SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
160 }
161 exclusiveWaitingCount = fWaitingExclusive->count();
162 sharedWaitingCount = fWaitingShared->count();
163 fWaitingShared.swap(fCurrentShared);
164 sharedQueueSelect = fSharedQueueSelect;
165 if (sharedWaitingCount > 0) {
166 fSharedQueueSelect = 1 - fSharedQueueSelect;
167 }
168 }
169
170 if (sharedWaitingCount > 0) {
171 fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
172 } else if (exclusiveWaitingCount > 0) {
173 fExclusiveQueue.signal();
174 }
175 }
176
177 void SkSharedMutex::assertHeld() const {
herbe4c17352015-09-29 14:38:01 -0700178 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700179 SkAutoMutexAcquire l(&fMu);
180 SkASSERT(0 == fCurrentShared->count());
181 SkASSERT(fWaitingExclusive->find(threadID));
182 }
183
184 void SkSharedMutex::acquireShared() {
herbe4c17352015-09-29 14:38:01 -0700185 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700186 int exclusiveWaitingCount;
187 int sharedQueueSelect;
188 {
189 SkAutoMutexAcquire l(&fMu);
190 exclusiveWaitingCount = fWaitingExclusive->count();
191 if (exclusiveWaitingCount > 0) {
192 if (!fWaitingShared->tryAdd(threadID)) {
193 SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
194 }
195 } else {
196 if (!fCurrentShared->tryAdd(threadID)) {
197 SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
198 }
199 }
200 sharedQueueSelect = fSharedQueueSelect;
201 }
202
203 if (exclusiveWaitingCount > 0) {
204 fSharedQueue[sharedQueueSelect].wait();
205 }
206
207 ANNOTATE_RWLOCK_ACQUIRED(this, 0);
208 }
209
210 void SkSharedMutex::releaseShared() {
211 ANNOTATE_RWLOCK_RELEASED(this, 0);
herbe4c17352015-09-29 14:38:01 -0700212 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700213
214 int currentSharedCount;
215 int waitingExclusiveCount;
216 {
217 SkAutoMutexAcquire l(&fMu);
218 if (!fCurrentShared->tryRemove(threadID)) {
219 SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
220 }
221 currentSharedCount = fCurrentShared->count();
222 waitingExclusiveCount = fWaitingExclusive->count();
223 }
224
225 if (0 == currentSharedCount && waitingExclusiveCount > 0) {
226 fExclusiveQueue.signal();
227 }
228 }
229
230 void SkSharedMutex::assertHeldShared() const {
herbe4c17352015-09-29 14:38:01 -0700231 SkThreadID threadID(SkGetThreadID());
herb966e3d32015-09-18 07:00:48 -0700232 SkAutoMutexAcquire l(&fMu);
233 SkASSERT(fCurrentShared->find(threadID));
234 }
235
236#else
237
    // fQueueCounts packs three counts into a single int32_t so that all three
    // can be read and updated with one atomic operation. Each count gets
    // kLogThreadCount (10) bits, so each can represent up to 2^10 = 1024
    // threads.
    //
    // The three counts held in fQueueCounts are:
    // * Shared - the number of shared lock holders currently running.
    // * WaitingExclusive - the number of threads waiting for an exclusive lock.
    // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
    //   to finish.
    static const int kLogThreadCount = 10;

    // Bit offsets and masks for the three packed counts.
    // NOTE(review): "Exlusive" in kWaitingExlusiveOffset is a typo, but the
    // name is used throughout this file; renaming would be a separate
    // mechanical change.
    enum {
        kSharedOffset          = (0 * kLogThreadCount),
        kWaitingExlusiveOffset = (1 * kLogThreadCount),
        kWaitingSharedOffset   = (2 * kLogThreadCount),
        kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
        kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
        kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
    };
257
    // All packed counts start at zero; the only other work is the TSAN hooks.
    SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
    SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
    // Acquire the lock exclusively. Registers this thread as an exclusive
    // waiter with one atomic add, then blocks on fExclusiveQueue unless the
    // lock was completely free (no shared holders, no other exclusive waiters).
    void SkSharedMutex::acquire() {
        // Increment the count of exclusive queue waiters.
        // fetch_add returns the PRE-increment value, so oldQueueCounts describes
        // the state before this thread joined the queue.
        int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                                        sk_memory_order_acquire);

        // If there are no other exclusive waiters and no shared threads are running then run
        // else wait. The matching signal comes from release()/releaseShared().
        if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
            fExclusiveQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
272
    // Release the exclusive lock. A CAS loop atomically (a) removes this thread
    // from the exclusive-waiter count and (b) promotes every waiting shared
    // thread to "running shared". Then it wakes either the whole shared batch
    // or, if none, a single exclusive waiter.
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);

        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t waitingShared;
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;

            // Decrement exclusive waiters.
            newQueueCounts -= 1 << kWaitingExlusiveOffset;

            // The number of threads waiting to acquire a shared lock.
            waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

            // If there are any move the counts of all the shared waiters to actual shared. They are
            // going to run next.
            if (waitingShared > 0) {

                // Set waiting shared to zero.
                newQueueCounts &= ~kWaitingSharedMask;

                // Because this is the exclusive release, then there are zero readers. So, the bits
                // for shared locks should be zero. Since those bits are zero, we can just |= in the
                // waitingShared count instead of clearing with an &= and then |= the count.
                newQueueCounts |= waitingShared << kSharedOffset;
            }

        // On CAS failure oldQueueCounts is refreshed with the current value and
        // the loop recomputes newQueueCounts from it.
        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_release, sk_memory_order_relaxed));

        if (waitingShared > 0) {
            // Run all the shared.
            fSharedQueue.signal(waitingShared);
        } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            // Run a single exclusive waiter.
            fExclusiveQueue.signal();
        }
    }
herbc782b2a2015-06-29 13:46:55 -0700312
herb966e3d32015-09-18 07:00:48 -0700313 void SkSharedMutex::acquireShared() {
314 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
315 int32_t newQueueCounts;
316 do {
317 newQueueCounts = oldQueueCounts;
318 // If there are waiting exclusives then this shared lock waits else it runs.
319 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
320 newQueueCounts += 1 << kWaitingSharedOffset;
321 } else {
322 newQueueCounts += 1 << kSharedOffset;
323 }
324 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
325 sk_memory_order_acquire, sk_memory_order_relaxed));
herbab42ec72015-08-19 13:40:12 -0700326
herb966e3d32015-09-18 07:00:48 -0700327 // If there are waiting exclusives, then this shared waits until after it runs.
herbc782b2a2015-06-29 13:46:55 -0700328 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
herb966e3d32015-09-18 07:00:48 -0700329 fSharedQueue.wait();
herbc782b2a2015-06-29 13:46:55 -0700330 }
herb966e3d32015-09-18 07:00:48 -0700331 ANNOTATE_RWLOCK_ACQUIRED(this, 0);
herbc782b2a2015-06-29 13:46:55 -0700332
herbc782b2a2015-06-29 13:46:55 -0700333 }
herbc782b2a2015-06-29 13:46:55 -0700334
herb966e3d32015-09-18 07:00:48 -0700335 void SkSharedMutex::releaseShared() {
336 ANNOTATE_RWLOCK_RELEASED(this, 0);
herbdec1afc2015-06-30 14:12:16 -0700337
herb966e3d32015-09-18 07:00:48 -0700338 // Decrement the shared count.
339 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
340 sk_memory_order_release);
herbc782b2a2015-06-29 13:46:55 -0700341
herb966e3d32015-09-18 07:00:48 -0700342 // If shared count is going to zero (because the old count == 1) and there are exclusive
343 // waiters, then run a single exclusive waiter.
344 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
345 && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
346 fExclusiveQueue.signal();
347 }
herbc782b2a2015-06-29 13:46:55 -0700348 }
herbab42ec72015-08-19 13:40:12 -0700349
350#endif