/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkSharedMutex.h"

#include "SkAtomics.h"
#include "SkTypes.h"
#include "SkSemaphore.h"

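// SkSharedMutex is a reader/writer lock: writers bracket their critical section with
// acquire()/release(), readers with acquireShared()/releaseShared(). A minimal usage sketch
// (illustrative only; gSharedMutex, gData, read() and mutate() are hypothetical):
//
//     SkSharedMutex gSharedMutex;
//
//     void reader() {
//         gSharedMutex.acquireShared();
//         read(gData);                  // many readers may run here concurrently
//         gSharedMutex.releaseShared();
//     }
//
//     void writer() {
//         gSharedMutex.acquire();
//         mutate(gData);                // exclusive access
//         gSharedMutex.release();
//     }
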
#if !defined(__has_feature)
    #define __has_feature(x) 0
#endif

#if __has_feature(thread_sanitizer)

    /* Report that a lock has been created at address "lock". */
    #define ANNOTATE_RWLOCK_CREATE(lock) \
        AnnotateRWLockCreate(__FILE__, __LINE__, lock)

    /* Report that the lock at address "lock" is about to be destroyed. */
    #define ANNOTATE_RWLOCK_DESTROY(lock) \
        AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

    /* Report that the lock at address "lock" has been acquired.
       is_w=1 for writer lock, is_w=0 for reader lock. */
    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
        AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

    /* Report that the lock at address "lock" is about to be released. */
    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
        AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

    #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
        #if defined(__GNUC__)
            #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
        #else
            /* TODO(glider): for Windows support we may want to change this macro in order
               to prepend __declspec(selectany) to the annotations' declarations. */
            #error weak annotations are not supported for your compiler
        #endif
    #else
        #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
    #endif

    extern "C" {
        void AnnotateRWLockCreate(
            const char *file, int line,
            const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockDestroy(
            const char *file, int line,
            const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockAcquired(
            const char *file, int line,
            const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockReleased(
            const char *file, int line,
            const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    }

#else

    #define ANNOTATE_RWLOCK_CREATE(lock)
    #define ANNOTATE_RWLOCK_DESTROY(lock)
    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif

#ifdef SK_DEBUG

    #include "SkThreadID.h"
    #include "SkTDArray.h"

    class SkSharedMutex::ThreadIDSet {
    public:
        // Returns true if threadID is in the set.
        bool find(SkThreadID threadID) const {
            for (auto& t : fThreadIDs) {
                if (t == threadID) return true;
            }
            return false;
        }

        // Returns true if threadID was not already in the set (and adds it).
        bool tryAdd(SkThreadID threadID) {
            for (auto& t : fThreadIDs) {
                if (t == threadID) return false;
            }
            fThreadIDs.append(1, &threadID);
            return true;
        }
        // Returns true if threadID was in the set (and removes it).
        bool tryRemove(SkThreadID threadID) {
            for (int i = 0; i < fThreadIDs.count(); ++i) {
                if (fThreadIDs[i] == threadID) {
                    fThreadIDs.remove(i);
                    return true;
                }
            }
            return false;
        }

        void swap(ThreadIDSet& other) {
            fThreadIDs.swap(other.fThreadIDs);
        }

        int count() const {
            return fThreadIDs.count();
        }

    private:
        SkTDArray<SkThreadID> fThreadIDs;
    };

    SkSharedMutex::SkSharedMutex()
        : fCurrentShared(new ThreadIDSet)
        , fWaitingExclusive(new ThreadIDSet)
        , fWaitingShared(new ThreadIDSet) {
        ANNOTATE_RWLOCK_CREATE(this);
    }

    SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }

    void SkSharedMutex::acquire() {
        SkThreadID threadID(SkGetThreadID());
        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);

            SkASSERTF(!fCurrentShared->find(threadID),
                      "Thread %lx already has a shared lock\n", threadID);

            if (!fWaitingExclusive->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
            }

            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

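        // Note: this thread was just added to fWaitingExclusive above, so a waiting count of 1
        // means no other writer is ahead of us; block only while readers are still running or
        // another writer is already waiting.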
        if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
            fExclusiveQueue.wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }

    // Implementation Detail:
    // The shared threads need two separate queues to keep the threads that were added after the
    // exclusive lock apart from the threads added before.
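    //
    // Illustrative scenario (thread names are hypothetical): while writer W holds or waits for
    // the lock, readers R1 and R2 park on fSharedQueue[fSharedQueueSelect]. When W releases, R1
    // and R2 are signaled on that queue and fSharedQueueSelect flips, so readers that start
    // waiting behind a later writer park on the other queue and cannot consume those signals.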
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);
        SkThreadID threadID(SkGetThreadID());
        int sharedWaitingCount;
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            SkASSERT(0 == fCurrentShared->count());
            if (!fWaitingExclusive->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
            }
            exclusiveWaitingCount = fWaitingExclusive->count();
            sharedWaitingCount = fWaitingShared->count();
            fWaitingShared.swap(fCurrentShared);
            sharedQueueSelect = fSharedQueueSelect;
            if (sharedWaitingCount > 0) {
                fSharedQueueSelect = 1 - fSharedQueueSelect;
            }
        }

        if (sharedWaitingCount > 0) {
            fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
        } else if (exclusiveWaitingCount > 0) {
            fExclusiveQueue.signal();
        }
    }

    void SkSharedMutex::assertHeld() const {
        SkThreadID threadID(SkGetThreadID());
        SkAutoMutexAcquire l(&fMu);
        SkASSERT(0 == fCurrentShared->count());
        SkASSERT(fWaitingExclusive->find(threadID));
    }

    void SkSharedMutex::acquireShared() {
        SkThreadID threadID(SkGetThreadID());
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            exclusiveWaitingCount = fWaitingExclusive->count();
            if (exclusiveWaitingCount > 0) {
                if (!fWaitingShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
                }
            } else {
                if (!fCurrentShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
                }
            }
            sharedQueueSelect = fSharedQueueSelect;
        }

        if (exclusiveWaitingCount > 0) {
            fSharedQueue[sharedQueueSelect].wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 0);
    }

    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);
        SkThreadID threadID(SkGetThreadID());

        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);
            if (!fCurrentShared->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
            }
            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

        if (0 == currentSharedCount && waitingExclusiveCount > 0) {
            fExclusiveQueue.signal();
        }
    }

    void SkSharedMutex::assertHeldShared() const {
        SkThreadID threadID(SkGetThreadID());
        SkAutoMutexAcquire l(&fMu);
        SkASSERT(fCurrentShared->find(threadID));
    }

#else

    // The fQueueCounts field holds three counts packed into a single int32_t so that they can be
    // managed atomically. The three counts must be the same size, so each gets 10 bits;
    // kLogThreadCount is the log2 of the per-count limit of 1024 threads.
    //
    // The three counts held in fQueueCounts are:
    // * Shared - the number of shared lock holders currently running.
    // * WaitingExclusive - the number of threads waiting for an exclusive lock.
    // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
    //   to finish.
    static const int kLogThreadCount = 10;

    enum {
        kSharedOffset           = (0 * kLogThreadCount),
        kWaitingExclusiveOffset = (1 * kLogThreadCount),
        kWaitingSharedOffset    = (2 * kLogThreadCount),
        kSharedMask             = ((1 << kLogThreadCount) - 1) << kSharedOffset,
        kWaitingExclusiveMask   = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
        kWaitingSharedMask      = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
    };
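    // Illustrative encoding: a fQueueCounts value of
    //     (2 << kSharedOffset) | (1 << kWaitingExclusiveOffset) | (3 << kWaitingSharedOffset)
    // describes 2 shared holders running, 1 thread waiting for the exclusive lock, and 3 threads
    // waiting to take a shared lock once that exclusive thread finishes.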

    SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
    SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
    void SkSharedMutex::acquire() {
        // Increment the count of exclusive queue waiters.
        int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
                                                        sk_memory_order_acquire);

        // If there are no other exclusive waiters and no shared threads are running, then run;
        // otherwise wait.
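        // Illustrative example: on an unheld, uncontended lock fQueueCounts is 0, so neither mask
        // is set in oldQueueCounts and this thread proceeds immediately; otherwise it blocks on
        // fExclusiveQueue until another thread's release signals it.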
        if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
            fExclusiveQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }

    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);

        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t waitingShared;
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;

            // Decrement exclusive waiters.
            newQueueCounts -= 1 << kWaitingExclusiveOffset;

            // The number of threads waiting to acquire a shared lock.
            waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

            // If there are any, move the count of shared waiters over to the shared count; they
            // are going to run next.
            if (waitingShared > 0) {

                // Set waiting shared to zero.
                newQueueCounts &= ~kWaitingSharedMask;

                // Because this is the exclusive release, there are zero readers, so the bits for
                // shared locks are already zero. Since those bits are zero, we can just |= in the
                // waitingShared count instead of clearing with an &= and then |= the count.
                newQueueCounts |= waitingShared << kSharedOffset;
            }

        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_release, sk_memory_order_relaxed));

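        // Illustrative example: if oldQueueCounts encoded {shared: 0, waitingExclusive: 2,
        // waitingShared: 3}, then newQueueCounts encodes {shared: 3, waitingExclusive: 1,
        // waitingShared: 0}, and the three shared waiters are signaled below so they run next.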
        if (waitingShared > 0) {
            // Run all the shared.
            fSharedQueue.signal(waitingShared);
        } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            // Run a single exclusive waiter.
            fExclusiveQueue.signal();
        }
    }

    void SkSharedMutex::acquireShared() {
        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;
            // If there are waiting exclusives, then this shared lock waits; otherwise it runs.
            if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
                newQueueCounts += 1 << kWaitingSharedOffset;
            } else {
                newQueueCounts += 1 << kSharedOffset;
            }
        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_acquire, sk_memory_order_relaxed));
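        // Illustrative example: with a writer already waiting, the CAS above records this thread
        // as a waiting shared; with no writers waiting, it is counted as a running shared and
        // skips the wait below.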

        // If there were waiting exclusives, then this shared lock waits until an exclusive
        // release signals the shared queue.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            fSharedQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 0);
    }

    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);

        // Decrement the shared count.
        int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
                                                        sk_memory_order_release);

        // If the shared count is going to zero (because the old count == 1) and there are
        // exclusive waiters, then run a single exclusive waiter.
        if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
            && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
            fExclusiveQueue.signal();
        }
    }

#endif