Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
SkSharedMutex.cpp
Go to the documentation of this file.
1/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
9
12
13#include <cinttypes>
14#include <cstdint>
15
// ThreadSanitizer reader/writer-lock annotations. Under TSAN the
// ANNOTATE_RWLOCK_* macros forward to the (weak) dynamic-annotation hooks
// declared below so the race detector can model this custom rwlock; in all
// other builds they expand to nothing.
16#if !defined(__has_feature)
17 #define __has_feature(x) 0
18#endif
19
20#if __has_feature(thread_sanitizer)
21
22 /* Report that a lock has been created at address "lock". */
23 #define ANNOTATE_RWLOCK_CREATE(lock) \
24 AnnotateRWLockCreate(__FILE__, __LINE__, lock)
25
26 /* Report that the lock at address "lock" is about to be destroyed. */
27 #define ANNOTATE_RWLOCK_DESTROY(lock) \
28 AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
29
30 /* Report that the lock at address "lock" has been acquired.
31 is_w=1 for writer lock, is_w=0 for reader lock. */
32 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
33 AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
34
35 /* Report that the lock at address "lock" is about to be released. */
36 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
37 AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
38
39 #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
40 #if defined(__GNUC__)
41 #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
42 #else
43 /* TODO(glider): for Windows support we may want to change this macro in order
44 to prepend __declspec(selectany) to the annotations' declarations. */
45 #error weak annotations are not supported for your compiler
46 #endif
47 #else
48 #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
49 #endif
50
51 extern "C" {
52 void AnnotateRWLockCreate(
53 const char *file, int line,
54 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
55 void AnnotateRWLockDestroy(
56 const char *file, int line,
57 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
58 void AnnotateRWLockAcquired(
59 const char *file, int line,
60 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
61 void AnnotateRWLockReleased(
62 const char *file, int line,
63 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
64 }
65
66#else
67
68 #define ANNOTATE_RWLOCK_CREATE(lock)
69 #define ANNOTATE_RWLOCK_DESTROY(lock)
70 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
71 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
72
73#endif
74
75#ifdef SK_DEBUG
76
79
80 class SkSharedMutex::ThreadIDSet {
81 public:
82 // Returns true if threadID is in the set.
83 bool find(SkThreadID threadID) const {
84 for (auto& t : fThreadIDs) {
85 if (t == threadID) return true;
86 }
87 return false;
88 }
89
90 // Returns true if did not already exist.
91 bool tryAdd(SkThreadID threadID) {
92 for (auto& t : fThreadIDs) {
93 if (t == threadID) return false;
94 }
95 fThreadIDs.append(1, &threadID);
96 return true;
97 }
98 // Returns true if already exists in Set.
99 bool tryRemove(SkThreadID threadID) {
100 for (int i = 0; i < fThreadIDs.size(); ++i) {
101 if (fThreadIDs[i] == threadID) {
102 fThreadIDs.remove(i);
103 return true;
104 }
105 }
106 return false;
107 }
108
109 void swap(ThreadIDSet& other) {
110 fThreadIDs.swap(other.fThreadIDs);
111 }
112
113 int count() const {
114 return fThreadIDs.size();
115 }
116
117 private:
118 SkTDArray<SkThreadID> fThreadIDs;
119 };
120
// Debug constructor: allocate the three bookkeeping sets — readers currently
// running, writers waiting (the exclusive holder stays in fWaitingExclusive
// while it owns the lock; see assertHeld/release), and readers queued behind
// a writer.
121 SkSharedMutex::SkSharedMutex()
122 : fCurrentShared(new ThreadIDSet)
123 , fWaitingExclusive(new ThreadIDSet)
124 , fWaitingShared(new ThreadIDSet){
// NOTE(review): upstream line 125 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_CREATE(this) — confirm against the original file.
126 }
127
128 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
129
// Acquire the exclusive (writer) lock (debug build).
// Records this thread in fWaitingExclusive, asserting it does not already
// hold a shared or exclusive lock, then blocks on fExclusiveQueue if any
// readers are running or another writer is queued ahead of us.
130 void SkSharedMutex::acquire() {
131 SkThreadID threadID(SkGetThreadID());
132 int currentSharedCount;
133 int waitingExclusiveCount;
134 {
// NOTE(review): upstream line 135 is elided by this listing; it presumably
// takes the internal mutex guarding the ThreadIDSet bookkeeping — confirm
// against the original file.
136
137 SkASSERTF(!fCurrentShared->find(threadID),
138 "Thread %" PRIx64 " already has an shared lock\n", (uint64_t)threadID);
139
140 if (!fWaitingExclusive->tryAdd(threadID)) {
141 SkDEBUGFAILF("Thread %" PRIx64 " already has an exclusive lock\n",
142 (uint64_t)threadID);
143 }
144
145 currentSharedCount = fCurrentShared->count();
146 waitingExclusiveCount = fWaitingExclusive->count();
147 }
148
// Wait if any readers are running, or if another writer is already queued
// (> 1 because this thread itself was just added to fWaitingExclusive).
149 if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
150 fExclusiveQueue.wait();
151 }
152
// NOTE(review): upstream line 153 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_ACQUIRED(this, 1) — confirm against the original.
154 }
155
156 // Implementation Detail:
157 // The shared threads need two separate queues to keep the threads that were added after the
158 // exclusive lock separate from the threads added before.
// Release the exclusive (writer) lock (debug build). Promotes every reader
// that queued up while we held the lock (fWaitingShared is swapped into
// fCurrentShared), then wakes that whole batch — or, if none, one waiting
// writer.
159 void SkSharedMutex::release() {
// NOTE(review): upstream line 160 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_RELEASED(this, 1) — confirm against the original.
161 SkThreadID threadID(SkGetThreadID());
162 int sharedWaitingCount;
163 int exclusiveWaitingCount;
164 int sharedQueueSelect;
165 {
// NOTE(review): upstream line 166 is elided by this listing; it presumably
// takes the internal mutex guarding the bookkeeping below — confirm.
167 SkASSERT(0 == fCurrentShared->count());
168 if (!fWaitingExclusive->tryRemove(threadID)) {
169 SkDEBUGFAILF("Thread %" PRIx64 " did not have the lock held.\n",
170 (uint64_t)threadID);
171 }
172 exclusiveWaitingCount = fWaitingExclusive->count();
173 sharedWaitingCount = fWaitingShared->count();
// The waiting readers become the running readers.
174 fWaitingShared.swap(fCurrentShared);
175 sharedQueueSelect = fSharedQueueSelect;
// Flip to the other shared queue so readers arriving after this release
// wait separately from the batch being woken below.
176 if (sharedWaitingCount > 0) {
177 fSharedQueueSelect = 1 - fSharedQueueSelect;
178 }
179 }
180
181 if (sharedWaitingCount > 0) {
// Wake the entire batch of readers that queued behind this writer.
182 fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
183 } else if (exclusiveWaitingCount > 0) {
// No readers waiting: hand the lock to one queued writer.
184 fExclusiveQueue.signal();
185 }
186 }
187
// Debug check: the calling thread must hold the exclusive lock.
// (The exclusive holder remains recorded in fWaitingExclusive until
// release(), and no readers may be running while it holds the lock.)
188 void SkSharedMutex::assertHeld() const {
189 SkThreadID threadID(SkGetThreadID());
// NOTE(review): upstream line 190 is elided by this listing; it presumably
// takes the internal mutex guarding the bookkeeping — confirm.
191 SkASSERT(0 == fCurrentShared->count());
192 SkASSERT(fWaitingExclusive->find(threadID));
193 }
194
// Acquire a shared (reader) lock (debug build).
// If any writer is queued, this thread is parked in fWaitingShared and waits
// on the currently-selected shared queue until the writer's release()
// promotes it; otherwise it starts running immediately.
195 void SkSharedMutex::acquireShared() {
196 SkThreadID threadID(SkGetThreadID());
197 int exclusiveWaitingCount;
198 int sharedQueueSelect;
199 {
// NOTE(review): upstream line 200 is elided by this listing; it presumably
// takes the internal mutex guarding the bookkeeping below — confirm.
201 exclusiveWaitingCount = fWaitingExclusive->count();
202 if (exclusiveWaitingCount > 0) {
203 if (!fWaitingShared->tryAdd(threadID)) {
204 SkDEBUGFAILF("Thread %" PRIx64 " was already waiting!\n", (uint64_t)threadID);
205 }
206 } else {
207 if (!fCurrentShared->tryAdd(threadID)) {
208 SkDEBUGFAILF("Thread %" PRIx64 " already holds a shared lock!\n",
209 (uint64_t)threadID);
210 }
211 }
// Read the queue selector inside the same critical section so we wait on
// the queue matching the batch we were added to.
212 sharedQueueSelect = fSharedQueueSelect;
213 }
214
215 if (exclusiveWaitingCount > 0) {
216 fSharedQueue[sharedQueueSelect].wait();
217 }
218
// NOTE(review): upstream line 219 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_ACQUIRED(this, 0) — confirm against the original.
220 }
221
// Release a shared (reader) lock (debug build). If this was the last running
// reader and a writer is queued, wake one writer.
222 void SkSharedMutex::releaseShared() {
// NOTE(review): upstream line 223 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_RELEASED(this, 0) — confirm against the original.
224 SkThreadID threadID(SkGetThreadID());
225
226 int currentSharedCount;
227 int waitingExclusiveCount;
228 {
// NOTE(review): upstream line 229 is elided by this listing; it presumably
// takes the internal mutex guarding the bookkeeping below — confirm.
230 if (!fCurrentShared->tryRemove(threadID)) {
231 SkDEBUGFAILF("Thread %" PRIx64 " does not hold a shared lock.\n",
232 (uint64_t)threadID);
233 }
234 currentSharedCount = fCurrentShared->count();
235 waitingExclusiveCount = fWaitingExclusive->count();
236 }
237
238 if (0 == currentSharedCount && waitingExclusiveCount > 0) {
239 fExclusiveQueue.signal();
240 }
241 }
242
// Debug check: the calling thread must currently hold a shared (reader) lock.
243 void SkSharedMutex::assertHeldShared() const {
244 SkThreadID threadID(SkGetThreadID());
// NOTE(review): upstream line 245 is elided by this listing; it presumably
// takes the internal mutex guarding the bookkeeping — confirm.
246 SkASSERT(fCurrentShared->find(threadID));
247 }
248
249#else
250
251 // The fQueueCounts field holds many counts in an int32_t in order to make managing them atomic.
252 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent
253 // the log of the count which is 1024.
254 //
255 // The three counts held in fQueueCounts are:
256 // * Shared - the number of shared lock holders currently running.
257 // * WaitingExclusive - the number of threads waiting for an exclusive lock.
258 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
259 // to finish.
260 static const int kLogThreadCount = 10;
261
262 enum {
// NOTE(review): the enumerator lines (upstream 263-268) are elided by this
// listing. Per the symbol index they declare kSharedOffset,
// kWaitingExlusiveOffset, kWaitingSharedOffset and the matching kSharedMask,
// kWaitingExclusiveMask, kWaitingSharedMask used below — confirm the exact
// values against the original file.
269 };
270
271 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
272 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
// Acquire the exclusive (writer) lock (release build).
// Atomically registers this thread as an exclusive waiter; the acquire
// ordering on the RMW pairs with the release ordering in release() and
// releaseShared().
273 void SkSharedMutex::acquire() {
274 // Increment the count of exclusive queue waiters.
275 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
276 std::memory_order_acquire);
277
278 // If there are no other exclusive waiters and no shared threads are running then run
279 // else wait.
280 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
281 fExclusiveQueue.wait();
282 }
// NOTE(review): upstream line 283 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_ACQUIRED(this, 1) — confirm against the original.
284 }
285
// Release the exclusive (writer) lock (release build).
// CAS loop: removes this writer from the packed counts and, if readers queued
// up while we held the lock, converts the entire waiting-shared count into
// the running-shared count within the same atomic update. Then wakes either
// that batch of readers or, if none, one waiting writer.
286 void SkSharedMutex::release() {
// NOTE(review): upstream line 287 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_RELEASED(this, 1) — confirm against the original.
288
289 int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
290 int32_t waitingShared;
291 int32_t newQueueCounts;
292 do {
293 newQueueCounts = oldQueueCounts;
294
295 // Decrement exclusive waiters.
296 newQueueCounts -= 1 << kWaitingExlusiveOffset;
297
298 // The number of threads waiting to acquire a shared lock.
299 waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
300
301 // If there are any move the counts of all the shared waiters to actual shared. They are
302 // going to run next.
303 if (waitingShared > 0) {
304
305 // Set waiting shared to zero.
306 newQueueCounts &= ~kWaitingSharedMask;
307
308 // Because this is the exclusive release, then there are zero readers. So, the bits
309 // for shared locks should be zero. Since those bits are zero, we can just |= in the
310 // waitingShared count instead of clearing with an &= and then |= the count.
311 newQueueCounts |= waitingShared << kSharedOffset;
312 }
313
// On CAS failure oldQueueCounts is reloaded with the current value, so the
// loop recomputes from fresh state.
314 } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
315 std::memory_order_release,
316 std::memory_order_relaxed));
317
318 if (waitingShared > 0) {
319 // Run all the shared.
320 fSharedQueue.signal(waitingShared);
321 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
322 // Run a single exclusive waiter.
323 fExclusiveQueue.signal();
324 }
325 }
326
// Acquire a shared (reader) lock (release build).
// CAS loop: if a writer is queued, count this thread as waiting-shared and
// block on fSharedQueue until the writer's release() promotes the batch;
// otherwise count it as running-shared immediately.
327 void SkSharedMutex::acquireShared() {
328 int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
329 int32_t newQueueCounts;
330 do {
331 newQueueCounts = oldQueueCounts;
332 // If there are waiting exclusives then this shared lock waits else it runs.
333 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
334 newQueueCounts += 1 << kWaitingSharedOffset;
335 } else {
336 newQueueCounts += 1 << kSharedOffset;
337 }
338 } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
339 std::memory_order_acquire,
340 std::memory_order_relaxed));
341
342 // If there are waiting exclusives, then this shared waits until after it runs.
343 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
344 fSharedQueue.wait();
345 }
// NOTE(review): upstream line 346 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_ACQUIRED(this, 0) — confirm against the original.
347
348 }
349
// Release a shared (reader) lock (release build). Atomically decrements the
// running-shared count; if this was the last reader and a writer is queued,
// wakes one writer.
350 void SkSharedMutex::releaseShared() {
// NOTE(review): upstream line 351 is elided by this listing; it presumably
// issues ANNOTATE_RWLOCK_RELEASED(this, 0) — confirm against the original.
352
353 // Decrement the shared count.
354 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
355 std::memory_order_release);
356
357 // If shared count is going to zero (because the old count == 1) and there are exclusive
358 // waiters, then run a single exclusive waiter.
359 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
360 && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
361 fExclusiveQueue.signal();
362 }
363 }
364
365#endif
int count
#define SkDEBUGFAILF(fmt,...)
Definition SkAssert.h:119
#define SkASSERT(cond)
Definition SkAssert.h:116
#define SkASSERTF(cond, fmt,...)
Definition SkAssert.h:117
void swap(sk_sp< T > &a, sk_sp< T > &b)
Definition SkRefCnt.h:341
static const int kLogThreadCount
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
#define ANNOTATE_RWLOCK_DESTROY(lock)
#define ANNOTATE_RWLOCK_CREATE(lock)
@ kWaitingSharedOffset
@ kWaitingExlusiveOffset
@ kSharedOffset
@ kWaitingExclusiveMask
@ kWaitingSharedMask
@ kSharedMask
SkThreadID SkGetThreadID()
int64_t SkThreadID
Definition SkThreadID.h:16
int find(T *array, int N, T item)