#if !defined(__has_feature)
    #define __has_feature(x) 0
#endif

#if __has_feature(thread_sanitizer)
    #define ANNOTATE_RWLOCK_CREATE(lock) \
        AnnotateRWLockCreate(__FILE__, __LINE__, lock)

    #define ANNOTATE_RWLOCK_DESTROY(lock) \
        AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
        AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
        AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
    #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
        #if defined(__GNUC__)
            #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
        #else
            #error weak annotations are not supported for your compiler
        #endif
    #else
        #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
    #endif

    /* Declarations of the TSan annotation entry points. The compiler check and
       extern "C" linkage are reconstructed around the lines shown in this excerpt. */
    extern "C" {
        void AnnotateRWLockCreate(
            const char *file, int line,
            const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockDestroy(
            const char *file, int line,
            const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockAcquired(
            const char *file, int line,
            const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
        void AnnotateRWLockReleased(
            const char *file, int line,
            const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    }
#else

    #define ANNOTATE_RWLOCK_CREATE(lock)
    #define ANNOTATE_RWLOCK_DESTROY(lock)
    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif
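// The ANNOTATE_RWLOCK_* hooks above report reader-writer lock events (create,
// destroy, acquire, release; is_w = 1 for the writer side, 0 for readers) to
// ThreadSanitizer when it is enabled. In builds without TSan they expand to
// nothing, so the annotations cost nothing at runtime.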
class SkSharedMutex::ThreadIDSet {
public:
    // Returns true if threadID is in the set.
    bool find(SkThreadID threadID) const {
        for (auto& t : fThreadIDs) {
            if (t == threadID) return true;
        }
        return false;
    }
    // Returns true if threadID was not already in the set and was added.
    bool tryAdd(SkThreadID threadID) {
        for (auto& t : fThreadIDs) {
            if (t == threadID) return false;
        }
        fThreadIDs.append(1, &threadID);
        return true;
    }
    // Returns true if threadID was in the set and was removed.
    bool tryRemove(SkThreadID threadID) {
        for (int i = 0; i < fThreadIDs.size(); ++i) {
            if (fThreadIDs[i] == threadID) {
                fThreadIDs.remove(i);
                return true;
            }
        }
        return false;
    }
    void swap(ThreadIDSet& other) {
        fThreadIDs.swap(other.fThreadIDs);
    }
    int count() const {
        return fThreadIDs.size();
    }
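private:
    // Backing storage for the thread IDs. The member name fThreadIDs comes from
    // the code above; its container type is not shown in this excerpt, so an
    // SkTDArray<SkThreadID> is assumed here (it matches the append(), remove(),
    // swap(), size(), and operator[] calls used by the methods above).
    SkTDArray<SkThreadID> fThreadIDs;
};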
SkSharedMutex::SkSharedMutex()
    : fCurrentShared(new ThreadIDSet)
    , fWaitingExclusive(new ThreadIDSet)
    , fWaitingShared(new ThreadIDSet) {
    ANNOTATE_RWLOCK_CREATE(this);
}
void SkSharedMutex::acquire() {
    SkThreadID threadID(SkGetThreadID());
    int currentSharedCount;
    int waitingExclusiveCount;
    {
        // Guard the bookkeeping with the internal mutex (guard type and member
        // name, SkAutoMutexExclusive/fMu, are assumed; not shown in this excerpt).
        SkAutoMutexExclusive l(fMu);

        SkASSERTF(!fCurrentShared->find(threadID),
                  "Thread %" PRIx64 " already has a shared lock\n", (uint64_t)threadID);

        if (!fWaitingExclusive->tryAdd(threadID)) {
            SkDEBUGFAILF("Thread %" PRIx64 " already has an exclusive lock\n",
                         (uint64_t)threadID);
        }

        currentSharedCount = fCurrentShared->count();
        waitingExclusiveCount = fWaitingExclusive->count();
    }

    // Wait if readers currently hold the lock or another writer is already queued.
    if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
        fExclusiveQueue.wait();
    }

    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
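// Implementation note: the shared side uses two queues, fSharedQueue[0] and
// fSharedQueue[1], selected by fSharedQueueSelect. Readers that arrive while a
// writer is waiting park on the currently selected queue; release() flips the
// selector before waking that batch, so readers that arrive later wait on the
// other queue instead of slipping in ahead of the next writer.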
void SkSharedMutex::release() {
    ANNOTATE_RWLOCK_RELEASED(this, 1);
    SkThreadID threadID(SkGetThreadID());
    int sharedWaitingCount;
    int exclusiveWaitingCount;
    int sharedQueueSelect;
    {
        SkAutoMutexExclusive l(fMu);  // see note in acquire()
        SkASSERT(0 == fCurrentShared->count());
        if (!fWaitingExclusive->tryRemove(threadID)) {
            SkDEBUGFAILF("Thread %" PRIx64 " did not have the lock held.\n",
                         (uint64_t)threadID);
        }
        exclusiveWaitingCount = fWaitingExclusive->count();
        sharedWaitingCount = fWaitingShared->count();
        // Promote the readers that queued up behind this writer; they run next.
        fWaitingShared.swap(fCurrentShared);
        sharedQueueSelect = fSharedQueueSelect;
        if (sharedWaitingCount > 0) {
            fSharedQueueSelect = 1 - fSharedQueueSelect;
        }
    }

    if (sharedWaitingCount > 0) {
        fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
    } else if (exclusiveWaitingCount > 0) {
        fExclusiveQueue.signal();
    }
}
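// The hand-off above wakes either the whole batch of waiting readers at once
// (signal(sharedWaitingCount)) or a single waiting writer; if nobody is
// waiting, the lock simply becomes free.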
void SkSharedMutex::assertHeld() const {
    SkThreadID threadID(SkGetThreadID());
    SkAutoMutexExclusive l(fMu);  // see note in acquire()
    // Held exclusively: no readers, and this thread is the registered writer.
    SkASSERT(0 == fCurrentShared->count());
    SkASSERT(fWaitingExclusive->find(threadID));
}
void SkSharedMutex::acquireShared() {
    SkThreadID threadID(SkGetThreadID());
    int exclusiveWaitingCount;
    int sharedQueueSelect;
    {
        SkAutoMutexExclusive l(fMu);  // see note in acquire()
        exclusiveWaitingCount = fWaitingExclusive->count();
        if (exclusiveWaitingCount > 0) {
            // A writer is waiting: queue up behind it.
            if (!fWaitingShared->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %" PRIx64 " was already waiting!\n", (uint64_t)threadID);
            }
        } else {
            // No writer is waiting: take the shared lock immediately.
            if (!fCurrentShared->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %" PRIx64 " already holds a shared lock!\n",
                             (uint64_t)threadID);
            }
        }
        sharedQueueSelect = fSharedQueueSelect;
    }

    if (exclusiveWaitingCount > 0) {
        fSharedQueue[sharedQueueSelect].wait();
    }

    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
}
void SkSharedMutex::releaseShared() {
    ANNOTATE_RWLOCK_RELEASED(this, 0);
    SkThreadID threadID(SkGetThreadID());

    int currentSharedCount;
    int waitingExclusiveCount;
    {
        SkAutoMutexExclusive l(fMu);  // see note in acquire()
        if (!fCurrentShared->tryRemove(threadID)) {
            SkDEBUGFAILF("Thread %" PRIx64 " does not hold a shared lock.\n",
                         (uint64_t)threadID);
        }
        currentSharedCount = fCurrentShared->count();
        waitingExclusiveCount = fWaitingExclusive->count();
    }

    // The last reader out wakes one waiting writer, if any.
    if (0 == currentSharedCount && waitingExclusiveCount > 0) {
        fExclusiveQueue.signal();
    }
}
void SkSharedMutex::assertHeldShared() const {
    SkThreadID threadID(SkGetThreadID());
    SkAutoMutexExclusive l(fMu);  // see note in acquire()
    SkASSERT(fCurrentShared->find(threadID));
}
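// The bookkeeping implementation above and the atomic implementation below are
// alternative builds of the same class (one with per-thread debug checking, one
// lock-free); the preprocessor guard that selects between them is not shown in
// this excerpt.
//
// The lock-free version packs three counters into the single atomic
// fQueueCounts: running shared holders, waiting exclusive threads, and waiting
// shared threads. A minimal sketch of the constants it relies on follows,
// assuming 10 bits per counter (suggested by kLogThreadCount); the names and
// widths here are reconstructed, not taken verbatim from the excerpt.
static const int kLogThreadCount = 10;

enum {
    kSharedOffset           = (0 * kLogThreadCount),
    kWaitingExclusiveOffset = (1 * kLogThreadCount),
    kWaitingSharedOffset    = (2 * kLogThreadCount),
    kSharedMask             = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask   = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
    kWaitingSharedMask      = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};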
void SkSharedMutex::acquire() {
    // Register as a waiting writer.
    int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
                                                    std::memory_order_acquire);
    // Run only if no other writer was queued and no reader was running; else wait.
    if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
        fExclusiveQueue.wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
void SkSharedMutex::release() {
    ANNOTATE_RWLOCK_RELEASED(this, 1);
    int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
    int32_t waitingShared;
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;
        // Remove this writer from the waiting-exclusive count.
        newQueueCounts -= 1 << kWaitingExclusiveOffset;
        // Readers that queued up behind this writer run next, as a batch.
        waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
        if (waitingShared > 0) {
            // Move the waiting-shared count over to the running-shared count.
            newQueueCounts &= ~kWaitingSharedMask;
            newQueueCounts |= waitingShared << kSharedOffset;
        }
    } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));

    if (waitingShared > 0) {
        // Wake the whole batch of waiting readers.
        fSharedQueue.signal(waitingShared);
    } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        // Otherwise hand the lock to one waiting writer.
        fExclusiveQueue.signal();
    }
}
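// Memory-ordering note: the successful compare-exchange above publishes the
// writer's work with memory_order_release, and readers pair with it through the
// memory_order_acquire compare-exchange in acquireShared() below. The failure
// ordering is relaxed in both places because a failed CAS only re-reads the
// counts and retries.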
void SkSharedMutex::acquireShared() {
    int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;
        // If a writer is waiting, queue up behind it; otherwise run immediately.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            newQueueCounts += 1 << kWaitingSharedOffset;
        } else {
            newQueueCounts += 1 << kSharedOffset;
        }
    } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
                                                   std::memory_order_acquire,
                                                   std::memory_order_relaxed));
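    // Reconstructed tail of acquireShared() (not shown in this excerpt): a
    // reader that registered as waiting behind a writer must park on
    // fSharedQueue until the writer's release() wakes the batch.
    if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        fSharedQueue.wait();
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
}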
void SkSharedMutex::releaseShared() {
    ANNOTATE_RWLOCK_RELEASED(this, 0);
    int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
                                                    std::memory_order_release);
    // If this was the last reader and a writer is waiting, wake one writer.
    if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
            && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
        fExclusiveQueue.signal();
    }
}
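// Example usage (illustrative only, not part of this file): SkSharedMutex is a
// writer-priority reader-writer lock. acquire()/release() take the exclusive
// (writer) side; acquireShared()/releaseShared() take the shared (reader) side.
//
//     SkSharedMutex gMutex;   // hypothetical globals for the example
//     int           gValue = 0;
//
//     void writeValue(int v) {
//         gMutex.acquire();           // exclusive: excludes readers and writers
//         gValue = v;
//         gMutex.release();
//     }
//
//     int readValue() {
//         gMutex.acquireShared();     // shared: many readers may run at once
//         int v = gValue;
//         gMutex.releaseShared();
//         return v;
//     }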