
/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkThread_platform_DEFINED
#define SkThread_platform_DEFINED

#if defined(SK_BUILD_FOR_ANDROID)

#if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)

#include <stdint.h>

/* Just use the GCC atomic intrinsics. They're supported by the NDK toolchain,
 * have reasonable performance, and provide full memory barriers
 */
static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
    return __sync_fetch_and_add(addr, 1);
}

static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t *addr, int32_t inc) {
    return __sync_fetch_and_add(addr, inc);
}

static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
    return __sync_fetch_and_add(addr, -1);
}
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() { }
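
// Illustrative only (not part of this header's API): these intrinsics return
// the value stored *before* the update, which is exactly what a reference
// count needs. A minimal sketch, assuming a hypothetical counter that starts
// at 1 for a live object:
//
//     int32_t refCnt = 1;
//     sk_atomic_inc(&refCnt);                    // take a reference
//     if (1 == sk_atomic_dec(&refCnt)) {         // drop a reference
//         sk_membar_aquire__after_atomic_dec();  // no-op here, since the intrinsics are full barriers
//         // last reference gone; the object may be destroyed
//     }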

static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
    int32_t value = *addr;

    while (true) {
        if (value == 0) {
            return 0;
        }

        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);

        if (before == value) {
            return value;
        } else {
            value = before;
        }
    }
}
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() { }
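
// Illustrative only: sk_atomic_conditional_inc() is an "increment unless zero"
// primitive, e.g. for taking a reference only while at least one reference
// still exists (a weak-to-strong promotion). A sketch with a hypothetical
// counter:
//
//     if (0 != sk_atomic_conditional_inc(&refCnt)) {
//         sk_membar_aquire__after_atomic_conditional_inc();
//         // success: refCnt was non-zero and has been incremented
//     } else {
//         // refCnt was already zero; the object must not be revived
//     }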

#else // SK_BUILD_FOR_ANDROID_FRAMEWORK

/* The platform atomics operations are slightly more efficient than the
 * GCC built-ins, so use them.
 */
#include <utils/Atomic.h>

#define sk_atomic_inc(addr)         android_atomic_inc(addr)
#define sk_atomic_add(addr, inc)    android_atomic_add(inc, addr)
#define sk_atomic_dec(addr)         android_atomic_dec(addr)
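
// Note the argument order: sk_atomic_add(addr, inc) expands to
// android_atomic_add(inc, addr) because the platform routine takes the
// increment first. Illustrative only:
//
//     int32_t counter = 0;
//     sk_atomic_add(&counter, 5);   // becomes android_atomic_add(5, &counter)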

static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() {
    //HACK: Android is actually using full memory barriers.
    //      Should this change, uncomment below.
    //int dummy;
    //android_atomic_acquire_store(0, &dummy);
}
static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
    while (true) {
        int32_t value = *addr;
        if (value == 0) {
            return 0;
        }
        if (0 == android_atomic_release_cas(value, value + 1, addr)) {
            return value;
        }
    }
}
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() {
    //HACK: Android is actually using full memory barriers.
    //      Should this change, uncomment below.
    //int dummy;
    //android_atomic_acquire_store(0, &dummy);
}

#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK

#else // !SK_BUILD_FOR_ANDROID

/** Implemented by the porting layer, this function adds one to the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_inc(int32_t* addr);

/** Implemented by the porting layer, this function adds inc to the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
 */
SK_API int32_t sk_atomic_add(int32_t* addr, int32_t inc);

/** Implemented by the porting layer, this function subtracts one from the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    Expected to act as a release (SL/S) memory barrier and a compiler barrier.
*/
SK_API int32_t sk_atomic_dec(int32_t* addr);
/** If sk_atomic_dec does not act as an acquire (L/SL) barrier, this is expected
    to act as an acquire (L/SL) memory barrier and as a compiler barrier.
*/
SK_API void sk_membar_aquire__after_atomic_dec();
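
/*  Illustrative only: the release semantics of sk_atomic_dec() plus the
    acquire barrier above are what make the usual unref-then-delete pattern
    safe across threads. A sketch, assuming a hypothetical ref-counted object:

        if (1 == sk_atomic_dec(&obj->fRefCnt)) {
            sk_membar_aquire__after_atomic_dec();
            delete obj;
        }
*/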

/** Implemented by the porting layer, this function adds one to the int
    specified by the address iff the int specified by the address is not zero
    (in a thread-safe manner), and returns the previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_conditional_inc(int32_t*);
/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier, this
    is expected to act as an acquire (L/SL) memory barrier and as a compiler
    barrier.
*/
SK_API void sk_membar_aquire__after_atomic_conditional_inc();
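
/*  Illustrative only: sk_atomic_conditional_inc() paired with its acquire
    barrier gives a "take a reference only if one still exists" operation
    (sketch, hypothetical field name):

        if (0 != sk_atomic_conditional_inc(&obj->fRefCnt)) {
            sk_membar_aquire__after_atomic_conditional_inc();
            // obj is now safely referenced by this thread
        }
*/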

#endif // !SK_BUILD_FOR_ANDROID

#ifdef SK_USE_POSIX_THREADS

#include <pthread.h>

// A SkBaseMutex is a POD structure that can be directly initialized
// at declaration time with SK_DECLARE_STATIC/GLOBAL_MUTEX. This avoids the
// generation of a static initializer in the final machine code (and
// a corresponding static finalizer).
//
struct SkBaseMutex {
    void acquire() { pthread_mutex_lock(&fMutex); }
    void release() { pthread_mutex_unlock(&fMutex); }
    pthread_mutex_t fMutex;
};

// Using POD-style initialization prevents the generation of a static initializer
// and keeps the acquire() implementation small and fast.
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }

// Special case used when the static mutex must be available globally.
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
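
// Illustrative only: a mutex declared with these macros is fully initialized
// at compile time, so no static constructor runs at program start-up. Typical
// usage (sketch, hypothetical names):
//
//     SK_DECLARE_STATIC_MUTEX(gCacheMutex);
//
//     void touchCache() {
//         gCacheMutex.acquire();
//         // ... mutate state shared between threads ...
//         gCacheMutex.release();
//     }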

// A normal mutex that must be initialized through normal C++ construction,
// i.e. when it's a member of another class, or allocated on the heap.
class SK_API SkMutex : public SkBaseMutex, SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();
};

#else // !SK_USE_POSIX_THREADS

// In the generic case, SkBaseMutex and SkMutex are the same thing, and we
// can't easily get rid of static initializers.
//
class SK_API SkMutex : SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();

    void acquire();
    void release();

private:
    bool fIsGlobal;
    enum {
        kStorageIntCount = 64
    };
    uint32_t fStorage[kStorageIntCount];
};

typedef SkMutex SkBaseMutex;

#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name

#endif // !SK_USE_POSIX_THREADS


#endif