Qualify the source argument of atomic loads as a const pointer.
Also normalize the opening brace placement in a few locations.
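
For example (an illustrative sketch only, not part of this change; the
struct, field, and function names below are hypothetical), a caller that
only holds a const pointer to shared state can now perform an acquire
load without casting away const:

    #include <stdint.h>
    #include <cutils/atomic.h>

    struct shared { int32_t flag; };

    static int32_t peek_flag(const struct shared *s)
    {
        /* &s->flag is a const int32_t *, which implicitly converts to the
         * volatile const int32_t * parameter without any cast. */
        return android_atomic_acquire_load(&s->flag);
    }
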
Change-Id: I8f518e933094337d5d3371321326ffc03b3a5f5a
diff --git a/include/cutils/atomic-arm.h b/include/cutils/atomic-arm.h
index 0dd629d..93633c4 100644
--- a/include/cutils/atomic-arm.h
+++ b/include/cutils/atomic-arm.h
@@ -49,14 +49,14 @@
}
#endif
-extern inline int32_t android_atomic_acquire_load(volatile int32_t *ptr)
+extern inline int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
{
int32_t value = *ptr;
android_memory_barrier();
return value;
}
-extern inline int32_t android_atomic_release_load(volatile int32_t *ptr)
+extern inline int32_t android_atomic_release_load(volatile const int32_t *ptr)
{
android_memory_barrier();
return *ptr;
@@ -196,11 +196,13 @@
}
#endif
-extern inline int32_t android_atomic_inc(volatile int32_t *addr) {
+extern inline int32_t android_atomic_inc(volatile int32_t *addr)
+{
return android_atomic_add(1, addr);
}
-extern inline int32_t android_atomic_dec(volatile int32_t *addr) {
+extern inline int32_t android_atomic_dec(volatile int32_t *addr)
+{
return android_atomic_add(-1, addr);
}
diff --git a/include/cutils/atomic-x86.h b/include/cutils/atomic-x86.h
index 06b643f..12a1985 100644
--- a/include/cutils/atomic-x86.h
+++ b/include/cutils/atomic-x86.h
@@ -36,25 +36,29 @@
}
#endif
-extern inline int32_t android_atomic_acquire_load(volatile int32_t *ptr) {
+extern inline int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
int32_t value = *ptr;
android_compiler_barrier();
return value;
}
-extern inline int32_t android_atomic_release_load(volatile int32_t *ptr) {
+extern inline int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
android_memory_barrier();
return *ptr;
}
extern inline void android_atomic_acquire_store(int32_t value,
- volatile int32_t *ptr) {
+ volatile int32_t *ptr)
+{
*ptr = value;
android_memory_barrier();
}
extern inline void android_atomic_release_store(int32_t value,
- volatile int32_t *ptr) {
+ volatile int32_t *ptr)
+{
android_compiler_barrier();
*ptr = value;
}
@@ -107,11 +111,13 @@
return increment;
}
-extern inline int32_t android_atomic_inc(volatile int32_t *addr) {
+extern inline int32_t android_atomic_inc(volatile int32_t *addr)
+{
return android_atomic_add(1, addr);
}
-extern inline int32_t android_atomic_dec(volatile int32_t *addr) {
+extern inline int32_t android_atomic_dec(volatile int32_t *addr)
+{
return android_atomic_add(-1, addr);
}
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index 3866848..a50bf0f 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -77,8 +77,8 @@
* This is only necessary if you need the memory barrier. A 32-bit read
* from a 32-bit aligned address is atomic on all supported platforms.
*/
-int32_t android_atomic_acquire_load(volatile int32_t* addr);
-int32_t android_atomic_release_load(volatile int32_t* addr);
+int32_t android_atomic_acquire_load(volatile const int32_t* addr);
+int32_t android_atomic_release_load(volatile const int32_t* addr);
/*
* Perform an atomic store with "acquire" or "release" ordering.
diff --git a/libcutils/atomic-android-sh.c b/libcutils/atomic-android-sh.c
index abe7d25..f8f1f57 100644
--- a/libcutils/atomic-android-sh.c
+++ b/libcutils/atomic-android-sh.c
@@ -49,12 +49,12 @@
&_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
-int32_t android_atomic_acquire_load(volatile int32_t* addr)
+int32_t android_atomic_acquire_load(volatile const int32_t* addr)
{
return *addr;
}
-int32_t android_atomic_release_load(volatile int32_t* addr)
+int32_t android_atomic_release_load(volatile const int32_t* addr)
{
return *addr;
}