am 41198eea: Merge "Revert "Rewrite atomic.h to use stdatomic.h.""

* commit '41198eea42b7a6f1d4222abd7a5fec965d11b238':
  Revert "Rewrite atomic.h to use stdatomic.h."
diff --git a/include/cutils/atomic-arm.h b/include/cutils/atomic-arm.h
new file mode 100644
index 0000000..6b031b6
--- /dev/null
+++ b/include/cutils/atomic-arm.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_ARM_H
+#define ANDROID_CUTILS_ATOMIC_ARM_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+extern ANDROID_ATOMIC_INLINE void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+#if ANDROID_SMP == 0
+    android_compiler_barrier();
+#else
+    __asm__ __volatile__ ("dmb" : : : "memory");
+#endif
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_cas(int32_t old_value, int32_t new_value,
+                       volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        __asm__ __volatile__ ("ldrex %0, [%3]\n"
+                              "mov %1, #0\n"
+                              "teq %0, %4\n"
+#ifdef __thumb2__
+                              "it eq\n"
+#endif
+                              "strexeq %1, %5, [%3]"
+                              : "=&r" (prev), "=&r" (status), "+m"(*ptr)
+                              : "r" (ptr), "Ir" (old_value), "r" (new_value)
+                              : "cc");
+    } while (__builtin_expect(status != 0, 0));
+    return prev != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_acquire_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    int status = android_atomic_cas(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_release_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    int32_t prev, tmp, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ ("ldrex %0, [%4]\n"
+                              "add %1, %0, %5\n"
+                              "strex %2, %1, [%4]"
+                              : "=&r" (prev), "=&r" (tmp),
+                                "=&r" (status), "+m" (*ptr)
+                              : "r" (ptr), "Ir" (increment)
+                              : "cc");
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, tmp, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ ("ldrex %0, [%4]\n"
+                              "and %1, %0, %5\n"
+                              "strex %2, %1, [%4]"
+                              : "=&r" (prev), "=&r" (tmp),
+                                "=&r" (status), "+m" (*ptr)
+                              : "r" (ptr), "Ir" (value)
+                              : "cc");
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, tmp, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ ("ldrex %0, [%4]\n"
+                              "orr %1, %0, %5\n"
+                              "strex %2, %1, [%4]"
+                              : "=&r" (prev), "=&r" (tmp),
+                                "=&r" (status), "+m" (*ptr)
+                              : "r" (ptr), "Ir" (value)
+                              : "cc");
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_ARM_H */
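(Editorial note, not part of the patch: the ldrex/strex loop in android_atomic_cas above implements a plain compare-and-swap that returns 0 on success and non-zero on failure. A minimal C-level sketch of that contract, illustrative only and expressed with the __sync builtin that the arm64 header below uses for the same purpose:

    /* Illustrative sketch only -- not part of this change. */
    static inline int cas_contract_sketch(int32_t old_value, int32_t new_value,
                                          volatile int32_t *ptr)
    {
        /* Returns 0 if *ptr held old_value and was replaced by new_value,
         * non-zero otherwise -- the same contract as android_atomic_cas(). */
        return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
    }
)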
diff --git a/include/cutils/atomic-arm64.h b/include/cutils/atomic-arm64.h
new file mode 100644
index 0000000..7ae47d7
--- /dev/null
+++ b/include/cutils/atomic-arm64.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_AARCH64_H
+#define ANDROID_CUTILS_ATOMIC_AARCH64_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+/*
+   TODOAArch64: Revisit the below functions and check for potential
+   optimizations using assembly code or otherwise.
+*/
+
+extern ANDROID_ATOMIC_INLINE
+void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("dmb ish" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_cas(int32_t old_value, int32_t new_value,
+                       volatile int32_t *ptr)
+{
+    return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_acquire_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    int status = android_atomic_cas(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_release_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev + increment, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev & value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev | value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_AARCH64_H */
diff --git a/include/cutils/atomic-inline.h b/include/cutils/atomic-inline.h
index 103c5b0..a31e913 100644
--- a/include/cutils/atomic-inline.h
+++ b/include/cutils/atomic-inline.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2010 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,6 +14,59 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_CUTILS_ATOMIC_H
-#include <cutils/atomic.h>
+#ifndef ANDROID_CUTILS_ATOMIC_INLINE_H
+#define ANDROID_CUTILS_ATOMIC_INLINE_H
+
+#ifdef __cplusplus
+extern "C" {
 #endif
+
+/*
+ * Inline declarations and macros for some special-purpose atomic
+ * operations.  These are intended for rare circumstances where a
+ * memory barrier needs to be issued inline rather than as a function
+ * call.
+ *
+ * Most code should not use these.
+ *
+ * Anything that does include this file must set ANDROID_SMP to either
+ * 0 or 1, indicating compilation for UP or SMP, respectively.
+ *
+ * Macros defined in this header:
+ *
+ * void ANDROID_MEMBAR_FULL(void)
+ *   Full memory barrier.  Provides a compiler reordering barrier, and
+ *   on SMP systems emits an appropriate instruction.
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP before including atomic-inline.h"
+#endif
+
+#if defined(__aarch64__)
+#include <cutils/atomic-arm64.h>
+#elif defined(__arm__)
+#include <cutils/atomic-arm.h>
+#elif defined(__i386__)
+#include <cutils/atomic-x86.h>
+#elif defined(__x86_64__)
+#include <cutils/atomic-x86_64.h>
+#elif defined(__mips64)
+#include <cutils/atomic-mips64.h>
+#elif defined(__mips__)
+#include <cutils/atomic-mips.h>
+#else
+#error atomic operations are unsupported
+#endif
+
+#if ANDROID_SMP == 0
+#define ANDROID_MEMBAR_FULL android_compiler_barrier
+#else
+#define ANDROID_MEMBAR_FULL android_memory_barrier
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ANDROID_CUTILS_ATOMIC_INLINE_H */
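(Editorial note, not part of the patch: as the header comment says, anything that includes atomic-inline.h must define ANDROID_SMP first; in the tree that normally comes from the build system as -DANDROID_SMP=0 or 1. A minimal usage sketch, hard-coding the macro only for illustration:

    /* Illustrative sketch only -- ANDROID_SMP would normally be supplied by
     * the build system rather than defined by hand. */
    #define ANDROID_SMP 1
    #include <cutils/atomic-inline.h>

    void publish_flag(volatile int32_t *flag)
    {
        ANDROID_MEMBAR_FULL();   /* full barrier: dmb/sync/mfence on SMP,   */
        *flag = 1;               /* only a compiler barrier on UP builds.   */
    }
)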
diff --git a/include/cutils/atomic-mips.h b/include/cutils/atomic-mips.h
new file mode 100644
index 0000000..5d4f097
--- /dev/null
+++ b/include/cutils/atomic-mips.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_MIPS_H
+#define ANDROID_CUTILS_ATOMIC_MIPS_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+extern ANDROID_ATOMIC_INLINE void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+#if ANDROID_SMP == 0
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+    android_compiler_barrier();
+}
+#else
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("sync" : : : "memory");
+}
+#endif
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE void
+android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE void
+android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_cas(int32_t old_value, int32_t new_value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        __asm__ __volatile__ (
+            "    ll     %[prev], (%[ptr])\n"
+            "    li     %[status], 1\n"
+            "    bne    %[prev], %[old], 9f\n"
+            "    move   %[status], %[new_value]\n"
+            "    sc     %[status], (%[ptr])\n"
+            "9:\n"
+            : [prev] "=&r" (prev), [status] "=&r" (status)
+            : [ptr] "r" (ptr), [old] "r" (old_value), [new_value] "r" (new_value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_acquire_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    int status = android_atomic_cas(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_release_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    addu  %[status], %[prev], %[inc]\n"
+        "    sc    %[status], (%[ptr])\n"
+        :  [status] "=&r" (status), [prev] "=&r" (prev)
+        :  [ptr] "r" (ptr), [inc] "Ir" (increment)
+        );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    and   %[status], %[prev], %[value]\n"
+        "    sc    %[status], (%[ptr])\n"
+        : [prev] "=&r" (prev), [status] "=&r" (status)
+        : [ptr] "r" (ptr), [value] "Ir" (value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    or    %[status], %[prev], %[value]\n"
+        "    sc    %[status], (%[ptr])\n"
+        : [prev] "=&r" (prev), [status] "=&r" (status)
+        : [ptr] "r" (ptr), [value] "Ir" (value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_MIPS_H */
diff --git a/include/cutils/atomic-mips64.h b/include/cutils/atomic-mips64.h
new file mode 100644
index 0000000..9d8f65e
--- /dev/null
+++ b/include/cutils/atomic-mips64.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_MIPS64_H
+#define ANDROID_CUTILS_ATOMIC_MIPS64_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+extern ANDROID_ATOMIC_INLINE void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_cas(int32_t old_value, int32_t new_value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        __asm__ __volatile__ (
+            "    ll     %[prev], (%[ptr])\n"
+            "    li     %[status], 1\n"
+            "    bne    %[prev], %[old], 9f\n"
+            "    move   %[status], %[new_value]\n"
+            "    sc     %[status], (%[ptr])\n"
+            "9:\n"
+            : [prev] "=&r" (prev), [status] "=&r" (status)
+            : [ptr] "r" (ptr), [old] "r" (old_value), [new_value] "r" (new_value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_acquire_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    int status = android_atomic_cas(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_release_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    addu  %[status], %[prev], %[inc]\n"
+        "    sc    %[status], (%[ptr])\n"
+        :  [status] "=&r" (status), [prev] "=&r" (prev)
+        :  [ptr] "r" (ptr), [inc] "Ir" (increment)
+        );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    and   %[status], %[prev], %[value]\n"
+        "    sc    %[status], (%[ptr])\n"
+        : [prev] "=&r" (prev), [status] "=&r" (status)
+        : [ptr] "r" (ptr), [value] "Ir" (value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        __asm__ __volatile__ (
+        "    ll    %[prev], (%[ptr])\n"
+        "    or    %[status], %[prev], %[value]\n"
+        "    sc    %[status], (%[ptr])\n"
+        : [prev] "=&r" (prev), [status] "=&r" (status)
+        : [ptr] "r" (ptr), [value] "Ir" (value)
+            );
+    } while (__builtin_expect(status == 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_MIPS64_H */
diff --git a/include/cutils/atomic-x86.h b/include/cutils/atomic-x86.h
new file mode 100644
index 0000000..06bf1a3
--- /dev/null
+++ b/include/cutils/atomic-x86.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_X86_H
+#define ANDROID_CUTILS_ATOMIC_X86_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+extern ANDROID_ATOMIC_INLINE void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+#if ANDROID_SMP == 0
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+    android_compiler_barrier();
+}
+#else
+extern ANDROID_ATOMIC_INLINE void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("mfence" : : : "memory");
+}
+#endif
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_compiler_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE void
+android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE void
+android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_compiler_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_cas(int32_t old_value, int32_t new_value, volatile int32_t *ptr)
+{
+    int32_t prev;
+    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+                          : "=a" (prev)
+                          : "q" (new_value), "m" (*ptr), "0" (old_value)
+                          : "memory");
+    return prev != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_acquire_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    /* Loads are not reordered with other loads. */
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE int
+android_atomic_release_cas(int32_t old_value,
+                           int32_t new_value,
+                           volatile int32_t *ptr)
+{
+    /* Stores are not reordered with other stores. */
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    __asm__ __volatile__ ("lock; xaddl %0, %1"
+                          : "+r" (increment), "+m" (*ptr)
+                          : : "memory");
+    /* increment now holds the old value of *ptr */
+    return increment;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev & value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE int32_t
+android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev | value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_X86_H */
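(Editorial note, not part of the patch: the "lock; xaddl" form makes android_atomic_add a fetch-and-add, so the return value is what *ptr held before the addition, and android_atomic_inc/dec simply forward that. A hedged usage sketch relying only on that documented return value:

    /* Illustrative sketch only: handing out unique ticket numbers.  Because
     * android_atomic_inc() returns the previous value, concurrent callers
     * each receive a distinct number, starting from the counter's initial
     * value. */
    #include <cutils/atomic.h>

    static int32_t next_ticket(volatile int32_t *counter)
    {
        return android_atomic_inc(counter);
    }
)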
diff --git a/include/cutils/atomic-x86_64.h b/include/cutils/atomic-x86_64.h
new file mode 100644
index 0000000..99cb070
--- /dev/null
+++ b/include/cutils/atomic-x86_64.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_X86_64_H
+#define ANDROID_CUTILS_ATOMIC_X86_64_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+extern ANDROID_ATOMIC_INLINE
+void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("mfence" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_compiler_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_compiler_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_cas(int32_t old_value, int32_t new_value,
+                       volatile int32_t *ptr)
+{
+    int32_t prev;
+    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+                          : "=a" (prev)
+                          : "q" (new_value), "m" (*ptr), "0" (old_value)
+                          : "memory");
+    return prev != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_acquire_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    /* Loads are not reordered with other loads. */
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_release_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    /* Stores are not reordered with other stores. */
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    __asm__ __volatile__ ("lock; xaddl %0, %1"
+                          : "+r" (increment), "+m" (*ptr)
+                          : : "memory");
+    /* increment now holds the old value of *ptr */
+    return increment;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev & value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev | value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_X86_64_H */
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index e199130..79409a7 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -19,10 +19,9 @@
 
 #include <stdint.h>
 #include <sys/types.h>
-#include <stdatomic.h>
 
-#ifndef ANDROID_ATOMIC_INLINE
-#define ANDROID_ATOMIC_INLINE static inline
+#ifdef __cplusplus
+extern "C" {
 #endif
 
 /*
@@ -71,14 +70,6 @@
  * If they are not, atomicity is not guaranteed.
  */
 
-#if ANDROID_SMP == 0
-# define ANDROID_ATOMIC_ACQUIRE memory_order_relaxed
-# define ANDROID_ATOMIC_RELEASE memory_order_relaxed
-#else
-# define ANDROID_ATOMIC_ACQUIRE memory_order_acquire
-# define ANDROID_ATOMIC_RELEASE memory_order_release
-#endif
-
 /*
  * Basic arithmetic and bitwise operations.  These all provide a
  * barrier with "release" ordering, and return the previous value.
@@ -86,41 +77,11 @@
  * These have the same characteristics (e.g. what happens on overflow)
  * as the equivalent non-atomic C operations.
  */
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_inc(volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-        /* Int32_t, if it exists, is the same as int_least32_t. */
-    return atomic_fetch_add_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_dec(volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_fetch_sub_explicit(a, 1, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_fetch_add_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_fetch_and_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_fetch_or_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
+int32_t android_atomic_inc(volatile int32_t* addr);
+int32_t android_atomic_dec(volatile int32_t* addr);
+int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
+int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
+int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
 
 /*
  * Perform an atomic load with "acquire" or "release" ordering.
@@ -135,53 +96,29 @@
  * this comment, you are in the vast majority, and should not be
  * using release loads or replacing them with anything other than
  * locks or default sequentially consistent atomics.
+ *
+ * This is only necessary if you need the memory barrier.  A 32-bit read
+ * from a 32-bit aligned address is atomic on all supported platforms.
  */
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_acquire_load(volatile const int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return atomic_load_explicit(a, ANDROID_ATOMIC_ACQUIRE);
-}
-
-ANDROID_ATOMIC_INLINE
-int32_t android_atomic_release_load(volatile const int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    atomic_thread_fence(memory_order_seq_cst);
-    /* Any reasonable clients of this interface would probably prefer   */
-    /* something weaker.  But some remaining clients seem to be         */
-    /* abusing this API in strange ways, e.g. by using it as a fence.   */
-    /* Thus we are conservative until we can get rid of remaining       */
-    /* clients (and this function).                                     */
-    return atomic_load_explicit(a, memory_order_relaxed);
-}
+int32_t android_atomic_acquire_load(volatile const int32_t* addr);
+int32_t android_atomic_release_load(volatile const int32_t* addr);
 
 /*
  * Perform an atomic store with "acquire" or "release" ordering.
  *
- * Note that the notion of an "acquire" ordering for a store does not
+ * Note that the notion of a "acquire" ordering for a store does not
  * really fit into the C11 or C++11 memory model.  The extra ordering
  * is normally observable only by code using memory_order_relaxed
  * atomics, or data races.  In the rare cases in which such ordering
  * is called for, use memory_order_relaxed atomics and a trailing
  * atomic_thread_fence (typically with memory_order_release,
  * not memory_order_acquire!) instead.
+ *
+ * This is only necessary if you need the memory barrier.  A 32-bit write
+ * to a 32-bit aligned address is atomic on all supported platforms.
  */
-ANDROID_ATOMIC_INLINE
-void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    atomic_store_explicit(a, value, memory_order_relaxed);
-    atomic_thread_fence(memory_order_seq_cst);
-    /* Again overly conservative to accomodate weird clients.   */
-}
-
-ANDROID_ATOMIC_INLINE
-void android_atomic_release_store(int32_t value, volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    atomic_store_explicit(a, value, ANDROID_ATOMIC_RELEASE);
-}
+void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
+void android_atomic_release_store(int32_t value, volatile int32_t* addr);
 
 /*
  * Compare-and-set operation with "acquire" or "release" ordering.
@@ -195,48 +132,10 @@
  * Implementations that use the release CAS in a loop may be less efficient
  * than possible, because we re-issue the memory barrier on each iteration.
  */
-ANDROID_ATOMIC_INLINE
 int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
-                           volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return (int)(!atomic_compare_exchange_strong_explicit(
-                                          a, &oldvalue, newvalue,
-                                          ANDROID_ATOMIC_ACQUIRE,
-                                          ANDROID_ATOMIC_ACQUIRE));
-}
-
-ANDROID_ATOMIC_INLINE
+        volatile int32_t* addr);
 int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
-                               volatile int32_t* addr)
-{
-    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
-    return (int)(!atomic_compare_exchange_strong_explicit(
-                                          a, &oldvalue, newvalue,
-                                          ANDROID_ATOMIC_RELEASE,
-                                          memory_order_relaxed));
-}
-
-/*
- * Fence primitives.
- */
-ANDROID_ATOMIC_INLINE
-void android_compiler_barrier(void)
-{
-    __asm__ __volatile__ ("" : : : "memory");
-    /* Could probably also be:                          */
-    /* atomic_signal_fence(memory_order_seq_cst);       */
-}
-
-ANDROID_ATOMIC_INLINE
-void android_memory_barrier(void)
-{
-#if ANDROID_SMP == 0
-    android_compiler_barrier();
-#else
-    atomic_thread_fence(memory_order_seq_cst);
-#endif
-}
+        volatile int32_t* addr);
 
 /*
  * Aliases for code using an older version of this header.  These are now
@@ -246,4 +145,8 @@
 #define android_atomic_write android_atomic_release_store
 #define android_atomic_cmpxchg android_atomic_release_cas
 
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
 #endif // ANDROID_CUTILS_ATOMIC_H
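(Editorial note, not part of the patch: the acquire/release comments above describe the usual pairing -- a writer publishes data and then release-stores a flag, a reader acquire-loads the flag before touching the data. A minimal sketch against the declarations this header now exposes; the names are hypothetical:

    /* Illustrative sketch only. */
    #include <cutils/atomic.h>

    static int32_t g_data;
    static volatile int32_t g_ready;

    void producer(int32_t value)
    {
        g_data = value;
        android_atomic_release_store(1, &g_ready);   /* barrier, then store */
    }

    int consumer(int32_t *out)
    {
        if (android_atomic_acquire_load(&g_ready)) { /* load, then barrier  */
            *out = g_data;
            return 1;
        }
        return 0;
    }
)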
diff --git a/libcutils/atomic.c b/libcutils/atomic.c
index d34aa00..1484ef8 100644
--- a/libcutils/atomic.c
+++ b/libcutils/atomic.c
@@ -14,13 +14,6 @@
  * limitations under the License.
  */
 
-/*
- * Generate non-inlined versions of android_atomic functions.
- * Nobody should be using these, but some binary blobs currently (late 2014)
- * are.
- * If you read this in 2015 or later, please try to delete this file.
- */
-
 #define ANDROID_ATOMIC_INLINE
 
-#include <cutils/atomic.h>
+#include <cutils/atomic-inline.h>
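(Editorial note, not part of the patch: with the stdatomic-based inline definitions removed from atomic.h, libcutils/atomic.c is again what provides the out-of-line symbols. Defining ANDROID_ATOMIC_INLINE to nothing before including atomic-inline.h turns the "extern inline" functions in the selected per-architecture header into ordinary external definitions. A sketch of the effect, assuming the build supplies -DANDROID_SMP as it does for libcutils:

    /* Illustrative sketch of the pattern used by libcutils/atomic.c. */
    #define ANDROID_ATOMIC_INLINE        /* expands to nothing, so every     */
    #include <cutils/atomic-inline.h>    /* "extern inline" function in the
                                            chosen atomic-<arch>.h becomes a
                                            regular external definition that
                                            libcutils exports. */
)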