Merge "ART: Move oat_data_flow_attributes_ to private and put an API"
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index b030bb4..0596d4f 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1018,8 +1018,8 @@
       vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
     }
   } else {
-    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
-    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
+    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
+    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
     DCHECK_EQ(core_vmap_table_.size(), 0u);
     DCHECK_EQ(fp_vmap_table_.size(), 0u);
     vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
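
Throughout this patch, direct __builtin_popcount calls are replaced by the POPCOUNT template that the runtime/utils.h hunk below introduces. Unlike the raw builtin, it dispatches on the operand width and is constexpr, so it also works in compile-time contexts and will not silently truncate a 64-bit mask. A minimal standalone sketch, assuming C++11 (the template body matches the utils.h hunk; the static_asserts are illustrative):

    #include <cstdint>

    template <typename T>
    static constexpr int POPCOUNT(T x) {
      return (sizeof(T) == sizeof(uint32_t))
          ? __builtin_popcount(x)     // 32-bit operands.
          : __builtin_popcountll(x);  // 64-bit operands keep their high bits.
    }

    static_assert(POPCOUNT(0xF0u) == 4, "four bits set in a 32-bit value");
    static_assert(POPCOUNT(uint64_t{1} << 40) == 1, "high bits of a 64-bit value are counted");
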
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index effc38e..5c839dd 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1107,7 +1107,7 @@
   // The offset is off by 8 due to the way the ARM CPUs read PC.
   offset -= 8;
   CHECK_ALIGNED(offset, 4);
-  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
 
   // Properly preserve only the bits supported in the instruction.
   offset >>= 2;
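
The CHECK above verifies that the byte offset fits in the signed range of the instruction's offset field, recovering the field width as the population count of the field mask; CountOneBits handled only 32-bit values, while POPCOUNT is width-aware. A sketch of the relationship, assuming ARM's 24-bit branch field (kBranchOffsetMask = 0x00ffffff in assembler_arm.h) and modeling ART's IsInt() helper from runtime/utils.h:

    #include <cstdint>

    // Signed-range check, modeled on ART's IsInt(N, value).
    static constexpr bool IsInt(int N, intptr_t value) {
      return value >= -(intptr_t{1} << (N - 1)) &&
             value <  (intptr_t{1} << (N - 1));
    }

    constexpr uint32_t kBranchOffsetMask = 0x00ffffff;  // 24 contiguous bits (assumed value).
    static_assert(__builtin_popcount(kBranchOffsetMask) == 24, "field width");

    static_assert(IsInt(24, 0x7fffff), "largest forward byte offset fits");
    static_assert(!IsInt(24, 0x800000), "one past the range is rejected");
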
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 45d3a97..9001f8a 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -123,7 +123,7 @@
 
 int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
   CHECK_ALIGNED(offset, 4);
-  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
 
   // Properly preserve only the bits supported in the instruction.
   offset >>= 2;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 102e126..0e1b25e 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -44,8 +44,8 @@
   mirror::ArtMethod* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = __builtin_popcount(core_spills);
-  size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+  size_t spill_count = POPCOUNT(core_spills);
+  size_t fp_spill_count = POPCOUNT(fp_core_spills);
   size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context
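
FillCalleeSaves uses the spill-mask popcount to locate the spill area at the top of the frame, then walks the mask from the lowest set register upward to pair each callee-save register with its slot. A hedged sketch of that walking pattern (names are hypothetical; the real loop lives in the per-ISA context classes):

    #include <cstdint>
    #include <cstdio>

    // Walk a spill mask from the lowest register number upward; clearing the
    // lowest set bit each iteration visits registers in ascending order.
    static void WalkSpillMask(uint32_t spill_mask) {
      int slot = 0;
      for (uint32_t mask = spill_mask; mask != 0; mask &= mask - 1) {
        int reg = __builtin_ctz(mask);  // Index of the lowest set bit.
        std::printf("r%d -> spill slot %d\n", reg, slot++);
      }
    }

    int main() {
      WalkSpillMask((1u << 4) | (1u << 11) | (1u << 14));  // e.g. r4, r11, lr
    }
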
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 3bbec71..eddaa0b 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -233,9 +233,9 @@
 
   mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(sc->arm_r0);
   uint32_t spill_mask = method->GetCoreSpillMask();
-  uint32_t numcores = __builtin_popcount(spill_mask);
+  uint32_t numcores = POPCOUNT(spill_mask);
   uint32_t fp_spill_mask = method->GetFpSpillMask();
-  uint32_t numfps = __builtin_popcount(fp_spill_mask);
+  uint32_t numfps = POPCOUNT(fp_spill_mask);
   uint32_t spill_size = (numcores + numfps) * 4;
   LOG(DEBUG) << "spill size: " << spill_size;
   uint8_t* prevframe = prevsp + spill_size;
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index c96ff60..0890fa9 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -47,8 +47,8 @@
   mirror::ArtMethod* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = __builtin_popcount(core_spills);
-  size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+  size_t spill_count = POPCOUNT(core_spills);
+  size_t fp_spill_count = POPCOUNT(fp_core_spills);
   size_t frame_size = method->GetFrameSizeInBytes();
 
   if (spill_count > 0) {
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index b957708..0950e71 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -43,8 +43,8 @@
   mirror::ArtMethod* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = __builtin_popcount(core_spills);
-  size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+  size_t spill_count = POPCOUNT(core_spills);
+  size_t fp_spill_count = POPCOUNT(fp_core_spills);
   size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 437beb5..8fbca94 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -183,12 +183,12 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__)
+#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
 extern "C" void art_quick_lock_object(void);
 #endif
 
 TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__)
+#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
   Thread* self = Thread::Current();
   // Create an object
   ScopedObjectAccess soa(self);
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 5cf3001..c68d76a 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -38,7 +38,7 @@
 void X86Context::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
-  size_t spill_count = __builtin_popcount(core_spills);
+  size_t spill_count = POPCOUNT(core_spills);
   DCHECK_EQ(method->GetFpSpillMask(), 0u);
   size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 3f1f86d..29a7065 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -42,8 +42,8 @@
   mirror::ArtMethod* method = fr.GetMethod();
   uint32_t core_spills = method->GetCoreSpillMask();
   uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = __builtin_popcount(core_spills);
-  size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+  size_t spill_count = POPCOUNT(core_spills);
+  size_t fp_spill_count = POPCOUNT(fp_core_spills);
   size_t frame_size = method->GetFrameSizeInBytes();
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index cac6cfd..9ccf6c9 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -661,13 +661,61 @@
 TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
 
 DEFINE_FUNCTION art_quick_lock_object
-    int3
-    int3
+    testl %edi, %edi                      // Null check object/rdi.
+    jz   .Lslow_lock
+.Lretry_lock:
+    movl LOCK_WORD_OFFSET(%edi), %ecx     // ecx := lock word.
+    test LITERAL(0xC0000000), %ecx        // Test the two high bits.
+    jne  .Lslow_lock                      // Slow path if either of the two high bits is set.
+    movl %gs:THREAD_ID_OFFSET, %edx       // edx := thread id
+    test %ecx, %ecx
+    jnz  .Lalready_thin                   // Lock word contains a thin lock.
+    // unlocked case - %edx holds thread id with count of 0
+    xor  %eax, %eax                       // eax == 0 for comparison with lock word in cmpxchg
+    lock cmpxchg  %edx, LOCK_WORD_OFFSET(%edi)
+    jnz  .Lretry_lock                     // cmpxchg failed, retry
+    ret
+.Lalready_thin:
+    cmpw %cx, %dx                         // do we hold the lock already?
+    jne  .Lslow_lock
+    addl LITERAL(65536), %ecx             // increment recursion count
+    test LITERAL(0xC0000000), %ecx        // overflowed if either of the top two bits is set
+    jne  .Lslow_lock                      // count overflowed, so go slow
+    movl %ecx, LOCK_WORD_OFFSET(%edi)     // update lock word; cmpxchg not needed as we hold the lock
+    ret
+.Lslow_lock:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+    movq %gs:THREAD_SELF_OFFSET, %rsi     // pass Thread::Current()
+    movq %rsp, %rdx                       // pass SP
+    call PLT_SYMBOL(artLockObjectFromCode)  // artLockObjectFromCode(object, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME    // restore frame up to return address
+    RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_lock_object
 
 DEFINE_FUNCTION art_quick_unlock_object
-    int3
-    int3
+    testl %edi, %edi                      // null check object/edi
+    jz   .Lslow_unlock
+    movl LOCK_WORD_OFFSET(%edi), %ecx     // ecx := lock word
+    movl %gs:THREAD_ID_OFFSET, %edx       // edx := thread id
+    test LITERAL(0xC0000000), %ecx        // test the two high bits: clear for a thin lock
+    jnz  .Lslow_unlock                    // lock word contains a monitor or hash code
+    cmpw %cx, %dx                         // does the thread id match?
+    jne  .Lslow_unlock
+    cmpl LITERAL(65536), %ecx             // is the recursion count non-zero?
+    jae  .Lrecursive_thin_unlock
+    movl LITERAL(0), LOCK_WORD_OFFSET(%edi)  // no recursion: clear the lock word
+    ret
+.Lrecursive_thin_unlock:
+    subl LITERAL(65536), %ecx             // decrement recursion count
+    movl %ecx, LOCK_WORD_OFFSET(%edi)     // update lock word, we still hold the lock
+    ret
+.Lslow_unlock:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+    movq %gs:THREAD_SELF_OFFSET, %rsi     // pass Thread::Current()
+    movq %rsp, %rdx                       // pass SP
+    call PLT_SYMBOL(artUnlockObjectFromCode)  // artUnlockObjectFromCode(object, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME    // restore frame up to return address
+    RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_unlock_object
 
 DEFINE_FUNCTION art_quick_is_assignable
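
The new entry points implement the thin-lock fast path directly: the low 16 bits of the lock word hold the owner thread id, the bits above them hold a recursion count (hence the 65536 increment), and the two high bits are state bits that must both be clear for a thin lock (hence the 0xC0000000 tests). A C++ rendering of the lock fast path, as a sketch only: the constants are taken from the assembly above, the names are hypothetical, and where this sketch bails out on CAS failure the assembly retries:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kStateMask = 0xC0000000u;  // Two high bits: clear for thin locks.
    constexpr uint32_t kCountOne  = 65536u;       // Recursion count sits above the owner id.
    constexpr uint32_t kOwnerMask = 0x0000FFFFu;  // 16-bit owner thread id.

    // Returns true on fast-path success; false means "take the slow path"
    // (inflated monitor/hash code, contention, or recursion-count overflow).
    bool ThinLockFastPath(std::atomic<uint32_t>* lock_word, uint16_t thread_id) {
      uint32_t cur = lock_word->load(std::memory_order_relaxed);
      if ((cur & kStateMask) != 0) return false;          // Not a thin lock word.
      if (cur == 0) {                                     // Unlocked: try to claim it.
        uint32_t expected = 0;
        return lock_word->compare_exchange_strong(expected, thread_id);
      }
      if ((cur & kOwnerMask) != thread_id) return false;  // Owned by another thread.
      uint32_t next = cur + kCountOne;                    // Recursive acquire.
      if ((next & kStateMask) != 0) return false;         // Count overflowed into state bits.
      lock_word->store(next, std::memory_order_relaxed);  // We own it; plain store suffices.
      return true;
    }
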
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 12c0352..3df5101 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -201,7 +201,7 @@
 uint32_t BitVector::NumSetBits() const {
   uint32_t count = 0;
   for (uint32_t word = 0; word < storage_size_; word++) {
-    count += __builtin_popcount(storage_[word]);
+    count += POPCOUNT(storage_[word]);
   }
   return count;
 }
@@ -331,10 +331,10 @@
 
   uint32_t count = 0u;
   for (uint32_t word = 0u; word < word_end; word++) {
-    count += __builtin_popcount(storage[word]);
+    count += POPCOUNT(storage[word]);
   }
   if (partial_word_bits != 0u) {
-    count += __builtin_popcount(storage[word_end] & ~(0xffffffffu << partial_word_bits));
+    count += POPCOUNT(storage[word_end] & ~(0xffffffffu << partial_word_bits));
   }
   return count;
 }
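
The second hunk counts the set bits in a prefix of the vector: whole 32-bit words first, then a masked partial word, where ~(0xffffffffu << partial_word_bits) keeps only the low partial_word_bits bits (five trailing bits give a mask of 0x1f, for example). A standalone sketch mirroring that logic under a hypothetical name:

    #include <cstdint>

    // Count the set bits among the first num_bits bits of storage, mirroring
    // the prefix variant of BitVector::NumSetBits in the hunk above.
    static uint32_t CountPrefixBits(const uint32_t* storage, uint32_t num_bits) {
      uint32_t word_end = num_bits / 32;
      uint32_t partial_word_bits = num_bits % 32;
      uint32_t count = 0;
      for (uint32_t word = 0; word < word_end; word++) {
        count += __builtin_popcount(storage[word]);
      }
      if (partial_word_bits != 0) {
        // Keep only bits [0, partial_word_bits) of the trailing word.
        count += __builtin_popcount(storage[word_end] & ~(0xffffffffu << partial_word_bits));
      }
      return count;
    }
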
diff --git a/runtime/gc/accounting/gc_allocator.h b/runtime/gc/accounting/gc_allocator.h
index 4fe9367..7dd7cca 100644
--- a/runtime/gc/accounting/gc_allocator.h
+++ b/runtime/gc/accounting/gc_allocator.h
@@ -73,7 +73,7 @@
 // GcAllocatorImpl<T> if kMeasureGcMemoryOverhead is true, std::allocator<T> otherwise.
 template <typename T>
 class GcAllocator : public TypeStaticIf<kMeasureGcMemoryOverhead, GcAllocatorImpl<T>,
-                                        std::allocator<T> >::value {
+                                        std::allocator<T> >::type {
 };
 
 }  // namespace accounting
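
TypeStaticIf is ART's hand-rolled equivalent of std::conditional, and renaming its member from value to type (see the utils.h hunk below) matches the standard-library convention; the ::type fix above simply tracks that rename. A sketch showing the equivalence (std::conditional is shown for comparison only):

    #include <type_traits>

    template <bool condition, typename A, typename B>
    struct TypeStaticIf { typedef A type; };

    template <typename A, typename B>
    struct TypeStaticIf<false, A, B> { typedef B type; };

    static_assert(std::is_same<TypeStaticIf<true, int, long>::type, int>::value,
                  "true selects the first type");
    static_assert(std::is_same<TypeStaticIf<false, int, long>::type,
                               std::conditional<false, int, long>::type>::value,
                  "behaves like std::conditional");
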
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 64a849b..662303e 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -257,7 +257,7 @@
     for (Class* c = this; c != NULL; c = c->GetSuperClass()) {
       count += c->NumReferenceInstanceFieldsDuringLinking();
     }
-    CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets), count);
+    CHECK_EQ((size_t)POPCOUNT(new_reference_offsets), count);
   }
   // Not called within a transaction.
   SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
@@ -268,7 +268,7 @@
   if (new_reference_offsets != CLASS_WALK_SUPER) {
     // Sanity check that the number of bits set in the reference offset bitmap
     // agrees with the number of references
-    CHECK_EQ((size_t)__builtin_popcount(new_reference_offsets),
+    CHECK_EQ((size_t)POPCOUNT(new_reference_offsets),
              NumReferenceStaticFieldsDuringLinking());
   }
   // Not called within a transaction.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 20df78e..fbc0460 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1010,8 +1010,8 @@
                              (1 << art::arm::S27) | (1 << art::arm::S28) | (1 << art::arm::S29) |
                              (1 << art::arm::S30) | (1 << art::arm::S31);
     uint32_t fp_spills = type == kSaveAll ? fp_all_spills : 0;
-    size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
-                                 __builtin_popcount(fp_spills) /* fprs */ +
+    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
+                                 POPCOUNT(fp_spills) /* fprs */ +
                                  1 /* Method* */) * kArmPointerSize, kStackAlignment);
     method->SetFrameSizeInBytes(frame_size);
     method->SetCoreSpillMask(core_spills);
@@ -1024,7 +1024,7 @@
     uint32_t all_spills = (1 << art::mips::S0) | (1 << art::mips::S1);
     uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
                            (type == kSaveAll ? all_spills : 0) | (1 << art::mips::RA);
-    size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
+    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
                                 (type == kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
                                 kMipsPointerSize, kStackAlignment);
     method->SetFrameSizeInBytes(frame_size);
@@ -1035,7 +1035,7 @@
     uint32_t arg_spills = (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
     uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
                          (1 << art::x86::kNumberOfCpuRegisters);  // fake return address callee save
-    size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
+    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
                                  1 /* Method* */) * kX86PointerSize, kStackAlignment);
     method->SetFrameSizeInBytes(frame_size);
     method->SetCoreSpillMask(core_spills);
@@ -1054,8 +1054,8 @@
         (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
         (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
     uint32_t fp_spills = (type == kRefsAndArgs ? fp_arg_spills : 0);
-    size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
-                                 __builtin_popcount(fp_spills) /* fprs */ +
+    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
+                                 POPCOUNT(fp_spills) /* fprs */ +
                                  1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
     method->SetFrameSizeInBytes(frame_size);
     method->SetCoreSpillMask(core_spills);
@@ -1094,8 +1094,8 @@
                           (1 << art::arm64::D31);
       uint32_t fp_spills = fp_ref_spills | (type == kRefsAndArgs ? fp_arg_spills: 0)
                           | (type == kSaveAll ? fp_all_spills : 0);
-      size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
-                                   __builtin_popcount(fp_spills) /* fprs */ +
+      size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
+                                   POPCOUNT(fp_spills) /* fprs */ +
                                    1 /* Method* */) * kArm64PointerSize, kStackAlignment);
       method->SetFrameSizeInBytes(frame_size);
       method->SetCoreSpillMask(core_spills);
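
Every per-ISA block above computes the quick frame size the same way: (spilled GPRs + spilled FPRs + one slot for the Method*) * pointer size, rounded up to the stack alignment. A worked example with assumed x86-64-style figures (seven bits in the core mask, counting the fake return-address slot, and no FP spills):

    #include <cstddef>

    // Local stand-in for ART's RoundUp; n must be a power of two.
    constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    constexpr size_t kPointerSize    = 8;   // kX86_64PointerSize
    constexpr size_t kStackAlignment = 16;
    constexpr size_t kGprs = 7;  // POPCOUNT(core_spills), assumed figure.
    constexpr size_t kFprs = 0;  // POPCOUNT(fp_spills), assumed figure.

    static_assert(RoundUpTo((kGprs + kFprs + 1 /* Method* */) * kPointerSize,
                            kStackAlignment) == 64,
                  "eight 8-byte slots = 64 bytes, already 16-byte aligned");
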
diff --git a/runtime/stack.h b/runtime/stack.h
index afc4f25..73a823a 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -25,6 +25,7 @@
 #include "instruction_set.h"
 #include "mirror/object.h"
 #include "mirror/object_reference.h"
+#include "utils.h"
 #include "verify_object.h"
 
 #include <stdint.h>
@@ -638,8 +639,8 @@
                            size_t frame_size, int reg, InstructionSet isa) {
     DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
     DCHECK_NE(reg, static_cast<int>(kVRegInvalid));
-    int spill_size = __builtin_popcount(core_spills) * GetBytesPerGprSpillLocation(isa)
-        + __builtin_popcount(fp_spills) * GetBytesPerFprSpillLocation(isa)
+    int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+        + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
         + sizeof(uint32_t);  // Filler.
     int num_ins = code_item->ins_size_;
     int num_regs = code_item->registers_size_ - num_ins;
diff --git a/runtime/utils.h b/runtime/utils.h
index 4b2f230..14a532e 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -47,7 +47,7 @@
 };
 
 template<typename T>
-static inline bool IsPowerOfTwo(T x) {
+static constexpr bool IsPowerOfTwo(T x) {
   return (x & (x - 1)) == 0;
 }
 
@@ -115,39 +115,46 @@
 }
 
 // A static if which determines whether to return type A or B based on the condition boolean.
-template <const bool condition, typename A, typename B>
+template <bool condition, typename A, typename B>
 struct TypeStaticIf {
-  typedef A value;
+  typedef A type;
 };
 
 // Specialization to handle the false case.
 template <typename A, typename B>
 struct TypeStaticIf<false, A,  B> {
-  typedef B value;
+  typedef B type;
+};
+
+// Type identity.
+template <typename T>
+struct TypeIdentity {
+  typedef T type;
 };
 
 // For rounding integers.
 template<typename T>
-static inline T RoundDown(T x, int n) {
-  DCHECK(IsPowerOfTwo(n));
-  return (x & -n);
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
+  return
+      // DCHECK(IsPowerOfTwo(n)) in a form acceptable in a constexpr function:
+      (kIsDebugBuild && !IsPowerOfTwo(n)) ? (LOG(FATAL) << n << " isn't a power of 2", T(0))
+      : (x & -n);
 }
 
 template<typename T>
-static inline T RoundUp(T x, int n) {
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
   return RoundDown(x + n - 1, n);
 }
 
 // For aligning pointers.
 template<typename T>
-static inline T* AlignDown(T* x, int n) {
-  CHECK(IsPowerOfTwo(n));
-  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(x) & -static_cast<uintptr_t>(n));
+static inline T* AlignDown(T* x, uintptr_t n) {
+  return reinterpret_cast<T*>(RoundDown(reinterpret_cast<uintptr_t>(x), n));
 }
 
 template<typename T>
-static inline T* AlignUp(T* x, int n) {
-  return AlignDown(reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(x) + static_cast<uintptr_t>(n - 1)), n);
+static inline T* AlignUp(T* x, uintptr_t n) {
+  return reinterpret_cast<T*>(RoundUp(reinterpret_cast<uintptr_t>(x), n));
 }
 
 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
@@ -162,33 +169,25 @@
   return x + 1;
 }
 
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 5-2, page 66, where the function is called pop.
-static inline int CountOneBits(uint32_t x) {
-  x = x - ((x >> 1) & 0x55555555);
-  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
-  x = (x + (x >> 4)) & 0x0F0F0F0F;
-  x = x + (x >> 8);
-  x = x + (x >> 16);
-  return static_cast<int>(x & 0x0000003F);
+template<typename T>
+static constexpr int CLZ(T x) {
+  return (sizeof(T) == sizeof(uint32_t))
+      ? __builtin_clz(x)
+      : __builtin_clzll(x);
 }
 
 template<typename T>
-static inline int CLZ(T x) {
-  if (sizeof(T) == sizeof(uint32_t)) {
-    return __builtin_clz(x);
-  } else {
-    return __builtin_clzll(x);
-  }
+static constexpr int CTZ(T x) {
+  return (sizeof(T) == sizeof(uint32_t))
+      ? __builtin_ctz(x)
+      : __builtin_ctzll(x);
 }
 
 template<typename T>
-static inline int CTZ(T x) {
-  if (sizeof(T) == sizeof(uint32_t)) {
-    return __builtin_ctz(x);
-  } else {
-    return __builtin_ctzll(x);
-  }
+static constexpr int POPCOUNT(T x) {
+  return (sizeof(T) == sizeof(uint32_t))
+      ? __builtin_popcount(x)
+      : __builtin_popcountll(x);
 }
 
 static inline uint32_t PointerToLowMemUInt32(const void* p) {
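
Two details in this hunk are easy to miss. TypeIdentity makes the second parameter a non-deduced context, so a call like RoundUp(some_uint64, 8) no longer fails deduction when the arguments have different types; and the constexpr rewrite of RoundDown folds the debug check into the single return expression that C++11 constexpr requires. A self-contained sketch of both (the DCHECK arm is elided):

    #include <cstdint>

    template <typename T> struct TypeIdentity { typedef T type; };

    template <typename T>
    static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
      return x & -n;  // n must be a power of two; the real code DCHECKs this.
    }

    template <typename T>
    static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
      return RoundDown(x + n - 1, n);
    }

    // With a plain T n parameter, T would be deduced as both uint64_t and int
    // here and deduction would fail; TypeIdentity sidesteps that.
    static_assert(RoundUp(uint64_t{100}, 8) == 104, "rounds up");
    static_assert(RoundDown(uint64_t{100}, 8) == 96, "rounds down");
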
diff --git a/runtime/vmap_table.h b/runtime/vmap_table.h
index 2fbaebe..9821753 100644
--- a/runtime/vmap_table.h
+++ b/runtime/vmap_table.h
@@ -99,7 +99,7 @@
       }
       matches++;
     }
-    CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
+    CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(POPCOUNT(spill_mask)));
     uint32_t spill_shifts = 0;
     while (matches != (vmap_offset + 1)) {
       DCHECK_NE(spill_mask, 0u);