Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and updates the x64 makefile so that it no longer breaks that
build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 3e628fe..ea33e48 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -157,6 +157,8 @@
 #include "src/base/atomicops_internals_mips_gcc.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
 #include "src/base/atomicops_internals_mips64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_S390
+#include "src/base/atomicops_internals_s390_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
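
Note: the new #elif keys off V8_HOST_ARCH_S390, which
src/base/build_config.h derives from the compiler predefines. A
sketch of that detection, assuming the usual GCC/Clang macros (not
part of this patch):

    #if defined(__s390__)
    #define V8_HOST_ARCH_S390 1
    #if defined(__s390x__)
    #define V8_HOST_ARCH_64_BIT 1
    #endif
    #endif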
diff --git a/src/base/atomicops_internals_s390_gcc.h b/src/base/atomicops_internals_s390_gcc.h
new file mode 100644
index 0000000..6e34f30
--- /dev/null
+++ b/src/base/atomicops_internals_s390_gcc.h
@@ -0,0 +1,152 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return __sync_add_and_fetch(ptr, increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() { __sync_synchronize(); }
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef V8_TARGET_ARCH_S390X
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return __sync_add_and_fetch(ptr, increment);
+}
+
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
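
Note: the port above layers V8's legacy atomics API over the GCC
__sync builtins. As the implementations show, Release_Store issues
the barrier before the store and Acquire_Load issues it after the
load, so a plain payload can be published through a flag. A minimal
usage sketch (g_payload/g_ready are hypothetical, not part of this
patch):

    #include "src/base/atomicops.h"

    int g_payload = 0;               // plain data to publish
    v8::base::Atomic32 g_ready = 0;  // guard flag

    void Producer() {
      g_payload = 42;
      // Barrier, then store: the payload write is visible first.
      v8::base::Release_Store(&g_ready, 1);
    }

    int Consumer() {
      // Load, then barrier: the payload read cannot move above the flag.
      while (v8::base::Acquire_Load(&g_ready) == 0) {
      }
      return g_payload;
    }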
diff --git a/src/base/bits.h b/src/base/bits.h
index 4ba3c47..0e76624 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -92,6 +92,20 @@
 }
 
 
+// ReverseBits(value) returns |value| in reverse bit order.
+template <typename T>
+T ReverseBits(T value) {
+  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+         (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
+
+
 // CountTrailingZeros32(value) returns the number of zero bits preceding the
 // least significant 1 bit in |value| if |value| is non-zero, otherwise it
 // returns 32.
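
Note: a quick worked example of what ReverseBits computes (sketch
only, not part of this patch):

    #include <cstdint>
    #include "src/base/bits.h"

    void ReverseBitsExamples() {
      // Bit 0 moves to bit 7: 0x01 -> 0x80.
      uint8_t a = v8::base::bits::ReverseBits<uint8_t>(0x01);
      // 0x12345678 reversed bit-for-bit is 0x1E6A2C48.
      uint32_t b = v8::base::bits::ReverseBits<uint32_t>(0x12345678);
      (void)a;
      (void)b;
    }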
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index 692494a..777f379 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -312,6 +312,8 @@
       architecture_(0),
       variant_(-1),
       part_(0),
+      icache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
+      dcache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
       has_fpu_(false),
       has_cmov_(false),
       has_sahf_(false),
@@ -644,9 +646,16 @@
       if (n == 0 || entry.a_type == AT_NULL) {
         break;
       }
-      if (entry.a_type == AT_PLATFORM) {
-        auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
-        break;
+      switch (entry.a_type) {
+        case AT_PLATFORM:
+          auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+          break;
+        case AT_ICACHEBSIZE:
+          icache_line_size_ = entry.a_un.a_val;
+          break;
+        case AT_DCACHEBSIZE:
+          dcache_line_size_ = entry.a_un.a_val;
+          break;
       }
     }
     fclose(fp);
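
Note: the two new cases pick up AT_ICACHEBSIZE/AT_DCACHEBSIZE, aux
vector entries that carry the cache line sizes in bytes on kernels
that report them (notably PPC and s390). A standalone sketch of the
same lookup via glibc's getauxval(), which returns 0 for absent
entries and so matches UNKNOWN_CACHE_LINE_SIZE (assumes glibc 2.16+,
not part of this patch):

    #include <elf.h>       // AT_ICACHEBSIZE, AT_DCACHEBSIZE
    #include <sys/auxv.h>  // getauxval()
    #include <cstdio>

    int main() {
      unsigned long icache = getauxval(AT_ICACHEBSIZE);  // 0 if absent
      unsigned long dcache = getauxval(AT_DCACHEBSIZE);
      std::printf("icache=%lu dcache=%lu\n", icache, dcache);
      return 0;
    }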
diff --git a/src/base/cpu.h b/src/base/cpu.h
index ca108fa..3778d27 100644
--- a/src/base/cpu.h
+++ b/src/base/cpu.h
@@ -75,6 +75,9 @@
 
   // General features
   bool has_fpu() const { return has_fpu_; }
+  int icache_line_size() const { return icache_line_size_; }
+  int dcache_line_size() const { return dcache_line_size_; }
+  static const int UNKNOWN_CACHE_LINE_SIZE = 0;
 
   // x86 features
   bool has_cmov() const { return has_cmov_; }
@@ -118,6 +121,8 @@
   int architecture_;
   int variant_;
   int part_;
+  int icache_line_size_;
+  int dcache_line_size_;
   bool has_fpu_;
   bool has_cmov_;
   bool has_sahf_;
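
Note: callers query the probed sizes through the new accessors and
fall back when the kernel reported nothing. A hedged sketch
(kFallback is a hypothetical default, not part of this patch):

    #include "src/base/cpu.h"

    int DcacheLineSizeOrDefault() {
      static const int kFallback = 256;  // hypothetical default
      v8::base::CPU cpu;                 // probing runs in the constructor
      int size = cpu.dcache_line_size();
      return size == v8::base::CPU::UNKNOWN_CACHE_LINE_SIZE ? kFallback
                                                            : size;
    }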