Upgrade to 3.29

Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.

Bug: 17370214

Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/base/DEPS b/src/base/DEPS
new file mode 100644
index 0000000..e53cadf
--- /dev/null
+++ b/src/base/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+  "-include",
+  "+include/v8config.h",
+  "+include/v8stdint.h",
+  "-src",
+  "+src/base",
+]
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
new file mode 100644
index 0000000..eba172f
--- /dev/null
+++ b/src/base/atomicops.h
@@ -0,0 +1,163 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
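+//
+// For example, a minimal usage sketch (illustrative only; |flag| is a
+// hypothetical variable, not part of this interface):
+//
+//   v8::base::Atomic32 flag = 0;
+//   v8::base::NoBarrier_Store(&flag, 1);
+//   v8::base::Atomic32 value = v8::base::NoBarrier_Load(&flag);
+//   // flag = 1;  // Incorrect: direct assignment bypasses these routines.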
+//
+
+#ifndef V8_BASE_ATOMICOPS_H_
+#define V8_BASE_ATOMICOPS_H_
+
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// x64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace v8 {
+namespace base {
+
+typedef char Atomic8;
+typedef int32_t Atomic32;
+#ifdef V8_HOST_ARCH_64_BIT
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__)
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
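+//
+// As an illustrative sketch (assuming a zero-initialized lock word, where
+// zero means "unlocked"), these primitives suffice for a minimal spinlock:
+//
+//   void Lock(volatile Atomic32* lock) {
+//     // The CAS returns the previous value, so 0 means we took the lock.
+//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
+//       // Spin until the lock word transitions from 0 to 1.
+//     }
+//   }
+//   void Unlock(volatile Atomic32* lock) { Release_Store(lock, 0); }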
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
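+
+// An illustrative message-passing sketch using the pairs above (assuming
+// |data| is a plain int and |ready| is an Atomic32 initialized to 0):
+//
+//   // Producer thread:
+//   data = 42;
+//   Release_Store(&ready, 1);  // Publish: prior writes become visible.
+//
+//   // Consumer thread:
+//   if (Acquire_Load(&ready) == 1) {
+//     // Reading |data| here is guaranteed to observe 42.
+//   }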
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // V8_HOST_ARCH_64_BIT
+
+} }  // namespace v8::base
+
+// Include our platform-specific implementation.
+#if defined(THREAD_SANITIZER)
+#include "src/base/atomicops_internals_tsan.h"
+#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "src/base/atomicops_internals_x86_msvc.h"
+#elif defined(__APPLE__)
+#include "src/base/atomicops_internals_mac.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "src/base/atomicops_internals_arm64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
+#include "src/base/atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "src/base/atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
+#include "src/base/atomicops_internals_mips_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
+#include "src/base/atomicops_internals_mips64_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "src/base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif  // V8_BASE_ATOMICOPS_H_
diff --git a/src/base/atomicops_internals_arm64_gcc.h b/src/base/atomicops_internals_arm64_gcc.h
new file mode 100644
index 0000000..b01783e
--- /dev/null
+++ b/src/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,316 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+namespace v8 {
+namespace base {
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
+}
+
+// NoBarrier versions of the operations include "memory" in the clobber list.
+// This is not required for direct usage of the NoBarrier versions of the
+// operations. However, it is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from outside
+// the call is reordered between the operation and the memory barrier. This
+// does not change the generated code, so it has little or no impact on the
+// NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
+    "cmp %w[prev], %w[old_value]           \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
+    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                       \n\t"
+    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
+    "add %w[result], %w[result], %w[increment]\n\t"
+    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
+    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 result;
+
+  MemoryBarrier();
+  result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+
+  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev;
+
+  MemoryBarrier();
+  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %w[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %w[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[prev], %[ptr]                  \n\t"
+    "cmp %[prev], %[old_value]             \n\t"
+    "bne 1f                                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    "1:                                    \n\t"
+    : [prev]"=&r" (prev),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"IJr" (old_value),
+      [new_value]"r" (new_value)
+    : "cc", "memory"
+  );  // NOLINT
+
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                    \n\t"
+    "ldxr %[result], %[ptr]                \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
+    "cbnz %w[temp], 0b                     \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 result;
+  int32_t temp;
+
+  __asm__ __volatile__ (  // NOLINT
+    "0:                                     \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
+    "add %[result], %[result], %[increment] \n\t"
+    "stxr %w[temp], %[result], %[ptr]       \n\t"
+    "cbnz %w[temp], 0b                      \n\t"
+    : [result]"=&r" (result),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"IJr" (increment)
+    : "memory"
+  );  // NOLINT
+
+  return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 result;
+
+  MemoryBarrier();
+  result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+
+  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev;
+
+  MemoryBarrier();
+  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+  return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __asm__ __volatile__ (  // NOLINT
+    "stlr %x[value], %[ptr]  \n\t"
+    : [ptr]"=Q" (*ptr)
+    : [value]"r" (value)
+    : "memory"
+  );  // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value;
+
+  __asm__ __volatile__ (  // NOLINT
+    "ldar %x[value], %[ptr]  \n\t"
+    : [value]"=r" (value)
+    : [ptr]"Q" (*ptr)
+    : "memory"
+  );  // NOLINT
+
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
diff --git a/src/base/atomicops_internals_arm_gcc.h b/src/base/atomicops_internals_arm_gcc.h
new file mode 100644
index 0000000..069b1ff
--- /dev/null
+++ b/src/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,301 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction at
+//   all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+//   writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+//   barrier (though writing to the co-processor will still work).
+//   However, on single core devices (e.g. Nexus One, or Nexus S),
+//   this instruction will take up to 200 ns, which is huge, even though
+//   it's completely unneeded on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+//   single or multi-core. However, the kernel provides a useful helper
+//   function at a fixed memory address (0xffff0fa0), which will always
+//   perform a memory barrier in the most efficient way. I.e. on single
+//   core devices, this is an empty function that exits immediately.
+//   On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+//   multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+//   are needed for correct execution. Always call the kernel helper, even
+//   when targeting ARMv5TE.
+//
+
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+  // Note: This is a function call, which is also an implicit compiler barrier.
+  typedef void (*KernelMemoryBarrierFunc)();
+  ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+  __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+    defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  int reloop;
+  do {
+    // The following is equivalent to:
+    //
+    //   prev_value = LDREX(ptr)
+    //   reloop = 0
+    //   if (prev_value != old_value)
+    //      reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    mov %1, #0\n"
+                         "    cmp %0, %4\n"
+#ifdef __thumb2__
+                         "    it eq\n"
+#endif
+                         "    strexeq %1, %5, [%3]\n"
+                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(old_value), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return prev_value;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 value;
+  int reloop;
+  do {
+    // Equivalent to:
+    //
+    //  value = LDREX(ptr)
+    //  value += increment
+    //  reloop = STREX(ptr, value)
+    //
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    add %0, %0, %4\n"
+                         "    strex %1, %0, [%3]\n"
+                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(increment)
+                         : "cc", "memory");
+  } while (reloop);
+  return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  // TODO(digit): Investigate if it's possible to implement this with
+  // a single MemoryBarrier() operation between the LDREX and STREX.
+  // See http://crbug.com/246514
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  int reloop;
+  do {
+    // old_value = LDREX(ptr)
+    // reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("   ldrex %0, [%3]\n"
+                         "   strex %1, %4, [%3]\n"
+                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+      defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier, so there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+                              Atomic32 new_value,
+                              volatile Atomic32* ptr) {
+  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+}  // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value)
+      return prev_value;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value) {
+      // Always ensure acquire semantics.
+      MemoryBarrier();
+      return prev_value;
+    }
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  // This could be implemented as:
+  //    MemoryBarrier();
+  //    return NoBarrier_CompareAndSwap();
+  //
+  // But it would use 3 barriers per successful CAS. To improve performance,
+  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+  // - An early return due to (prev_value != old_value) performs
+  //   a memory barrier with no store, which is equivalent to the
+  //   generic implementation above.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#else
+#  error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed in case of 32-bit alignment of |ptr| values.
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+// Byte accessors.
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/base/atomicops_internals_atomicword_compat.h b/src/base/atomicops_internals_atomicword_compat.h
new file mode 100644
index 0000000..0530ced
--- /dev/null
+++ b/src/base/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
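+//
+// For example, on a hypothetical LP32 target where intptr_t is long, the
+// call below would not compile without these overloads, since no Atomic32
+// overload accepts a long* (illustrative sketch):
+//
+//   AtomicWord word = 0;
+//   v8::base::Acquire_Load(&word);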
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace base {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::base::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::base::Release_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::base::Acquire_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::base::Release_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return v8::base::Acquire_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return v8::base::Release_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} }  // namespace v8::base
+
+#endif  // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
diff --git a/src/base/atomicops_internals_mac.h b/src/base/atomicops_internals_mac.h
new file mode 100644
index 0000000..a046872
--- /dev/null
+++ b/src/base/atomicops_internals_mac.h
@@ -0,0 +1,204 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32(old_value, new_value,
+                                 const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+                                     const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+  OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+                                        const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64(old_value, new_value,
+                                 reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+                                     reinterpret_cast<volatile int64_t*>(ptr)));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return OSAtomicAdd64Barrier(increment,
+                              reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64Barrier(
+        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  // The libkern interface does not distinguish between
+  // Acquire and Release memory barriers; they are equivalent.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif  // defined(__LP64__)
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/src/base/atomicops_internals_mips64_gcc.h b/src/base/atomicops_internals_mips64_gcc.h
new file mode 100644
index 0000000..1f629b6
--- /dev/null
+++ b/src/base/atomicops_internals_mips64_gcc.h
@@ -0,0 +1,307 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %2\n"  // temp = *ptr
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %2\n"  // temp = *ptr
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
diff --git a/src/base/atomicops_internals_mips_gcc.h b/src/base/atomicops_internals_mips_gcc.h
new file mode 100644
index 0000000..d33b668
--- /dev/null
+++ b/src/base/atomicops_internals_mips_gcc.h
@@ -0,0 +1,160 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, 0(%4)\n"  // prev = *ptr
+                       "bne %0, %2, 2f\n"  // if (prev != old_value) goto 2
+                       "move %1, %3\n"  // tmp = new_value
+                       "sc %1, 0(%4)\n"  // *ptr = tmp (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "r" (ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       ".set at\n"
+                       "1:\n"
+                       "ll %1, 0(%3)\n"  // old = *ptr
+                       "move %0, %2\n"  // temp = new_value
+                       "sc %0, 0(%3)\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old)
+                       : "r" (new_value), "r" (ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, 0(%3)\n"  // temp = *ptr
+                       "addu %1, %0, %2\n"  // temp2 = temp + increment
+                       "sc %1, 0(%3)\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %2\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2)
+                       : "Ir" (increment), "r" (ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
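+
+// Illustrative publish/consume sketch using the routines above; the names
+// |payload| and |ready| are hypothetical:
+//
+//   // Producer:
+//   NoBarrier_Store(&payload, 42);
+//   Release_Store(&ready, 1);
+//
+//   // Consumer:
+//   while (Acquire_Load(&ready) == 0) { /* spin */ }
+//   Atomic32 v = NoBarrier_Load(&payload);  // observes 42
+//
+// The Release_Store/Acquire_Load pairing orders the payload write before the
+// flag's visibility, so the consumer cannot see the flag without the payload.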
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/src/base/atomicops_internals_tsan.h b/src/base/atomicops_internals_tsan.h
new file mode 100644
index 0000000..646e5bd
--- /dev/null
+++ b/src/base/atomicops_internals_tsan.h
@@ -0,0 +1,363 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
+
+namespace v8 {
+namespace base {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+
+extern "C" {
+typedef char  __tsan_atomic8;
+typedef short __tsan_atomic16;  // NOLINT
+typedef int   __tsan_atomic32;
+typedef long  __tsan_atomic64;  // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
+typedef enum {
+  __tsan_memory_order_relaxed,
+  __tsan_memory_order_consume,
+  __tsan_memory_order_acquire,
+  __tsan_memory_order_release,
+  __tsan_memory_order_acq_rel,
+  __tsan_memory_order_seq_cst,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+    __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+    __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+    __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+    __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
+    __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+    __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+    __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+    __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+    __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+}  // extern "C"
+
+#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
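+
+// Note: the tsan compare_exchange_strong interface follows C11 semantics and
+// writes the value actually observed at |ptr| back into |cmp| when the
+// exchange fails, so returning |cmp| preserves the atomicops contract of
+// always returning the previous value of *ptr.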
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
+  return __tsan_atomic32_exchange(ptr, new_value,
+      __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + __tsan_atomic32_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 cmp = old_value;
+  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
+  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + __tsan_atomic64_fetch_add(ptr, increment,
+      __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+  return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 cmp = old_value;
+  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
+  return cmp;
+}
+
+inline void MemoryBarrier() {
+  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/src/base/atomicops_internals_x86_gcc.cc b/src/base/atomicops_internals_x86_gcc.cc
new file mode 100644
index 0000000..969f237
--- /dev/null
+++ b/src/base/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,115 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "src/base/atomicops.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file.  If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
+// of the global offset table.  To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%ebx, %%edi\n"     \
+      "cpuid\n"                \
+      "xchg %%edi, %%ebx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%rbx, %%rdi\n"     \
+      "cpuid\n"                \
+      "xchg %%rdi, %%rbx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid)        // initialize the struct only on x86
+
+namespace v8 {
+namespace base {
+
+// Set the flags so that code will run correctly and conservatively even if we
+// haven't been initialized yet: at that point the process is probably still
+// single threaded, so these default values should be safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
+#if !defined(__SSE2__)
+  false,          // no SSE2
+#endif
+};
+
+} }  // namespace v8::base
+
+namespace {
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+void AtomicOps_Internalx86CPUFeaturesInit() {
+  using v8::base::AtomicOps_Internalx86CPUFeatures;
+
+  uint32_t eax = 0;
+  uint32_t ebx = 0;
+  uint32_t ecx = 0;
+  uint32_t edx = 0;
+
+  // Get vendor string (issue CPUID with eax = 0)
+  cpuid(eax, ebx, ecx, edx, 0);
+  char vendor[13];
+  memcpy(vendor, &ebx, 4);
+  memcpy(vendor + 4, &edx, 4);
+  memcpy(vendor + 8, &ecx, 4);
+  vendor[12] = 0;
+
+  // get feature flags in ecx/edx, and family/model in eax
+  cpuid(eax, ebx, ecx, edx, 1);
+
+  int family = (eax >> 8) & 0xf;        // family and model fields
+  int model = (eax >> 4) & 0xf;
+  if (family == 0xf) {                  // use extended family and model fields
+    family += (eax >> 20) & 0xff;
+    model += ((eax >> 16) & 0xf) << 4;
+  }
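+
+  // Worked example with a hypothetical signature: for eax == 0x00020f51 the
+  // base fields give family 0xf and model 5, so the extended fields apply:
+  // family = 0xf + ((eax >> 20) & 0xff) = 15 and
+  // model = 5 + (((eax >> 16) & 0xf) << 4) = 37, which falls in the
+  // Opteron Rev E range checked below.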
+
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+
+#if !defined(__SSE2__)
+  // edx bit 26 is SSE2, which tells us whether we can use mfence.
+  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+#endif
+}
+
+class AtomicOpsx86Initializer {
+ public:
+  AtomicOpsx86Initializer() {
+    AtomicOps_Internalx86CPUFeaturesInit();
+  }
+};
+
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+}  // namespace
+
+#endif  // if x86
+
+#endif  // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
new file mode 100644
index 0000000..ec87c42
--- /dev/null
+++ b/src/base/atomicops_internals_x86_gcc.h
@@ -0,0 +1,274 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+#if !defined(__SSE2__)
+  bool has_sse2;             // Processor has SSE2.
+#endif
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
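+
+// Note on the constraints above: "0" (old_value) forces old_value into the
+// same register as output 0, i.e. %eax, which is the implicit operand that
+// cmpxchgl compares against *ptr; on a mismatch the hardware loads the
+// current value of *ptr into %eax, so |prev| holds the old value either way.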
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+#if defined(__x86_64__) || defined(__SSE2__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {  // mfence is faster but not present on PIII
+    Atomic32 x = 0;
+    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
+  }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    *ptr = value;
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {
+    NoBarrier_AtomicExchange(ptr, value);  // acts as a barrier on PIII
+  }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare.  Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+} }  // namespace v8::base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/base/atomicops_internals_x86_msvc.h b/src/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000..adc4031
--- /dev/null
+++ b/src/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,202 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "src/base/macros.h"
+#include "src/base/win32-headers.h"
+
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
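+
+// Note: on x86 the Interlocked* functions act as full memory barriers, which
+// is why the "NoBarrier" variants above simply reuse the barrier versions;
+// an implementation is allowed to be stronger than its name promises.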
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+  // See #undef and note at the top of this file.
+  __faststorefence();
+#else
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+#endif
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  // The atomic exchange acts as a barrier in this implementation.
+  NoBarrier_AtomicExchange(ptr, value);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  // The atomic exchange acts as a barrier in this implementation.
+  NoBarrier_AtomicExchange(ptr, value);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif  // defined(_WIN64)
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/src/base/base.gyp b/src/base/base.gyp
new file mode 100644
index 0000000..e391e2e
--- /dev/null
+++ b/src/base/base.gyp
@@ -0,0 +1,46 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'base-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../../testing/gtest.gyp:gtest',
+        '../../testing/gtest.gyp:gtest_main',
+        '../../tools/gyp/v8.gyp:v8_libbase',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'bits-unittest.cc',
+        'cpu-unittest.cc',
+        'division-by-constant-unittest.cc',
+        'flags-unittest.cc',
+        'platform/condition-variable-unittest.cc',
+        'platform/mutex-unittest.cc',
+        'platform/platform-unittest.cc',
+        'platform/semaphore-unittest.cc',
+        'platform/time-unittest.cc',
+        'sys-info-unittest.cc',
+        'utils/random-number-generator-unittest.cc',
+      ],
+      'conditions': [
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/base/bits-unittest.cc b/src/base/bits-unittest.cc
new file mode 100644
index 0000000..06c1183
--- /dev/null
+++ b/src/base/bits-unittest.cc
@@ -0,0 +1,167 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "testing/gtest-support.h"
+
+#ifdef DEBUG
+#define DISABLE_IN_RELEASE(Name) Name
+#else
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+TEST(Bits, CountPopulation32) {
+  EXPECT_EQ(0u, CountPopulation32(0));
+  EXPECT_EQ(1u, CountPopulation32(1));
+  EXPECT_EQ(8u, CountPopulation32(0x11111111));
+  EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
+  EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
+  EXPECT_EQ(32u, CountPopulation32(0xffffffff));
+}
+
+
+TEST(Bits, CountLeadingZeros32) {
+  EXPECT_EQ(32u, CountLeadingZeros32(0));
+  EXPECT_EQ(31u, CountLeadingZeros32(1));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(31u - shift, CountLeadingZeros32(1u << shift));
+  }
+  EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
+}
+
+
+TEST(Bits, CountTrailingZeros32) {
+  EXPECT_EQ(32u, CountTrailingZeros32(0));
+  EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(shift, CountTrailingZeros32(1u << shift));
+  }
+  EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
+}
+
+
+TEST(Bits, IsPowerOfTwo32) {
+  EXPECT_FALSE(IsPowerOfTwo32(0U));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_TRUE(IsPowerOfTwo32(1U << shift));
+    EXPECT_FALSE(IsPowerOfTwo32((1U << shift) + 5U));
+    EXPECT_FALSE(IsPowerOfTwo32(~(1U << shift)));
+  }
+  TRACED_FORRANGE(uint32_t, shift, 2, 31) {
+    EXPECT_FALSE(IsPowerOfTwo32((1U << shift) - 1U));
+  }
+  EXPECT_FALSE(IsPowerOfTwo32(0xffffffff));
+}
+
+
+TEST(Bits, IsPowerOfTwo64) {
+  EXPECT_FALSE(IsPowerOfTwo64(0U));
+  TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+    EXPECT_TRUE(IsPowerOfTwo64(V8_UINT64_C(1) << shift));
+    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) + 5U));
+    EXPECT_FALSE(IsPowerOfTwo64(~(V8_UINT64_C(1) << shift)));
+  }
+  TRACED_FORRANGE(uint32_t, shift, 2, 63) {
+    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) - 1U));
+  }
+  EXPECT_FALSE(IsPowerOfTwo64(V8_UINT64_C(0xffffffffffffffff)));
+}
+
+
+TEST(Bits, RoundUpToPowerOfTwo32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(1u << shift, RoundUpToPowerOfTwo32(1u << shift));
+  }
+  EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
+  EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
+  EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
+}
+
+
+TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
+  ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
+                            "0x80000000");
+}
+
+
+TEST(Bits, RoundDownToPowerOfTwo32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(1u << shift, RoundDownToPowerOfTwo32(1u << shift));
+  }
+  EXPECT_EQ(0u, RoundDownToPowerOfTwo32(0));
+  EXPECT_EQ(4u, RoundDownToPowerOfTwo32(5));
+  EXPECT_EQ(0x80000000u, RoundDownToPowerOfTwo32(0x80000001u));
+}
+
+
+TEST(Bits, RotateRight32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(0u, RotateRight32(0u, shift));
+  }
+  EXPECT_EQ(1u, RotateRight32(1, 0));
+  EXPECT_EQ(1u, RotateRight32(2, 1));
+  EXPECT_EQ(0x80000000u, RotateRight32(1, 1));
+}
+
+
+TEST(Bits, RotateRight64) {
+  TRACED_FORRANGE(uint64_t, shift, 0, 63) {
+    EXPECT_EQ(0u, RotateRight64(0u, shift));
+  }
+  EXPECT_EQ(1u, RotateRight64(1, 0));
+  EXPECT_EQ(1u, RotateRight64(2, 1));
+  EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
+}
+
+
+TEST(Bits, SignedAddOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedAddOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
+                                  std::numeric_limits<int32_t>::max(), &val));
+  EXPECT_EQ(-2, val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedAddOverflow32(i, j, &val));
+      EXPECT_EQ(i + j, val);
+    }
+  }
+}
+
+
+TEST(Bits, SignedSubOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedSubOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedSubOverflow32(i, j, &val));
+      EXPECT_EQ(i - j, val);
+    }
+  }
+}
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/bits.cc b/src/base/bits.cc
new file mode 100644
index 0000000..6daee53
--- /dev/null
+++ b/src/base/bits.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
+  DCHECK_LE(value, 0x80000000u);
+  value = value - 1;
+  value = value | (value >> 1);
+  value = value | (value >> 2);
+  value = value | (value >> 4);
+  value = value | (value >> 8);
+  value = value | (value >> 16);
+  return value + 1;
+}
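+
+// Illustrative trace: for value == 37, value - 1 == 36 == 0b100100; the
+// shift/or cascade smears the highest set bit into every lower position,
+// giving 0b111111 == 63, and 63 + 1 == 64 is the next power of two.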
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/bits.h b/src/base/bits.h
new file mode 100644
index 0000000..e6a733a
--- /dev/null
+++ b/src/base/bits.h
@@ -0,0 +1,150 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BITS_H_
+#define V8_BASE_BITS_H_
+
+#include "include/v8stdint.h"
+#include "src/base/macros.h"
+#if V8_CC_MSVC
+#include <intrin.h>
+#endif
+#if V8_OS_WIN32
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+// CountPopulation32(value) returns the number of bits set in |value|.
+inline uint32_t CountPopulation32(uint32_t value) {
+#if V8_HAS_BUILTIN_POPCOUNT
+  return __builtin_popcount(value);
+#else
+  value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+  value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+  value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+  value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+  return value;
+#endif
+}
+
+
+// CountLeadingZeros32(value) returns the number of zero bits above the most
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
+// 32.
+inline uint32_t CountLeadingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CLZ
+  return value ? __builtin_clz(value) : 32;
+#elif V8_CC_MSVC
+  unsigned long result;  // NOLINT(runtime/int)
+  if (!_BitScanReverse(&result, value)) return 32;
+  return static_cast<uint32_t>(31 - result);
+#else
+  value = value | (value >> 1);
+  value = value | (value >> 2);
+  value = value | (value >> 4);
+  value = value | (value >> 8);
+  value = value | (value >> 16);
+  return CountPopulation32(~value);
+#endif
+}
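+
+// Illustrative trace of the portable fallback: for value == 0x0f0f0f0f the
+// shift/or cascade smears the top set bit downward, giving 0x0fffffff; the
+// complement 0xf0000000 keeps only the original leading zeros, and its
+// population count is 4.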
+
+
+// CountTrailingZeros32(value) returns the number of zero bits below the least
+// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
+// 32.
+inline uint32_t CountTrailingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CTZ
+  return value ? __builtin_ctz(value) : 32;
+#elif V8_CC_MSVC
+  unsigned long result;  // NOLINT(runtime/int)
+  if (!_BitScanForward(&result, value)) return 32;
+  return static_cast<uint32_t>(result);
+#else
+  if (value == 0) return 32;
+  unsigned count = 0;
+  for (value ^= value - 1; value >>= 1; ++count)
+    ;
+  return count;
+#endif
+}
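+
+// Illustrative trace of the portable fallback: for value == 20 (0b10100),
+// value ^ (value - 1) == 0b00111, a mask of the trailing zeros plus the
+// lowest set bit; shifting right until it vanishes bumps count twice,
+// matching the two trailing zeros.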
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo32(uint32_t value) {
+  return value && !(value & (value - 1));
+}
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo64(uint64_t value) {
+  return value && !(value & (value - 1));
+}
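+
+// The value & (value - 1) trick above works because subtracting one clears
+// the lowest set bit and sets every bit below it, so the AND is zero exactly
+// when that was the only set bit.  E.g. 8 & 7 == 0, but 12 & 11 == 8.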
+
+
+// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
+// greater than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is. |value| must be less than or equal to
+// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
+// Jr., figure 3-3, page 48, where the function is called clp2.
+uint32_t RoundUpToPowerOfTwo32(uint32_t value);
+
+
+// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
+// less than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is.
+inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
+  if (value > 0x80000000u) return 0x80000000u;
+  uint32_t result = RoundUpToPowerOfTwo32(value);
+  if (result > value) result >>= 1;
+  return result;
+}
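+
+// Example: RoundDownToPowerOfTwo32(5) first rounds up to 8, sees 8 > 5, and
+// shifts down to 4.  Inputs above 0x80000000u are clamped up front because
+// they exceed RoundUpToPowerOfTwo32's documented limit.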
+
+
+inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+  if (shift == 0) return value;
+  return (value >> shift) | (value << (32 - shift));
+}
+
+
+inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
+  if (shift == 0) return value;
+  return (value >> shift) | (value << (64 - shift));
+}
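+
+// The shift == 0 early returns above are not just an optimization: without
+// them the rotations would compute value << 32 (or value << 64), and
+// shifting by the full operand width is undefined behavior in C++.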
+
+
+// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
+// |rhs|, stores the result into the variable pointed to by |val|, and returns
+// true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SADD_OVERFLOW
+  return __builtin_sadd_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
+#endif
+}
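+
+// The fallback above detects overflow via sign bits: overflow occurred iff
+// |lhs| and |rhs| have the same sign and |res| has the opposite one.  For
+// example, INT32_MAX + INT32_MAX wraps to -2, so both (res ^ lhs) and
+// (res ^ rhs) have bit 31 set and the function returns true.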
+
+
+// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
+// |rhs|, stores the result into the variable pointed to by |val|, and returns
+// true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SSUB_OVERFLOW
+  return __builtin_ssub_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
+#endif
+}
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_BITS_H_
diff --git a/src/base/build_config.h b/src/base/build_config.h
new file mode 100644
index 0000000..2bf57c9
--- /dev/null
+++ b/src/base/build_config.h
@@ -0,0 +1,161 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BUILD_CONFIG_H_
+#define V8_BASE_BUILD_CONFIG_H_
+
+#include "include/v8config.h"
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#if defined(__native_client__)
+// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
+// generates ARM machine code, together with a portable ARM simulator
+// compiled for the host architecture in question.
+//
+// Since Native Client is ILP-32 on all architectures, we use
+// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#define V8_HOST_ARCH_X64 1
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4  // Check for x32.
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#define V8_HOST_ARCH_64_BIT 1
+#endif
+#endif  // __native_client__
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_ARM64 1
+#define V8_HOST_ARCH_64_BIT 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__mips64)
+#define V8_HOST_ARCH_MIPS64 1
+#define V8_HOST_ARCH_64_BIT 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error "Host architecture was not detected as supported by v8"
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || \
+    defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+#  define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
+// Target architecture detection. This may be set externally. If not, detect
+// in the same way as the host architecture, that is, target the native
+// environment as presented by the compiler.
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
+    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+    !V8_TARGET_ARCH_MIPS64
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_TARGET_ARCH_X64 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_ARM64 1
+#elif defined(__ARMEL__)
+#define V8_TARGET_ARCH_ARM 1
+#elif defined(__mips64)
+#define V8_TARGET_ARCH_MIPS64 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_TARGET_ARCH_MIPS 1
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+#endif
+
+// Determine architecture pointer size.
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_64_BIT
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4  // Check for x32.
+#define V8_TARGET_ARCH_32_BIT 1
+#else
+#define V8_TARGET_ARCH_64_BIT 1
+#endif
+#endif
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_MIPS
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_ARCH_32_BIT 1
+#else
+#error Unknown target architecture pointer size
+#endif
+
+// Check for supported combinations of host and target architectures.
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
+#error Target architecture ia32 is only supported on ia32 host
+#endif
+#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \
+     !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_64_BIT))
+#error Target architecture x64 is only supported on x64 host
+#endif
+#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \
+     !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT))
+#error Target architecture x32 is only supported on x64 host with x32 support
+#endif
+#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
+#error Target architecture arm is only supported on arm and ia32 host
+#endif
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
+#endif
+#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
+#error Target architecture mips is only supported on mips and ia32 host
+#endif
+#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
+#error Target architecture mips64 is only supported on mips64 and x64 host
+#endif
+
+// Determine architecture endianness.
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_MIPS
+#if defined(__MIPSEB__)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
+#define V8_TARGET_LITTLE_ENDIAN 1
+#endif
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1MB per page.
+const int kPageSizeBits = 20;
+
+#endif  // V8_BASE_BUILD_CONFIG_H_
diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h
new file mode 100644
index 0000000..475a32c
--- /dev/null
+++ b/src/base/compiler-specific.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_COMPILER_SPECIFIC_H_
+#define V8_BASE_COMPILER_SPECIFIC_H_
+
+#include "include/v8config.h"
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+//   int x ALLOW_UNUSED = ...;
+#if V8_HAS_ATTRIBUTE_UNUSED
+#define ALLOW_UNUSED __attribute__((unused))
+#else
+#define ALLOW_UNUSED
+#endif
+
+
+// Annotate a virtual method indicating it must be overriding a virtual
+// method in the parent class.
+// Use like:
+//   virtual void bar() OVERRIDE;
+#if V8_HAS_CXX11_OVERRIDE
+#define OVERRIDE override
+#else
+#define OVERRIDE /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a virtual method indicating that subclasses must not override it,
+// or annotate a class to indicate that it cannot be subclassed.
+// Use like:
+//   class B FINAL : public A {};
+//   virtual void bar() FINAL;
+#if V8_HAS_CXX11_FINAL
+#define FINAL final
+#elif V8_HAS___FINAL
+#define FINAL __final
+#elif V8_HAS_SEALED
+#define FINAL sealed
+#else
+#define FINAL /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() WARN_UNUSED_RESULT;
+#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
+#endif
+
+#endif  // V8_BASE_COMPILER_SPECIFIC_H_
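
A minimal sketch of how these annotations compose (class names hypothetical;
each macro degrades to a no-op on toolchains without the feature):

  class Shape {
   public:
    virtual ~Shape() {}
    virtual int Area() const WARN_UNUSED_RESULT;
  };

  class Square FINAL : public Shape {
   public:
    virtual int Area() const OVERRIDE;  // fails to compile if the base
                                        // signature changes under us
  };
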
diff --git a/src/base/cpu-unittest.cc b/src/base/cpu-unittest.cc
new file mode 100644
index 0000000..5c58f86
--- /dev/null
+++ b/src/base/cpu-unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(CPUTest, FeatureImplications) {
+  CPU cpu;
+
+  // ia32 and x64 features. Each EXPECT_TRUE(!p || q) below encodes the
+  // implication p => q, e.g. "SSE2 implies SSE".
+  EXPECT_TRUE(!cpu.has_sse() || cpu.has_mmx());
+  EXPECT_TRUE(!cpu.has_sse2() || cpu.has_sse());
+  EXPECT_TRUE(!cpu.has_sse3() || cpu.has_sse2());
+  EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
+  EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
+  EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
+
+  // arm features
+  EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
+}
+
+
+TEST(CPUTest, RequiredFeatures) {
+  CPU cpu;
+
+#if V8_HOST_ARCH_ARM
+  EXPECT_TRUE(cpu.has_fpu());
+#endif
+
+#if V8_HOST_ARCH_IA32
+  EXPECT_TRUE(cpu.has_fpu());
+  EXPECT_TRUE(cpu.has_sahf());
+#endif
+
+#if V8_HOST_ARCH_X64
+  EXPECT_TRUE(cpu.has_fpu());
+  EXPECT_TRUE(cpu.has_cmov());
+  EXPECT_TRUE(cpu.has_mmx());
+  EXPECT_TRUE(cpu.has_sse());
+  EXPECT_TRUE(cpu.has_sse2());
+#endif
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
new file mode 100644
index 0000000..fbfbcf6
--- /dev/null
+++ b/src/base/cpu.cc
@@ -0,0 +1,528 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h>  // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h>  // sysconf()
+#endif
+#if V8_OS_QNX
+#include <sys/syspage.h>  // cpuinfo
+#endif
+
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "src/base/logging.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC libraries.
+#if !V8_LIBC_MSVCRT
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+  // Make sure to preserve ebx, which contains the pointer
+  // to the GOT in case we're generating PIC.
+  __asm__ volatile (
+    "mov %%ebx, %%edi\n\t"
+    "cpuid\n\t"
+    "xchg %%edi, %%ebx\n\t"
+    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+#else
+  __asm__ volatile (
+    "cpuid \n\t"
+    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+#endif  // defined(__i386__) && defined(__pic__)
+}
+
+#endif  // !V8_LIBC_MSVCRT
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
+    || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+#if V8_OS_LINUX
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF  (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3)  /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP  (1 << 7)
+#define HWCAP_JAVA  (1 << 8)
+#define HWCAP_IWMMXT  (1 << 9)
+#define HWCAP_CRUNCH  (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON  (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16  (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32  (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV  (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE  (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
+static uint32_t ReadELFHWCaps() {
+  uint32_t result = 0;
+  FILE* fp = fopen("/proc/self/auxv", "r");
+  if (fp != NULL) {
+    struct { uint32_t tag; uint32_t value; } entry;
+    for (;;) {
+      size_t n = fread(&entry, sizeof(entry), 1, fp);
+      if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+        break;
+      }
+      if (entry.tag == AT_HWCAP) {
+        result = entry.value;
+        break;
+      }
+    }
+    fclose(fp);
+  }
+  return result;
+}
+
+#endif  // V8_HOST_ARCH_ARM
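
ReadELFHWCaps() walks /proc/self/auxv by hand so it also works on old C
libraries. On glibc 2.16+ (and recent Bionic) the same value is exposed
directly; a sketch assuming that libc support:

  #include <sys/auxv.h>  // getauxval()

  static uint32_t ReadELFHWCapsViaLibc() {
    return static_cast<uint32_t>(getauxval(AT_HWCAP));
  }
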
+
+#if V8_HOST_ARCH_MIPS
+int __detect_fp64_mode(void) {
+  double result = 0;
+  // Bit representation of (double)1 is 0x3FF0000000000000.
+  asm(
+    "lui $t0, 0x3FF0\n\t"
+    "ldc1 $f0, %0\n\t"
+    "mtc1 $t0, $f1\n\t"
+    "sdc1 $f0, %0\n\t"
+    : "+m" (result)
+    : : "t0", "$f0", "$f1", "memory");
+
+  return !(result == 1);
+}
+
+
+int __detect_mips_arch_revision(void) {
+  // TODO(dusmil): Do the specific syscall as soon as it is implemented in the
+  // mips kernel. Currently fall back to the least common denominator, which
+  // is mips32 revision 1.
+  return 1;
+}
+#endif
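
The constant in __detect_fp64_mode() follows from the IEEE-754 binary64
layout; a standalone sketch of the encoding check:

  // sign = 0, biased exponent = 0x3FF (1023, i.e. 2^(1023-1023) = 2^0),
  // mantissa = 0, hence (+1) * 2^0 * 1.0 == 1.0.
  uint64_t bits = 0x3FF0000000000000ULL;
  double one;
  memcpy(&one, &bits, sizeof(one));
  DCHECK(one == 1.0);
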
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo FINAL {
+ public:
+  CPUInfo() : datalen_(0) {
+    // Get the size of the cpuinfo file by reading it until the end. This is
+    // required because files under /proc do not always return a valid size
+    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+    static const char PATHNAME[] = "/proc/cpuinfo";
+    FILE* fp = fopen(PATHNAME, "r");
+    if (fp != NULL) {
+      for (;;) {
+        char buffer[256];
+        size_t n = fread(buffer, 1, sizeof(buffer), fp);
+        if (n == 0) {
+          break;
+        }
+        datalen_ += n;
+      }
+      fclose(fp);
+    }
+
+    // Read the contents of the cpuinfo file.
+    data_ = new char[datalen_ + 1];
+    fp = fopen(PATHNAME, "r");
+    if (fp != NULL) {
+      for (size_t offset = 0; offset < datalen_; ) {
+        size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+        if (n == 0) {
+          break;
+        }
+        offset += n;
+      }
+      fclose(fp);
+    }
+
+    // Zero-terminate the data.
+    data_[datalen_] = '\0';
+  }
+
+  ~CPUInfo() {
+    delete[] data_;
+  }
+
+  // Extract the content of the first occurrence of a given field in
+  // the content of the cpuinfo file and return it as a heap-allocated
+  // string that must be freed by the caller using delete[].
+  // Return NULL if not found.
+  char* ExtractField(const char* field) const {
+    DCHECK(field != NULL);
+
+    // Look for the first field occurrence, and ensure it starts the line.
+    size_t fieldlen = strlen(field);
+    char* p = data_;
+    for (;;) {
+      p = strstr(p, field);
+      if (p == NULL) {
+        return NULL;
+      }
+      if (p == data_ || p[-1] == '\n') {
+        break;
+      }
+      p += fieldlen;
+    }
+
+    // Skip to the first colon followed by a space.
+    p = strchr(p + fieldlen, ':');
+    if (p == NULL || !isspace(p[1])) {
+      return NULL;
+    }
+    p += 2;
+
+    // Find the end of the line.
+    char* q = strchr(p, '\n');
+    if (q == NULL) {
+      q = data_ + datalen_;
+    }
+
+    // Copy the line into a heap-allocated buffer.
+    size_t len = q - p;
+    char* result = new char[len + 1];
+    if (result != NULL) {
+      memcpy(result, p, len);
+      result[len] = '\0';
+    }
+    return result;
+  }
+
+ private:
+  char* data_;
+  size_t datalen_;
+};
+
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+// Checks whether a space-separated list of items contains the given 'item'.
+static bool HasListItem(const char* list, const char* item) {
+  ssize_t item_len = strlen(item);
+  const char* p = list;
+  if (p != NULL) {
+    while (*p != '\0') {
+      // Skip whitespace.
+      while (isspace(*p)) ++p;
+
+      // Find end of current list item.
+      const char* q = p;
+      while (*q != '\0' && !isspace(*q)) ++q;
+
+      if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+        return true;
+      }
+
+      // Skip to next item.
+      p = q;
+    }
+  }
+  return false;
+}
+
+#endif  // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+#endif  // V8_OS_LINUX
+
+#endif  // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
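
For orientation, the helpers above parse /proc/cpuinfo lines like the
following (sample excerpt; exact fields vary by kernel and SoC):

  Processor       : ARMv7 Processor rev 1 (v7l)
  Features        : swp half thumb fastmult vfp edsp thumbee neon vfpv3
  CPU implementer : 0x41
  CPU architecture: 7
  CPU part        : 0xc09

Here ExtractField("CPU part") returns the heap-allocated string "0xc09"
(a Cortex-A9, matching ARM_CORTEX_A9 in cpu.h), and HasListItem() on the
"Features" value reports "neon" as present.
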
+
+CPU::CPU() : stepping_(0),
+             model_(0),
+             ext_model_(0),
+             family_(0),
+             ext_family_(0),
+             type_(0),
+             implementer_(0),
+             architecture_(0),
+             part_(0),
+             has_fpu_(false),
+             has_cmov_(false),
+             has_sahf_(false),
+             has_mmx_(false),
+             has_sse_(false),
+             has_sse2_(false),
+             has_sse3_(false),
+             has_ssse3_(false),
+             has_sse41_(false),
+             has_sse42_(false),
+             has_idiva_(false),
+             has_neon_(false),
+             has_thumb2_(false),
+             has_vfp_(false),
+             has_vfp3_(false),
+             has_vfp3_d32_(false),
+             is_fp64_mode_(false) {
+  memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+  int cpu_info[4];
+
+  // __cpuid with an InfoType argument of 0 returns the number of
+  // valid Ids in CPUInfo[0] and the CPU identification string in
+  // the other three array elements. The CPU identification string is
+  // not in linear order. The code below arranges the information
+  // in a human readable form. The human readable order is CPUInfo[1] |
+  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy to copy these three array elements to vendor_.
+  __cpuid(cpu_info, 0);
+  unsigned num_ids = cpu_info[0];
+  std::swap(cpu_info[2], cpu_info[3]);
+  memcpy(vendor_, cpu_info + 1, 12);
+  vendor_[12] = '\0';
+
+  // Interpret CPU feature information.
+  if (num_ids > 0) {
+    __cpuid(cpu_info, 1);
+    stepping_ = cpu_info[0] & 0xf;
+    model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+    family_ = (cpu_info[0] >> 8) & 0xf;
+    type_ = (cpu_info[0] >> 12) & 0x3;
+    ext_model_ = (cpu_info[0] >> 16) & 0xf;
+    ext_family_ = (cpu_info[0] >> 20) & 0xff;
+    has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+    has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+  }
+
+#if V8_HOST_ARCH_IA32
+  // SAHF is always available in compat/legacy mode.
+  has_sahf_ = true;
+#else
+  // Query extended IDs.
+  __cpuid(cpu_info, 0x80000000);
+  unsigned num_ext_ids = cpu_info[0];
+
+  // Interpret extended CPU feature information.
+  if (num_ext_ids > 0x80000000) {
+    __cpuid(cpu_info, 0x80000001);
+    // SAHF must be probed in long mode.
+    has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+  }
+#endif
+
+#elif V8_HOST_ARCH_ARM
+
+#if V8_OS_LINUX
+
+  CPUInfo cpu_info;
+
+  // Extract implementor from the "CPU implementer" field.
+  char* implementer = cpu_info.ExtractField("CPU implementer");
+  if (implementer != NULL) {
+    char* end;
+    implementer_ = strtol(implementer, &end, 0);
+    if (end == implementer) {
+      implementer_ = 0;
+    }
+    delete[] implementer;
+  }
+
+  // Extract part number from the "CPU part" field.
+  char* part = cpu_info.ExtractField("CPU part");
+  if (part != NULL) {
+    char* end;
+    part_ = strtol(part, &end, 0);
+    if (end == part) {
+      part_ = 0;
+    }
+    delete[] part;
+  }
+
+  // Extract architecture from the "CPU architecture" field.
+  // The list is well-known, unlike the output of
+  // the 'Processor' field, which can vary greatly.
+  // See the definition of the 'proc_arch' array in
+  // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+  // the same file.
+  char* architecture = cpu_info.ExtractField("CPU architecture");
+  if (architecture != NULL) {
+    char* end;
+    architecture_ = strtol(architecture, &end, 10);
+    if (end == architecture) {
+      architecture_ = 0;
+    }
+    delete[] architecture;
+
+    // Unfortunately, it seems that certain ARMv6-based CPUs
+    // report an incorrect architecture number of 7!
+    //
+    // See http://code.google.com/p/android/issues/detail?id=10812
+    //
+    // We try to correct this by looking at the 'elf_format'
+    // field reported by the 'Processor' field, which is of the
+    // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+    // an ARMv6 one. For example, the Raspberry Pi is one popular
+    // ARMv6 device that reports architecture 7.
+    if (architecture_ == 7) {
+      char* processor = cpu_info.ExtractField("Processor");
+      if (HasListItem(processor, "(v6l)")) {
+        architecture_ = 6;
+      }
+      delete[] processor;
+    }
+  }
+
+  // Try to extract the list of CPU features from ELF hwcaps.
+  uint32_t hwcaps = ReadELFHWCaps();
+  if (hwcaps != 0) {
+    has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+    has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+    has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+    has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+    has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+                                   (hwcaps & HWCAP_VFPD32) != 0));
+  } else {
+    // Try to fall back to the "Features" cpuinfo field.
+    char* features = cpu_info.ExtractField("Features");
+    has_idiva_ = HasListItem(features, "idiva");
+    has_neon_ = HasListItem(features, "neon");
+    has_thumb2_ = HasListItem(features, "thumb2");
+    has_vfp_ = HasListItem(features, "vfp");
+    if (HasListItem(features, "vfpv3d16")) {
+      has_vfp3_ = true;
+    } else if (HasListItem(features, "vfpv3")) {
+      has_vfp3_ = true;
+      has_vfp3_d32_ = true;
+    }
+    delete[] features;
+  }
+
+  // Some old kernels will report vfp, not vfpv3. Here we make an attempt
+  // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+  // available on architectures with vfpv3. Checking neon on its own is
+  // not enough as it is possible to have neon without vfp.
+  if (has_vfp_ && has_neon_) {
+    has_vfp3_ = true;
+  }
+
+  // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+  if (architecture_ < 7 && has_vfp3_) {
+    architecture_ = 7;
+  }
+
+  // ARMv7 implies Thumb2.
+  if (architecture_ >= 7) {
+    has_thumb2_ = true;
+  }
+
+  // The earliest architecture with Thumb2 is ARMv6T2.
+  if (has_thumb2_ && architecture_ < 6) {
+    architecture_ = 6;
+  }
+
+  // We don't support any FPUs other than VFP.
+  has_fpu_ = has_vfp_;
+
+#elif V8_OS_QNX
+
+  uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
+  if (cpu_flags & ARM_CPU_FLAG_V7) {
+    architecture_ = 7;
+    has_thumb2_ = true;
+  } else if (cpu_flags & ARM_CPU_FLAG_V6) {
+    architecture_ = 6;
+    // QNX doesn't say if Thumb2 is available.
+    // Assume false for the architectures older than ARMv7.
+  }
+  DCHECK(architecture_ >= 6);
+  has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
+  has_vfp_ = has_fpu_;
+  if (cpu_flags & ARM_CPU_FLAG_NEON) {
+    has_neon_ = true;
+    has_vfp3_ = has_vfp_;
+#ifdef ARM_CPU_FLAG_VFP_D32
+    has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
+#endif
+  }
+  has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
+
+#endif  // V8_OS_LINUX
+
+#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+  // Simple detection of FPU at runtime for Linux.
+  // It is based on /proc/cpuinfo, which reveals hardware configuration
+  // to user-space applications.  According to MIPS (early 2010), no similar
+  // facility is universally available on the MIPS architectures,
+  // so it's up to individual OSes to provide such.
+  CPUInfo cpu_info;
+  char* cpu_model = cpu_info.ExtractField("cpu model");
+  has_fpu_ = HasListItem(cpu_model, "FPU");
+  delete[] cpu_model;
+#ifdef V8_HOST_ARCH_MIPS
+  is_fp64_mode_ = __detect_fp64_mode();
+  architecture_ = __detect_mips_arch_revision();
+#endif
+
+#elif V8_HOST_ARCH_ARM64
+
+  CPUInfo cpu_info;
+
+  // Extract implementor from the "CPU implementer" field.
+  char* implementer = cpu_info.ExtractField("CPU implementer");
+  if (implementer != NULL) {
+    char* end;
+    implementer_ = strtol(implementer, &end, 0);
+    if (end == implementer) {
+      implementer_ = 0;
+    }
+    delete[] implementer;
+  }
+
+  // Extract part number from the "CPU part" field.
+  char* part = cpu_info.ExtractField("CPU part");
+  if (part != NULL) {
+    char* end;
+    part_ = strtol(part, &end, 0);
+    if (end == part) {
+      part_ = 0;
+    }
+    delete[] part;
+  }
+
+#endif
+}
+
+} }  // namespace v8::base
diff --git a/src/base/cpu.h b/src/base/cpu.h
new file mode 100644
index 0000000..dc0eaf4
--- /dev/null
+++ b/src/base/cpu.h
@@ -0,0 +1,115 @@
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the architecture-specific code. This makes the rest
+// of the code less dependent on differences between processor
+// architectures.
+// The classes have the same definition for all architectures. The
+// implementation for a particular architecture is put in cpu_<arch>.cc.
+// The build system then uses the implementation for the target architecture.
+//
+
+#ifndef V8_BASE_CPU_H_
+#define V8_BASE_CPU_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// CPU
+//
+// Query information about the processor.
+//
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
+
+class CPU FINAL {
+ public:
+  CPU();
+
+  // x86 CPUID information
+  const char* vendor() const { return vendor_; }
+  int stepping() const { return stepping_; }
+  int model() const { return model_; }
+  int ext_model() const { return ext_model_; }
+  int family() const { return family_; }
+  int ext_family() const { return ext_family_; }
+  int type() const { return type_; }
+
+  // arm implementer/part information
+  int implementer() const { return implementer_; }
+  static const int ARM = 0x41;
+  static const int NVIDIA = 0x4e;
+  static const int QUALCOMM = 0x51;
+  int architecture() const { return architecture_; }
+  int part() const { return part_; }
+  static const int ARM_CORTEX_A5 = 0xc05;
+  static const int ARM_CORTEX_A7 = 0xc07;
+  static const int ARM_CORTEX_A8 = 0xc08;
+  static const int ARM_CORTEX_A9 = 0xc09;
+  static const int ARM_CORTEX_A12 = 0xc0c;
+  static const int ARM_CORTEX_A15 = 0xc0f;
+
+  // General features
+  bool has_fpu() const { return has_fpu_; }
+
+  // x86 features
+  bool has_cmov() const { return has_cmov_; }
+  bool has_sahf() const { return has_sahf_; }
+  bool has_mmx() const { return has_mmx_; }
+  bool has_sse() const { return has_sse_; }
+  bool has_sse2() const { return has_sse2_; }
+  bool has_sse3() const { return has_sse3_; }
+  bool has_ssse3() const { return has_ssse3_; }
+  bool has_sse41() const { return has_sse41_; }
+  bool has_sse42() const { return has_sse42_; }
+
+  // arm features
+  bool has_idiva() const { return has_idiva_; }
+  bool has_neon() const { return has_neon_; }
+  bool has_thumb2() const { return has_thumb2_; }
+  bool has_vfp() const { return has_vfp_; }
+  bool has_vfp3() const { return has_vfp3_; }
+  bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+  // mips features
+  bool is_fp64_mode() const { return is_fp64_mode_; }
+
+ private:
+  char vendor_[13];
+  int stepping_;
+  int model_;
+  int ext_model_;
+  int family_;
+  int ext_family_;
+  int type_;
+  int implementer_;
+  int architecture_;
+  int part_;
+  bool has_fpu_;
+  bool has_cmov_;
+  bool has_sahf_;
+  bool has_mmx_;
+  bool has_sse_;
+  bool has_sse2_;
+  bool has_sse3_;
+  bool has_ssse3_;
+  bool has_sse41_;
+  bool has_sse42_;
+  bool has_idiva_;
+  bool has_neon_;
+  bool has_thumb2_;
+  bool has_vfp_;
+  bool has_vfp3_;
+  bool has_vfp3_d32_;
+  bool is_fp64_mode_;
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_CPU_H_
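
Typical use of the class is construct-and-query; all detection work happens
once in the constructor (sketch, function name hypothetical):

  #include "src/base/cpu.h"

  bool CanUseSSE41() {
    v8::base::CPU cpu;  // probes CPUID or /proc/cpuinfo at construction
    return cpu.has_sse41();
  }
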
diff --git a/src/base/division-by-constant-unittest.cc b/src/base/division-by-constant-unittest.cc
new file mode 100644
index 0000000..47c2483
--- /dev/null
+++ b/src/base/division-by-constant-unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check all examples from table 10-1 of "Hacker's Delight".
+
+#include "src/base/division-by-constant.h"
+
+#include <ostream>  // NOLINT
+
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os,
+                         const MagicNumbersForDivision<T>& mag) {
+  return os << "{ multiplier: " << mag.multiplier << ", shift: " << mag.shift
+            << ", add: " << mag.add << " }";
+}
+
+
+// Some abbreviations...
+
+typedef MagicNumbersForDivision<uint32_t> M32;
+typedef MagicNumbersForDivision<uint64_t> M64;
+
+
+static M32 s32(int32_t d) {
+  return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
+}
+
+
+static M64 s64(int64_t d) {
+  return SignedDivisionByConstant<uint64_t>(static_cast<uint64_t>(d));
+}
+
+
+static M32 u32(uint32_t d) { return UnsignedDivisionByConstant<uint32_t>(d); }
+static M64 u64(uint64_t d) { return UnsignedDivisionByConstant<uint64_t>(d); }
+
+
+TEST(DivisionByConstant, Signed32) {
+  EXPECT_EQ(M32(0x99999999U, 1, false), s32(-5));
+  EXPECT_EQ(M32(0x55555555U, 1, false), s32(-3));
+  int32_t d = -1;
+  for (unsigned k = 1; k <= 32 - 1; ++k) {
+    d *= 2;
+    EXPECT_EQ(M32(0x7FFFFFFFU, k - 1, false), s32(d));
+  }
+  for (unsigned k = 1; k <= 32 - 2; ++k) {
+    EXPECT_EQ(M32(0x80000001U, k - 1, false), s32(1 << k));
+  }
+  EXPECT_EQ(M32(0x55555556U, 0, false), s32(3));
+  EXPECT_EQ(M32(0x66666667U, 1, false), s32(5));
+  EXPECT_EQ(M32(0x2AAAAAABU, 0, false), s32(6));
+  EXPECT_EQ(M32(0x92492493U, 2, false), s32(7));
+  EXPECT_EQ(M32(0x38E38E39U, 1, false), s32(9));
+  EXPECT_EQ(M32(0x66666667U, 2, false), s32(10));
+  EXPECT_EQ(M32(0x2E8BA2E9U, 1, false), s32(11));
+  EXPECT_EQ(M32(0x2AAAAAABU, 1, false), s32(12));
+  EXPECT_EQ(M32(0x51EB851FU, 3, false), s32(25));
+  EXPECT_EQ(M32(0x10624DD3U, 3, false), s32(125));
+  EXPECT_EQ(M32(0x68DB8BADU, 8, false), s32(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned32) {
+  EXPECT_EQ(M32(0x00000000U, 0, true), u32(1));
+  for (unsigned k = 1; k <= 30; ++k) {
+    EXPECT_EQ(M32(1U << (32 - k), 0, false), u32(1U << k));
+  }
+  EXPECT_EQ(M32(0xAAAAAAABU, 1, false), u32(3));
+  EXPECT_EQ(M32(0xCCCCCCCDU, 2, false), u32(5));
+  EXPECT_EQ(M32(0xAAAAAAABU, 2, false), u32(6));
+  EXPECT_EQ(M32(0x24924925U, 3, true), u32(7));
+  EXPECT_EQ(M32(0x38E38E39U, 1, false), u32(9));
+  EXPECT_EQ(M32(0xCCCCCCCDU, 3, false), u32(10));
+  EXPECT_EQ(M32(0xBA2E8BA3U, 3, false), u32(11));
+  EXPECT_EQ(M32(0xAAAAAAABU, 3, false), u32(12));
+  EXPECT_EQ(M32(0x51EB851FU, 3, false), u32(25));
+  EXPECT_EQ(M32(0x10624DD3U, 3, false), u32(125));
+  EXPECT_EQ(M32(0xD1B71759U, 9, false), u32(625));
+}
+
+
+TEST(DivisionByConstant, Signed64) {
+  EXPECT_EQ(M64(0x9999999999999999ULL, 1, false), s64(-5));
+  EXPECT_EQ(M64(0x5555555555555555ULL, 1, false), s64(-3));
+  int64_t d = -1;
+  for (unsigned k = 1; k <= 64 - 1; ++k) {
+    d *= 2;
+    EXPECT_EQ(M64(0x7FFFFFFFFFFFFFFFULL, k - 1, false), s64(d));
+  }
+  for (unsigned k = 1; k <= 64 - 2; ++k) {
+    EXPECT_EQ(M64(0x8000000000000001ULL, k - 1, false), s64(1LL << k));
+  }
+  EXPECT_EQ(M64(0x5555555555555556ULL, 0, false), s64(3));
+  EXPECT_EQ(M64(0x6666666666666667ULL, 1, false), s64(5));
+  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 0, false), s64(6));
+  EXPECT_EQ(M64(0x4924924924924925ULL, 1, false), s64(7));
+  EXPECT_EQ(M64(0x1C71C71C71C71C72ULL, 0, false), s64(9));
+  EXPECT_EQ(M64(0x6666666666666667ULL, 2, false), s64(10));
+  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), s64(11));
+  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 1, false), s64(12));
+  EXPECT_EQ(M64(0xA3D70A3D70A3D70BULL, 4, false), s64(25));
+  EXPECT_EQ(M64(0x20C49BA5E353F7CFULL, 4, false), s64(125));
+  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), s64(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned64) {
+  EXPECT_EQ(M64(0x0000000000000000ULL, 0, true), u64(1));
+  for (unsigned k = 1; k <= 64 - 2; ++k) {
+    EXPECT_EQ(M64(1ULL << (64 - k), 0, false), u64(1ULL << k));
+  }
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 1, false), u64(3));
+  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 2, false), u64(5));
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 2, false), u64(6));
+  EXPECT_EQ(M64(0x2492492492492493ULL, 3, true), u64(7));
+  EXPECT_EQ(M64(0xE38E38E38E38E38FULL, 3, false), u64(9));
+  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 3, false), u64(10));
+  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), u64(11));
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 3, false), u64(12));
+  EXPECT_EQ(M64(0x47AE147AE147AE15ULL, 5, true), u64(25));
+  EXPECT_EQ(M64(0x0624DD2F1A9FBE77ULL, 7, true), u64(125));
+  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), u64(625));
+}
+
+}  // namespace base
+}  // namespace v8
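
A sketch of how the magic numbers checked in these tables are applied at
code-generation time (helper names hypothetical; the signed case relies on
arithmetic right shift of negative values, as V8's supported toolchains
provide):

  // Unsigned n / 5 uses { multiplier: 0xCCCCCCCD, shift: 2, add: false }:
  uint32_t DivU5(uint32_t n) {
    uint32_t hi = static_cast<uint32_t>(
        (static_cast<uint64_t>(n) * 0xCCCCCCCDu) >> 32);
    return hi >> 2;  // shift the high half of the 64-bit product
  }

  // Signed n / 3 uses { multiplier: 0x55555556, shift: 0, add: false };
  // truncated signed division additionally subtracts the sign bit of n:
  int32_t DivS3(int32_t n) {
    int32_t hi = static_cast<int32_t>(
        (static_cast<int64_t>(n) * 0x55555556LL) >> 32);
    return hi - (n >> 31);  // n >> 31 is 0 for n >= 0, -1 for n < 0
  }

When 'add' is true (e.g. unsigned division by 7), the multiplier has
overflowed and an extra add-and-shift correction step is needed before the
final shift; see chapter 10 of "Hacker's Delight" for that variant.
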
diff --git a/src/base/division-by-constant.cc b/src/base/division-by-constant.cc
new file mode 100644
index 0000000..235d39f
--- /dev/null
+++ b/src/base/division-by-constant.cc
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/division-by-constant.h"
+
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+bool MagicNumbersForDivision<T>::operator==(
+    const MagicNumbersForDivision& rhs) const {
+  return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
+  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+  DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
+  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+  const T min = (static_cast<T>(1) << (bits - 1));
+  const bool neg = (min & d) != 0;
+  const T ad = neg ? (0 - d) : d;
+  const T t = min + (d >> (bits - 1));
+  const T anc = t - 1 - t % ad;  // Absolute value of nc
+  unsigned p = bits - 1;         // Init. p.
+  T q1 = min / anc;              // Init. q1 = 2**p/|nc|.
+  T r1 = min - q1 * anc;         // Init. r1 = rem(2**p, |nc|).
+  T q2 = min / ad;               // Init. q2 = 2**p/|d|.
+  T r2 = min - q2 * ad;          // Init. r2 = rem(2**p, |d|).
+  T delta;
+  do {
+    p = p + 1;
+    q1 = 2 * q1;      // Update q1 = 2**p/|nc|.
+    r1 = 2 * r1;      // Update r1 = rem(2**p, |nc|).
+    if (r1 >= anc) {  // Must be an unsigned comparison here.
+      q1 = q1 + 1;
+      r1 = r1 - anc;
+    }
+    q2 = 2 * q2;     // Update q2 = 2**p/|d|.
+    r2 = 2 * r2;     // Update r2 = rem(2**p, |d|).
+    if (r2 >= ad) {  // Must be an unsigned comparison here.
+      q2 = q2 + 1;
+      r2 = r2 - ad;
+    }
+    delta = ad - r2;
+  } while (q1 < delta || (q1 == delta && r1 == 0));
+  T mul = q2 + 1;
+  return {neg ? (0 - mul) : mul, p - bits, false};
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
+                                                      unsigned leading_zeros) {
+  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+  DCHECK(d != 0);
+  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+  const T ones = ~static_cast<T>(0) >> leading_zeros;
+  const T min = static_cast<T>(1) << (bits - 1);
+  const T max = ~static_cast<T>(0) >> 1;
+  const T nc = ones - (ones - d) % d;
+  bool a = false;         // Init. "add" indicator.
+  unsigned p = bits - 1;  // Init. p.
+  T q1 = min / nc;        // Init. q1 = 2**p/nc
+  T r1 = min - q1 * nc;   // Init. r1 = rem(2**p,nc)
+  T q2 = max / d;         // Init. q2 = (2**p - 1)/d.
+  T r2 = max - q2 * d;    // Init. r2 = rem(2**p - 1, d).
+  T delta;
+  do {
+    p = p + 1;
+    if (r1 >= nc - r1) {
+      q1 = 2 * q1 + 1;
+      r1 = 2 * r1 - nc;
+    } else {
+      q1 = 2 * q1;
+      r1 = 2 * r1;
+    }
+    if (r2 + 1 >= d - r2) {
+      if (q2 >= max) a = true;
+      q2 = 2 * q2 + 1;
+      r2 = 2 * r2 + 1 - d;
+    } else {
+      if (q2 >= min) a = true;
+      q2 = 2 * q2;
+      r2 = 2 * r2 + 1;
+    }
+    delta = d - 1 - r2;
+  } while (p < bits * 2 && (q1 < delta || (q1 == delta && r1 == 0)));
+  return {q2 + 1, p - bits, a};
+}
+
+
+// -----------------------------------------------------------------------------
+// Instantiations.
+
+template struct MagicNumbersForDivision<uint32_t>;
+template struct MagicNumbersForDivision<uint64_t>;
+
+template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
+template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
+
+template MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
+    uint32_t d, unsigned leading_zeros);
+template MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
+    uint64_t d, unsigned leading_zeros);
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/division-by-constant.h b/src/base/division-by-constant.h
new file mode 100644
index 0000000..02e7e14
--- /dev/null
+++ b/src/base/division-by-constant.h
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
+#define V8_BASE_DIVISION_BY_CONSTANT_H_
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+
+// The magic numbers for division via multiplication; see Warren's "Hacker's
+// Delight", chapter 10. The template parameter must be one of the unsigned
+// integral types.
+template <class T>
+struct MagicNumbersForDivision {
+  MagicNumbersForDivision(T m, unsigned s, bool a)
+      : multiplier(m), shift(s), add(a) {}
+  bool operator==(const MagicNumbersForDivision& rhs) const;
+
+  T multiplier;
+  unsigned shift;
+  bool add;
+};
+
+
+// Calculate the multiplier and shift for signed division via multiplication.
+// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
+
+
+// Calculate the multiplier and shift for unsigned division via
+// multiplication; see Warren's "Hacker's Delight", chapter 10. The divisor
+// must not be 0, and leading_zeros can be used to speed up the calculation
+// if the given number of upper bits of the dividend value are known to be
+// zero.
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+    T d, unsigned leading_zeros = 0);
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_DIVISION_BY_CONSTANT_H_
diff --git a/src/base/flags-unittest.cc b/src/base/flags-unittest.cc
new file mode 100644
index 0000000..a1d6f37
--- /dev/null
+++ b/src/base/flags-unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/base/flags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+enum Flag1 {
+  kFlag1None = 0,
+  kFlag1First = 1u << 1,
+  kFlag1Second = 1u << 2,
+  kFlag1All = kFlag1None | kFlag1First | kFlag1Second
+};
+typedef Flags<Flag1> Flags1;
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Flags1)
+
+
+Flags1 bar(Flags1 flags1) { return flags1; }
+
+}  // namespace
+
+
+TEST(FlagsTest, BasicOperations) {
+  Flags1 a;
+  EXPECT_EQ(kFlag1None, static_cast<int>(a));
+  a |= kFlag1First;
+  EXPECT_EQ(kFlag1First, static_cast<int>(a));
+  a = a | kFlag1Second;
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  a &= kFlag1Second;
+  EXPECT_EQ(kFlag1Second, static_cast<int>(a));
+  a = kFlag1None & a;
+  EXPECT_EQ(kFlag1None, static_cast<int>(a));
+  a ^= (kFlag1All | kFlag1None);
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  Flags1 b = ~a;
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  EXPECT_EQ(~static_cast<int>(a), static_cast<int>(b));
+  Flags1 c = a;
+  EXPECT_EQ(a, c);
+  EXPECT_NE(a, b);
+  EXPECT_EQ(a, bar(a));
+  EXPECT_EQ(a, bar(kFlag1All));
+}
+
+
+namespace {
+namespace foo {
+
+enum Option {
+  kNoOptions = 0,
+  kOption1 = 1,
+  kOption2 = 2,
+  kAllOptions = kNoOptions | kOption1 | kOption2
+};
+typedef Flags<Option> Options;
+
+}  // namespace foo
+
+
+DEFINE_OPERATORS_FOR_FLAGS(foo::Options)
+
+}  // namespace
+
+
+TEST(FlagsTest, NamespaceScope) {
+  foo::Options options;
+  options ^= foo::kNoOptions;
+  options |= foo::kOption1 | foo::kOption2;
+  EXPECT_EQ(foo::kAllOptions, static_cast<int>(options));
+}
+
+
+namespace {
+
+struct Foo {
+  enum Enum { kEnum1 = 1, kEnum2 = 2 };
+  typedef Flags<Enum, uint32_t> Enums;
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Foo::Enums)
+
+}  // namespace
+
+
+TEST(FlagsTest, ClassScope) {
+  Foo::Enums enums;
+  enums |= Foo::kEnum1;
+  enums |= Foo::kEnum2;
+  EXPECT_TRUE(enums & Foo::kEnum1);
+  EXPECT_TRUE(enums & Foo::kEnum2);
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/flags.h b/src/base/flags.h
new file mode 100644
index 0000000..f3420ee
--- /dev/null
+++ b/src/base/flags.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_FLAGS_H_
+#define V8_BASE_FLAGS_H_
+
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// The Flags class provides a type-safe way of storing OR-combinations of enum
+// values. The Flags<T, S> class is a template class, where T is an enum type,
+// and S is the underlying storage type (usually int).
+//
+// The traditional C++ approach for storing OR-combinations of enum values is to
+// use an int or unsigned int variable. The inconvenience with this approach is
+// that there's no type checking at all; any enum value can be OR'd with any
+// other enum value and passed on to a function that takes an int or unsigned
+// int.
+template <typename T, typename S = int>
+class Flags FINAL {
+ public:
+  typedef T flag_type;
+  typedef S mask_type;
+
+  Flags() : mask_(0) {}
+  Flags(flag_type flag) : mask_(flag) {}  // NOLINT(runtime/explicit)
+  explicit Flags(mask_type mask) : mask_(mask) {}
+
+  Flags& operator&=(const Flags& flags) {
+    mask_ &= flags.mask_;
+    return *this;
+  }
+  Flags& operator|=(const Flags& flags) {
+    mask_ |= flags.mask_;
+    return *this;
+  }
+  Flags& operator^=(const Flags& flags) {
+    mask_ ^= flags.mask_;
+    return *this;
+  }
+
+  Flags operator&(const Flags& flags) const { return Flags(*this) &= flags; }
+  Flags operator|(const Flags& flags) const { return Flags(*this) |= flags; }
+  Flags operator^(const Flags& flags) const { return Flags(*this) ^= flags; }
+
+  Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
+  Flags& operator|=(flag_type flag) { return operator|=(Flags(flag)); }
+  Flags& operator^=(flag_type flag) { return operator^=(Flags(flag)); }
+
+  Flags operator&(flag_type flag) const { return operator&(Flags(flag)); }
+  Flags operator|(flag_type flag) const { return operator|(Flags(flag)); }
+  Flags operator^(flag_type flag) const { return operator^(Flags(flag)); }
+
+  Flags operator~() const { return Flags(~mask_); }
+
+  operator mask_type() const { return mask_; }
+  bool operator!() const { return !mask_; }
+
+ private:
+  mask_type mask_;
+};
+
+
+#define DEFINE_OPERATORS_FOR_FLAGS(Type)                              \
+  inline Type operator&(Type::flag_type lhs, Type::flag_type rhs)     \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) {   \
+    return Type(lhs) & rhs;                                           \
+  }                                                                   \
+  inline Type operator&(Type::flag_type lhs, const Type& rhs)         \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator&(Type::flag_type lhs, const Type& rhs) {       \
+    return rhs & lhs;                                                 \
+  }                                                                   \
+  inline void operator&(Type::flag_type lhs, Type::mask_type rhs)     \
+      ALLOW_UNUSED;                                                   \
+  inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {}  \
+  inline Type operator|(Type::flag_type lhs, Type::flag_type rhs)     \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) {   \
+    return Type(lhs) | rhs;                                           \
+  }                                                                   \
+  inline Type operator|(Type::flag_type lhs, const Type& rhs)         \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator|(Type::flag_type lhs, const Type& rhs) {       \
+    return rhs | lhs;                                                 \
+  }                                                                   \
+  inline void operator|(Type::flag_type lhs, Type::mask_type rhs)     \
+      ALLOW_UNUSED;                                                   \
+  inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {}  \
+  inline Type operator^(Type::flag_type lhs, Type::flag_type rhs)     \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) {   \
+    return Type(lhs) ^ rhs;                                           \
+  }                                                                   \
+  inline Type operator^(Type::flag_type lhs, const Type& rhs)         \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                \
+  inline Type operator^(Type::flag_type lhs, const Type& rhs) {       \
+    return rhs ^ lhs;                                                 \
+  }                                                                   \
+  inline void operator^(Type::flag_type lhs, Type::mask_type rhs)     \
+      ALLOW_UNUSED;                                                   \
+  inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_FLAGS_H_
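
The point of the free operators defined by the macro is that OR-ing two raw
enum values yields a plain int in C++, which would silently bypass the type
check Flags provides. A sketch (enum and function names hypothetical):

  enum Mode { kRead = 1 << 0, kWrite = 1 << 1 };
  typedef v8::base::Flags<Mode> Modes;
  DEFINE_OPERATORS_FOR_FLAGS(Modes)

  void Open(Modes modes);

  // Open(kRead | kWrite);    // OK: operator| yields Modes, not int.
  // Open(Modes(kRead) | 3);  // Rejected: no implicit int -> Modes conversion.
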
diff --git a/src/base/lazy-instance.h b/src/base/lazy-instance.h
new file mode 100644
index 0000000..a20689a
--- /dev/null
+++ b/src/base/lazy-instance.h
@@ -0,0 +1,237 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed.  This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety.  The Type constructor will only ever
+// be called once, even if two threads are racing to create the object.  Get()
+// and Pointer() will always return the same, completely initialized instance.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor.  It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property.  You can have multiple LazyInstance's of the same type, and each
+// will manage a unique instance.  It also preallocates the space for Type, as
+// to avoid allocating the Type instance on the heap.  This may help with the
+// performance of creating the instance, and reducing heap fragmentation.  This
+// requires that Type be a complete type so we can determine the size. See
+// notes for advanced users below for more explanations.
+//
+// Example usage:
+//   static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
+//   void SomeMethod() {
+//     my_instance.Get().SomeMethod();  // MyClass::SomeMethod()
+//
+//     MyClass* ptr = my_instance.Pointer();
+//     ptr->DoDoDo();  // MyClass::DoDoDo
+//   }
+//
+// Additionally you can override the way your instance is constructed by
+// providing your own trait:
+// Example usage:
+//   struct MyCreateTrait {
+//     static void Construct(MyClass* allocated_ptr) {
+//       new (allocated_ptr) MyClass(/* extra parameters... */);
+//     }
+//   };
+//   static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
+//      LAZY_INSTANCE_INITIALIZER;
+//
+// WARNINGS:
+// - This implementation of LazyInstance IS THREAD-SAFE by default. See
+//   SingleThreadInitOnceTrait if you don't care about thread safety.
+// - Lazy initialization comes with a cost. Make sure that you don't use it on
+//   a critical path. Consider adding your initialization code to a function
+//   which is explicitly called once.
+//
+// Notes for advanced users:
+// LazyInstance can actually be used in two different ways:
+//
+// - "Static mode" which is the default mode since it is the most efficient
+//   (no extra heap allocation). In this mode, the instance is statically
+//   allocated (stored in the global data section at compile time).
+//   The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
+//   must be used to initialize static lazy instances.
+//
+// - "Dynamic mode". In this mode, the instance is dynamically allocated and
+//   constructed (using new) by default. This mode is useful if you have to
+//   deal with some code already allocating the instance for you (e.g.
+//   OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
+//   The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
+//   dynamic lazy instances.
+
+#ifndef V8_BASE_LAZY_INSTANCE_H_
+#define V8_BASE_LAZY_INSTANCE_H_
+
+#include "src/base/macros.h"
+#include "src/base/once.h"
+
+namespace v8 {
+namespace base {
+
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
+#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
+
+// Default to static mode.
+#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+template <typename T>
+struct LeakyInstanceTrait {
+  static void Destroy(T* /* instance */) {}
+};
+
+
+// Traits that define how an instance is allocated and accessed.
+
+
+template <typename T>
+struct StaticallyAllocatedInstanceTrait {
+  // 16-byte alignment fallback to be on the safe side here.
+  struct V8_ALIGNAS(T, 16) StorageType {
+    char x[sizeof(T)];
+  };
+
+  STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
+
+  static T* MutableInstance(StorageType* storage) {
+    return reinterpret_cast<T*>(storage);
+  }
+
+  template <typename ConstructTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    ConstructTrait::Construct(MutableInstance(storage));
+  }
+};
+
+
+template <typename T>
+struct DynamicallyAllocatedInstanceTrait {
+  typedef T* StorageType;
+
+  static T* MutableInstance(StorageType* storage) {
+    return *storage;
+  }
+
+  template <typename CreateTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    *storage = CreateTrait::Create();
+  }
+};
+
+
+template <typename T>
+struct DefaultConstructTrait {
+  // Constructs the provided object which was already allocated.
+  static void Construct(T* allocated_ptr) {
+    new(allocated_ptr) T();
+  }
+};
+
+
+template <typename T>
+struct DefaultCreateTrait {
+  static T* Create() {
+    return new T();
+  }
+};
+
+
+struct ThreadSafeInitOnceTrait {
+  template <typename Function, typename Storage>
+  static void Init(OnceType* once, Function function, Storage storage) {
+    CallOnce(once, function, storage);
+  }
+};
+
+
+// Initialization trait for users who don't care about thread-safety.
+struct SingleThreadInitOnceTrait {
+  template <typename Function, typename Storage>
+  static void Init(OnceType* once, Function function, Storage storage) {
+    if (*once == ONCE_STATE_UNINITIALIZED) {
+      function(storage);
+      *once = ONCE_STATE_DONE;
+    }
+  }
+};
+
+
+// TODO(pliard): Handle instances destruction (using global destructors).
+template <typename T, typename AllocationTrait, typename CreateTrait,
+          typename InitOnceTrait, typename DestroyTrait  /* not used yet. */>
+struct LazyInstanceImpl {
+ public:
+  typedef typename AllocationTrait::StorageType StorageType;
+
+ private:
+  static void InitInstance(StorageType* storage) {
+    AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+  }
+
+  void Init() const {
+    InitOnceTrait::Init(
+        &once_,
+        // Casts to void* are needed here to avoid breaking strict aliasing
+        // rules.
+        reinterpret_cast<void(*)(void*)>(&InitInstance),  // NOLINT
+        reinterpret_cast<void*>(&storage_));
+  }
+
+ public:
+  T* Pointer() {
+    Init();
+    return AllocationTrait::MutableInstance(&storage_);
+  }
+
+  const T& Get() const {
+    Init();
+    return *AllocationTrait::MutableInstance(&storage_);
+  }
+
+  mutable OnceType once_;
+  // Note that the previous field, OnceType, is an AtomicWord which guarantees
+  // 4-byte alignment of the storage field below. If compiling with GCC (>4.2),
+  // the V8_ALIGNAS macro used above will guarantee correctness for any
+  // alignment.
+  mutable StorageType storage_;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename InitOnceTrait = ThreadSafeInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyStaticInstance {
+  typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>,
+      CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename InitOnceTrait = ThreadSafeInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyInstance {
+  // A LazyInstance is a LazyStaticInstance.
+  typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait,
+      DestroyTrait>::type type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultCreateTrait<T>,
+          typename InitOnceTrait = ThreadSafeInitOnceTrait,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyDynamicInstance {
+  typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>,
+      CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_LAZY_INSTANCE_H_
diff --git a/src/base/logging.cc b/src/base/logging.cc
new file mode 100644
index 0000000..c3f609f
--- /dev/null
+++ b/src/base/logging.cc
@@ -0,0 +1,88 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#elif V8_OS_QNX
+# include <backtrace.h>
+#endif  // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+// Attempts to dump a backtrace (if supported).
+void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+  void* trace[100];
+  int size = backtrace(trace, arraysize(trace));
+  char** symbols = backtrace_symbols(trace, size);
+  OS::PrintError("\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    OS::PrintError("(empty)\n");
+  } else if (symbols == NULL) {
+    OS::PrintError("(no symbols)\n");
+  } else {
+    for (int i = 1; i < size; ++i) {
+      OS::PrintError("%2d: ", i);
+      char mangled[201];
+      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
+        int status;
+        size_t length;
+        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+        OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+        free(demangled);
+      } else {
+        OS::PrintError("??\n");
+      }
+    }
+  }
+  free(symbols);
+#elif V8_OS_QNX
+  char out[1024];
+  bt_accessor_t acc;
+  bt_memmap_t memmap;
+  bt_init_accessor(&acc, BT_SELF);
+  bt_load_memmap(&acc, &memmap);
+  bt_sprn_memmap(&memmap, out, sizeof(out));
+  OS::PrintError(out);
+  bt_addr_t trace[100];
+  int size = bt_get_backtrace(&acc, trace, arraysize(trace));
+  OS::PrintError("\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    OS::PrintError("(empty)\n");
+  } else {
+    bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
+                   out, sizeof(out), NULL);
+    OS::PrintError(out);
+  }
+  bt_unload_memmap(&memmap);
+  bt_release_accessor(&acc);
+#endif  // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
+} }  // namespace v8::base
+
+
+// Contains protection against recursive calls (faults while handling faults).
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+  fflush(stdout);
+  fflush(stderr);
+  v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
+                           line);
+  va_list arguments;
+  va_start(arguments, format);
+  v8::base::OS::VPrintError(format, arguments);
+  va_end(arguments);
+  v8::base::OS::PrintError("\n#\n");
+  v8::base::DumpBacktrace();
+  fflush(stderr);
+  v8::base::OS::Abort();
+}
diff --git a/src/base/logging.h b/src/base/logging.h
new file mode 100644
index 0000000..8e24bb0
--- /dev/null
+++ b/src/base/logging.h
@@ -0,0 +1,223 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LOGGING_H_
+#define V8_BASE_LOGGING_H_
+
+#include <string.h>
+
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
+
+// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
+// development, but they should not be relied on in the final product.
+#ifdef DEBUG
+#define FATAL(msg)                              \
+  V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal(__FILE__, __LINE__, "unimplemented code")
+#define UNREACHABLE()                           \
+  V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#else
+#define FATAL(msg)                              \
+  V8_Fatal("", 0, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal("", 0, "unimplemented code")
+#define UNREACHABLE() ((void) 0)
+#endif
+
+
+// The CHECK macro checks that the given condition is true; if not, it
+// prints a message to stderr and aborts.
+#define CHECK(condition) do {                                       \
+    if (!(condition)) {                                             \
+      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
+    }                                                               \
+  } while (0)
+
+
+// Helper function used by the CHECK_EQ function when given int
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source, int expected,
+                              const char* value_source, int value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+// Helper function used by the CHECK_EQ function when given int64_t
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source,
+                              int64_t expected,
+                              const char* value_source,
+                              int64_t value) {
+  if (expected != value) {
+    // Print int64_t values in hex, as two int32s,
+    // to avoid platform-dependencies.
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#"
+             "   Expected: 0x%08x%08x\n#   Found: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(expected >> 32),
+             static_cast<uint32_t>(expected),
+             static_cast<uint32_t>(value >> 32),
+             static_cast<uint32_t>(value));
+  }
+}
+
+
+// Helper function used by the CHECK_NE function when given int
+// arguments.  Should not be called directly.
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* unexpected_source,
+                                 int unexpected,
+                                 const char* value_source,
+                                 int value) {
+  if (unexpected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
+             unexpected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK function when given string
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const char* expected,
+                              const char* value_source,
+                              const char* value) {
+  if ((expected == NULL && value != NULL) ||
+      (expected != NULL && value == NULL) ||
+      (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const char* expected,
+                                 const char* value_source,
+                                 const char* value) {
+  if (expected == value ||
+      (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK function when given pointer
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const void* expected,
+                              const char* value_source,
+                              const void* value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
+             expected_source, value_source,
+             expected, value);
+  }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const void* expected,
+                                 const char* value_source,
+                                 const void* value) {
+  if (expected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK_NE macro when given int64_t
+// arguments.  Should not be called directly.
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* unexpected_source,
+                                 int64_t unexpected,
+                                 const char* value_source,
+                                 int64_t value) {
+  if (unexpected == value) {
+    // Print the int64_t value in hex, as two int32s, to avoid
+    // platform-dependent format specifiers.
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: 0x%08x%08x",
+             unexpected_source, value_source,
+             static_cast<uint32_t>(value >> 32),
+             static_cast<uint32_t>(value));
+  }
+}
+
+
+#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
+  #expected, expected, #value, value)
+
+
+#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
+  #unexpected, unexpected, #value, value)
+
+
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
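+
+// Example usage (illustrative only; the checked values are made up):
+//   CHECK_EQ(0, result);        // int overload
+//   CHECK_NE(NULL, pointer);    // pointer overload
+//   CHECK_LE(offset, length);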
+
+
+namespace v8 {
+namespace base {
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} }  // namespace v8::base
+
+
+// The DCHECK macro is equivalent to CHECK except that it only
+// generates code in debug builds.
+#ifdef DEBUG
+#define DCHECK_RESULT(expr)    CHECK(expr)
+#define DCHECK(condition)      CHECK(condition)
+#define DCHECK_EQ(v1, v2)      CHECK_EQ(v1, v2)
+#define DCHECK_NE(v1, v2)      CHECK_NE(v1, v2)
+#define DCHECK_GE(v1, v2)      CHECK_GE(v1, v2)
+#define DCHECK_LT(v1, v2)      CHECK_LT(v1, v2)
+#define DCHECK_LE(v1, v2)      CHECK_LE(v1, v2)
+#else
+#define DCHECK_RESULT(expr)    (expr)
+#define DCHECK(condition)      ((void) 0)
+#define DCHECK_EQ(v1, v2)      ((void) 0)
+#define DCHECK_NE(v1, v2)      ((void) 0)
+#define DCHECK_GE(v1, v2)      ((void) 0)
+#define DCHECK_LT(v1, v2)      ((void) 0)
+#define DCHECK_LE(v1, v2)      ((void) 0)
+#endif
+
+#define DCHECK_NOT_NULL(p)  DCHECK_NE(NULL, p)
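+
+// Example usage (illustrative only; |kAlignment| is a hypothetical constant):
+//   DCHECK(ptr != NULL);                // checked in debug builds only
+//   DCHECK_EQ(0, offset % kAlignment);  // compiled away in release builds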
+
+// "Extra checks" are lightweight checks that are enabled in some release
+// builds.
+#ifdef ENABLE_EXTRA_CHECKS
+#define EXTRA_CHECK(condition) CHECK(condition)
+#else
+#define EXTRA_CHECK(condition) ((void) 0)
+#endif
+
+#endif  // V8_BASE_LOGGING_H_
diff --git a/src/base/macros.h b/src/base/macros.h
new file mode 100644
index 0000000..cef088c
--- /dev/null
+++ b/src/base/macros.h
@@ -0,0 +1,411 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_MACROS_H_
+#define V8_BASE_MACROS_H_
+
+#include <cstring>
+
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/logging.h"
+
+
+// The expression OFFSET_OF(type, field) computes the byte-offset
+// of the specified field relative to the containing type. This
+// corresponds to 'offsetof' (in stddef.h), except that it doesn't
+// use 0 or NULL, which causes a problem with the compiler warnings
+// we have enabled (which is also why 'offsetof' doesn't seem to work).
+// Here we simply use the non-zero value 4, which seems to work.
+#define OFFSET_OF(type, field)                                          \
+  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
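+
+// For example (illustrative only), given
+//   struct Point { int x; int y; };
+// OFFSET_OF(Point, y) yields the byte offset of |y| within Point, i.e.
+// sizeof(int) on typical ABIs.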
+
+
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions.  It's less safe than arraysize as it accepts some
+// (although not all) pointers.  Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors.  If you see a compiler error
+//
+//   "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 bytes or greater than 4 bytes will be (righteously) rejected.
+#define ARRAYSIZE_UNSAFE(a)     \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))  // NOLINT
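+
+// Example usage (illustrative only):
+//   int primes[] = {2, 3, 5, 7};
+//   size_t count = ARRAYSIZE_UNSAFE(primes);  // count == 4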
+
+
+#if V8_OS_NACL
+
+// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
+// definition of arraysize() below, so we have to use the unsafe version for
+// now.
+#define arraysize ARRAYSIZE_UNSAFE
+
+#else  // V8_OS_NACL
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.  If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function.  In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
+// due to a limitation in C++'s template system.  The limitation might
+// eventually be removed, but it hasn't happened yet.
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+
+#if !V8_CC_MSVC
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#endif  // V8_OS_NACL
+
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+#if V8_HAS_CXX11_STATIC_ASSERT
+
+// Under C++11, just use static_assert.
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+#else
+
+template <bool>
+struct CompileAssert {};
+
+#define COMPILE_ASSERT(expr, msg)                \
+  typedef CompileAssert<static_cast<bool>(expr)> \
+      msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+//                               // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+#endif
+
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15-.  Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory.  gcc
+// 4.0.1 has an optimizer that takes advantage of this.  So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 .  Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement.  On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+template <class Dest, class Source>
+V8_INLINE Dest bit_cast(Source const& source) {
+  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName)  \
+  TypeName(const TypeName&) V8_DELETE;      \
+  void operator=(const TypeName&) V8_DELETE
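+
+// Example usage (illustrative only; |Resource| is a hypothetical class):
+//   class Resource {
+//    public:
+//     Resource();
+//    private:
+//     DISALLOW_COPY_AND_ASSIGN(Resource);
+//   };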
+
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName)  \
+  TypeName() V8_DELETE;                           \
+  DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator)    V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
+
+
+// Newly written code should use WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT WARN_UNUSED_RESULT
+
+
+// Define V8_USE_ADDRESS_SANITIZER macros.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_USE_ADDRESS_SANITIZER 1
+#endif
+#endif
+
+// Define DISABLE_ASAN macros.
+#ifdef V8_USE_ADDRESS_SANITIZER
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
+#else
+#define DISABLE_ASAN
+#endif
+
+
+#if V8_CC_GNU
+#define V8_IMMEDIATE_CRASH() __builtin_trap()
+#else
+#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
+#endif
+
+
+// Use C++11 static_assert if possible, which gives error
+// messages that are easier to understand on first sight.
+#if V8_HAS_CXX11_STATIC_ASSERT
+#define STATIC_ASSERT(test) static_assert(test, #test)
+#else
+// This is inspired by the static assertion facility in boost.  This
+// is pretty magical.  If it causes you trouble on a platform you may
+// find a fix in the boost code.
+template <bool> class StaticAssertion;
+template <> class StaticAssertion<true> { };
+// This macro joins two tokens.  If one of the tokens is a macro the
+// helper call causes it to be resolved before joining.
+#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
+#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
+// Causes an error during compilation if the condition is not
+// statically known to be true.  It is formulated as a typedef so that
+// it can be used wherever a typedef can be used.  Beware that this
+// actually causes each use to introduce a new defined type with a
+// name depending on the source line.
+template <int> class StaticAssertionHelper { };
+#define STATIC_ASSERT(test)                                                    \
+  typedef                                                                     \
+    StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
+    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED
+
+#endif
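+
+// Example usage (illustrative only):
+//   STATIC_ASSERT(sizeof(int32_t) == 4);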
+
+
+// The USE(x) template is used to silence C++ compiler warnings
+// issued for (yet) unused variables (typically parameters).
+template <typename T>
+inline void USE(T) { }
+
+
+#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+
+// Define our own macros for writing 64-bit constants.  This is less fragile
+// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
+// works on compilers that don't have it (like MSVC).
+#if V8_CC_MSVC
+# define V8_UINT64_C(x)   (x ## UI64)
+# define V8_INT64_C(x)    (x ## I64)
+# if V8_HOST_ARCH_64_BIT
+#  define V8_INTPTR_C(x)  (x ## I64)
+#  define V8_PTR_PREFIX   "ll"
+# else
+#  define V8_INTPTR_C(x)  (x)
+#  define V8_PTR_PREFIX   ""
+# endif  // V8_HOST_ARCH_64_BIT
+#elif V8_CC_MINGW64
+# define V8_UINT64_C(x)   (x ## ULL)
+# define V8_INT64_C(x)    (x ## LL)
+# define V8_INTPTR_C(x)   (x ## LL)
+# define V8_PTR_PREFIX    "I64"
+#elif V8_HOST_ARCH_64_BIT
+# if V8_OS_MACOSX
+#  define V8_UINT64_C(x)   (x ## ULL)
+#  define V8_INT64_C(x)    (x ## LL)
+# else
+#  define V8_UINT64_C(x)   (x ## UL)
+#  define V8_INT64_C(x)    (x ## L)
+# endif
+# define V8_INTPTR_C(x)   (x ## L)
+# define V8_PTR_PREFIX    "l"
+#else
+# define V8_UINT64_C(x)   (x ## ULL)
+# define V8_INT64_C(x)    (x ## LL)
+# define V8_INTPTR_C(x)   (x)
+# define V8_PTR_PREFIX    ""
+#endif
+
+#define V8PRIxPTR V8_PTR_PREFIX "x"
+#define V8PRIdPTR V8_PTR_PREFIX "d"
+#define V8PRIuPTR V8_PTR_PREFIX "u"
+
+// Fix for Mac OS X defining uintptr_t as "unsigned long":
+#if V8_OS_MACOSX
+#undef V8PRIxPTR
+#define V8PRIxPTR "lx"
+#endif
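+
+// Example usage (illustrative only):
+//   intptr_t value = 42;
+//   printf("value: %" V8PRIdPTR "\n", value);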
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+//      write V8_2PART_UINT64_C(0x12345678,90123456);
+#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
+
+// Compute the 0-relative offset of some absolute value x of type T.
+// This allows conversion of Addresses and integral types into
+// 0-relative int offsets.
+template <typename T>
+inline intptr_t OffsetFrom(T x) {
+  return x - static_cast<T>(0);
+}
+
+
+// Compute the absolute value of type T for some 0-relative offset x.
+// This allows conversion of 0-relative int offsets into Addresses and
+// integral types.
+template <typename T>
+inline T AddressFrom(intptr_t x) {
+  return static_cast<T>(static_cast<T>(0) + x);
+}
+
+
+// Return the largest multiple of m which is <= x.
+template <typename T>
+inline T RoundDown(T x, intptr_t m) {
+  DCHECK(IS_POWER_OF_TWO(m));
+  return AddressFrom<T>(OffsetFrom(x) & -m);
+}
+
+
+// Return the smallest multiple of m which is >= x.
+template <typename T>
+inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
+}
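+
+// For example (illustrative only), with the power-of-two multiple m = 8:
+//   RoundDown(13, 8) == 8
+//   RoundUp(13, 8) == 16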
+
+#endif   // V8_BASE_MACROS_H_
diff --git a/src/base/once.cc b/src/base/once.cc
new file mode 100644
index 0000000..eaabf40
--- /dev/null
+++ b/src/base/once.cc
@@ -0,0 +1,53 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace base {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+  AtomicWord state = Acquire_Load(once);
+  // Fast path. The provided function was already executed.
+  if (state == ONCE_STATE_DONE) {
+    return;
+  }
+
+  // The function execution did not complete yet. The once object can be in one
+  // of the two following states:
+  //   - UNINITIALIZED: We are the first thread calling this function.
+  //   - EXECUTING_FUNCTION: Another thread is already executing the function.
+  //
+  // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+  // atomically.
+  state = Acquire_CompareAndSwap(
+      once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+  if (state == ONCE_STATE_UNINITIALIZED) {
+    // We are the first thread to call this function, so we have to call the
+    // function.
+    init_func(arg);
+    Release_Store(once, ONCE_STATE_DONE);
+  } else {
+    // Another thread has already started executing the function. We need to
+    // wait until it completes the initialization.
+    while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+      ::Sleep(0);
+#else
+      sched_yield();
+#endif
+      state = Acquire_Load(once);
+    }
+  }
+}
+
+} }  // namespace v8::base
diff --git a/src/base/once.h b/src/base/once.h
new file mode 100644
index 0000000..a8e8437
--- /dev/null
+++ b/src/base/once.h
@@ -0,0 +1,100 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by v8's internal code. Users
+// should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called OnceType.
+// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
+//   OnceType.
+// * A function CallOnce(OnceType* once, void (*init_func)()).
+//   This function, when invoked multiple times given the same OnceType object,
+//   will invoke init_func on the first call only, and will make sure none of
+//   the calls return before that first call to init_func has finished.
+//
+// Additionally, the following features are supported:
+// * A macro V8_ONCE_INIT which is expanded into the expression used to
+//   initialize a OnceType. This is only useful when clients embed a OnceType
+//   into a structure of their own and want to initialize it statically.
+// * The user can provide a parameter which CallOnce() forwards to the
+//   user-provided function when it is called; note that the parameter must
+//   be passed by pointer. Usage example:
+//     static int my_arg = 10;
+//     CallOnce(&my_once, &MyFunctionExpectingIntPointerArg, &my_arg);
+// * This implementation guarantees that OnceType is a POD (i.e. no static
+//   initializer generated).
+//
+// This implements a way to perform lazy initialization.  It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+//   void Init();
+//   V8_DECLARE_ONCE(once_init);
+//
+//   // Calls Init() exactly once.
+//   void InitOnce() {
+//     CallOnce(&once_init, &Init);
+//   }
+//
+// Note that if CallOnce() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization.  In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed.  Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef V8_BASE_ONCE_H_
+#define V8_BASE_ONCE_H_
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace base {
+
+typedef AtomicWord OnceType;
+
+#define V8_ONCE_INIT 0
+
+#define V8_DECLARE_ONCE(NAME) ::v8::base::OnceType NAME
+
+enum {
+  ONCE_STATE_UNINITIALIZED = 0,
+  ONCE_STATE_EXECUTING_FUNCTION = 1,
+  ONCE_STATE_DONE = 2
+};
+
+typedef void (*NoArgFunction)();
+typedef void (*PointerArgFunction)(void* arg);
+
+template <typename T>
+struct OneArgFunction {
+  typedef void (*type)(T);
+};
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+
+inline void CallOnce(OnceType* once, NoArgFunction init_func) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+  }
+}
+
+
+template <typename Arg>
+inline void CallOnce(OnceType* once,
+    typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+        static_cast<void*>(arg));
+  }
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ONCE_H_
diff --git a/src/base/platform/condition-variable-unittest.cc b/src/base/platform/condition-variable-unittest.cc
new file mode 100644
index 0000000..fe0ad2a
--- /dev/null
+++ b/src/base/platform/condition-variable-unittest.cc
@@ -0,0 +1,301 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(ConditionVariable, WaitForAfterNotifyOnSameThread) {
+  for (int n = 0; n < 10; ++n) {
+    Mutex mutex;
+    ConditionVariable cv;
+
+    LockGuard<Mutex> lock_guard(&mutex);
+
+    cv.NotifyOne();
+    EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+
+    cv.NotifyAll();
+    EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+  }
+}
+
+
+namespace {
+
+class ThreadWithMutexAndConditionVariable FINAL : public Thread {
+ public:
+  ThreadWithMutexAndConditionVariable()
+      : Thread(Options("ThreadWithMutexAndConditionVariable")),
+        running_(false),
+        finished_(false) {}
+  virtual ~ThreadWithMutexAndConditionVariable() {}
+
+  virtual void Run() OVERRIDE {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    running_ = true;
+    cv_.NotifyOne();
+    while (running_) {
+      cv_.Wait(&mutex_);
+    }
+    finished_ = true;
+    cv_.NotifyAll();
+  }
+
+  bool running_;
+  bool finished_;
+  ConditionVariable cv_;
+  Mutex mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
+  static const int kThreadCount = 128;
+  ThreadWithMutexAndConditionVariable threads[kThreadCount];
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+    threads[n].Start();
+    // Wait for nth thread to start.
+    while (!threads[n].running_) {
+      threads[n].cv_.Wait(&threads[n].mutex_);
+    }
+  }
+
+  for (int n = kThreadCount - 1; n >= 0; --n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_TRUE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+  }
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_TRUE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+    // Tell the nth thread to quit.
+    threads[n].running_ = false;
+    threads[n].cv_.NotifyOne();
+  }
+
+  for (int n = kThreadCount - 1; n >= 0; --n) {
+    // Wait for nth thread to quit.
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    while (!threads[n].finished_) {
+      threads[n].cv_.Wait(&threads[n].mutex_);
+    }
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_TRUE(threads[n].finished_);
+  }
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].Join();
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_TRUE(threads[n].finished_);
+  }
+}
+
+
+namespace {
+
+class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
+ public:
+  ThreadWithSharedMutexAndConditionVariable()
+      : Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
+        running_(false),
+        finished_(false),
+        cv_(NULL),
+        mutex_(NULL) {}
+  virtual ~ThreadWithSharedMutexAndConditionVariable() {}
+
+  virtual void Run() OVERRIDE {
+    LockGuard<Mutex> lock_guard(mutex_);
+    running_ = true;
+    cv_->NotifyAll();
+    while (running_) {
+      cv_->Wait(mutex_);
+    }
+    finished_ = true;
+    cv_->NotifyAll();
+  }
+
+  bool running_;
+  bool finished_;
+  ConditionVariable* cv_;
+  Mutex* mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSharedMutexAndConditionVariable) {
+  static const int kThreadCount = 128;
+  ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
+  ConditionVariable cv;
+  Mutex mutex;
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].mutex_ = &mutex;
+    threads[n].cv_ = &cv;
+  }
+
+  // Start all threads.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      EXPECT_FALSE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+      threads[n].Start();
+    }
+  }
+
+  // Wait for all threads to start.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      while (!threads[n].running_) {
+        cv.Wait(&mutex);
+      }
+    }
+  }
+
+  // Make sure that all threads are running.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      EXPECT_TRUE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+    }
+  }
+
+  // Tell all threads to quit.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      EXPECT_TRUE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+      // Tell the nth thread to quit.
+      threads[n].running_ = false;
+    }
+    cv.NotifyAll();
+  }
+
+  // Wait for all threads to quit.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      while (!threads[n].finished_) {
+        cv.Wait(&mutex);
+      }
+    }
+  }
+
+  // Make sure all threads are finished.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      EXPECT_FALSE(threads[n].running_);
+      EXPECT_TRUE(threads[n].finished_);
+    }
+  }
+
+  // Join all threads.
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].Join();
+  }
+}
+
+
+namespace {
+
+class LoopIncrementThread FINAL : public Thread {
+ public:
+  LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
+                      ConditionVariable* cv, Mutex* mutex)
+      : Thread(Options("LoopIncrementThread")),
+        rem_(rem),
+        counter_(counter),
+        limit_(limit),
+        thread_count_(thread_count),
+        cv_(cv),
+        mutex_(mutex) {
+    EXPECT_LT(rem, thread_count);
+    EXPECT_EQ(0, limit % thread_count);
+  }
+
+  virtual void Run() OVERRIDE {
+    int last_count = -1;
+    while (true) {
+      LockGuard<Mutex> lock_guard(mutex_);
+      int count = *counter_;
+      while (count % thread_count_ != rem_ && count < limit_) {
+        cv_->Wait(mutex_);
+        count = *counter_;
+      }
+      if (count >= limit_) break;
+      EXPECT_EQ(*counter_, count);
+      if (last_count != -1) {
+        EXPECT_EQ(last_count + (thread_count_ - 1), count);
+      }
+      count++;
+      *counter_ = count;
+      last_count = count;
+      cv_->NotifyAll();
+    }
+  }
+
+ private:
+  const int rem_;
+  int* counter_;
+  const int limit_;
+  const int thread_count_;
+  ConditionVariable* cv_;
+  Mutex* mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, LoopIncrement) {
+  static const int kMaxThreadCount = 16;
+  Mutex mutex;
+  ConditionVariable cv;
+  for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
+    int limit = thread_count * 10;
+    int counter = 0;
+
+    // Set up the threads.
+    Thread** threads = new Thread*[thread_count];
+    for (int n = 0; n < thread_count; ++n) {
+      threads[n] = new LoopIncrementThread(n, &counter, limit, thread_count,
+                                           &cv, &mutex);
+    }
+
+    // Start all threads.
+    for (int n = thread_count - 1; n >= 0; --n) {
+      threads[n]->Start();
+    }
+
+    // Join and cleanup all threads.
+    for (int n = 0; n < thread_count; ++n) {
+      threads[n]->Join();
+      delete threads[n];
+    }
+    delete[] threads;
+
+    EXPECT_EQ(limit, counter);
+  }
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/condition-variable.cc b/src/base/platform/condition-variable.cc
new file mode 100644
index 0000000..4547b66
--- /dev/null
+++ b/src/base/platform/condition-variable.cc
@@ -0,0 +1,322 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include <errno.h>
+#include <time.h>
+
+#include <limits>
+
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+  // hack to support cross-compiling Chrome for Android in AOSP. Remove
+  // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+  // On Free/Net/OpenBSD and Linux with glibc we can change the time
+  // source for pthread_cond_timedwait() to use the monotonic clock.
+  pthread_condattr_t attr;
+  int result = pthread_condattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+  DCHECK_EQ(0, result);
+  result = pthread_cond_init(&native_handle_, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_condattr_destroy(&attr);
+#else
+  int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+  int result = pthread_cond_destroy(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+  int result = pthread_cond_signal(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+  int result = pthread_cond_broadcast(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+  mutex->AssertHeldAndUnmark();
+  int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+  DCHECK_EQ(0, result);
+  USE(result);
+  mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+  struct timespec ts;
+  int result;
+  mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+  // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+  // not depend on the real time clock, which is what you really WANT here!
+  ts = rel_time.ToTimespec();
+  DCHECK_GE(ts.tv_sec, 0);
+  DCHECK_GE(ts.tv_nsec, 0);
+  result = pthread_cond_timedwait_relative_np(
+      &native_handle_, &mutex->native_handle(), &ts);
+#else
+  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+  // hack to support cross-compiling Chrome for Android in AOSP. Remove
+  // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+  // On Free/Net/OpenBSD and Linux with glibc we can change the time
+  // source for pthread_cond_timedwait() to use the monotonic clock.
+  result = clock_gettime(CLOCK_MONOTONIC, &ts);
+  DCHECK_EQ(0, result);
+  Time now = Time::FromTimespec(ts);
+#else
+  // The timeout argument to pthread_cond_timedwait() is in absolute time.
+  Time now = Time::NowFromSystemTime();
+#endif
+  Time end_time = now + rel_time;
+  DCHECK_GE(end_time, now);
+  ts = end_time.ToTimespec();
+  result = pthread_cond_timedwait(
+      &native_handle_, &mutex->native_handle(), &ts);
+#endif  // V8_OS_MACOSX
+  mutex->AssertUnheldAndMark();
+  if (result == ETIMEDOUT) {
+    return false;
+  }
+  DCHECK_EQ(0, result);
+  return true;
+}
+
+#elif V8_OS_WIN
+
+struct ConditionVariable::Event {
+  Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+    DCHECK(handle_ != NULL);
+  }
+
+  ~Event() {
+    BOOL ok = ::CloseHandle(handle_);
+    DCHECK(ok);
+    USE(ok);
+  }
+
+  bool WaitFor(DWORD timeout_ms) {
+    DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+    if (result == WAIT_OBJECT_0) {
+      return true;
+    }
+    DCHECK(result == WAIT_TIMEOUT);
+    return false;
+  }
+
+  HANDLE handle_;
+  Event* next_;
+  HANDLE thread_;
+  volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+  DCHECK(waitlist_ == NULL);
+
+  while (freelist_ != NULL) {
+    Event* event = freelist_;
+    freelist_ = event->next_;
+    delete event;
+  }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+  LockGuard<Mutex> lock_guard(&mutex_);
+
+  // Grab an event from the free list or create a new one.
+  Event* event = freelist_;
+  if (event != NULL) {
+    freelist_ = event->next_;
+  } else {
+    event = new Event;
+  }
+  event->thread_ = GetCurrentThread();
+  event->notified_ = false;
+
+#ifdef DEBUG
+  // The event must not be on the wait list.
+  for (Event* we = waitlist_; we != NULL; we = we->next_) {
+    DCHECK_NE(event, we);
+  }
+#endif
+
+  // Prepend the event to the wait list.
+  event->next_ = waitlist_;
+  waitlist_ = event;
+
+  return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+  LockGuard<Mutex> lock_guard(&mutex_);
+
+  // Remove the event from the wait list.
+  for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+    DCHECK_NE(NULL, *wep);
+    if (*wep == event) {
+      *wep = event->next_;
+      break;
+    }
+  }
+
+#ifdef DEBUG
+  // The event must not be on the free list.
+  for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+    DCHECK_NE(event, fe);
+  }
+#endif
+
+  // Reset the event.
+  BOOL ok = ::ResetEvent(event->handle_);
+  DCHECK(ok);
+  USE(ok);
+
+  // Insert the event into the free list.
+  event->next_ = freelist_;
+  freelist_ = event;
+
+  // Forward signals delivered after the timeout to the next waiting event.
+  if (!result && event->notified_ && waitlist_ != NULL) {
+    ok = ::SetEvent(waitlist_->handle_);
+    DCHECK(ok);
+    USE(ok);
+    waitlist_->notified_ = true;
+  }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+  // Notify the thread with the highest priority in the waitlist
+  // that was not already signalled.
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  Event* highest_event = NULL;
+  int highest_priority = std::numeric_limits<int>::min();
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (event->notified_) {
+      continue;
+    }
+    int priority = GetThreadPriority(event->thread_);
+    DCHECK_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+    if (priority >= highest_priority) {
+      highest_priority = priority;
+      highest_event = event;
+    }
+  }
+  if (highest_event != NULL) {
+    DCHECK(!highest_event->notified_);
+    ::SetEvent(highest_event->handle_);
+    highest_event->notified_ = true;
+  }
+}
+
+
+void ConditionVariable::NotifyAll() {
+  // Notify all threads on the waitlist.
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (!event->notified_) {
+      ::SetEvent(event->handle_);
+      event->notified_ = true;
+    }
+  }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  while (!event->WaitFor(INFINITE))
+    ;
+
+  // Reacquire the user mutex.
+  mutex->Lock();
+
+  // Release the wait event (we must have been notified).
+  DCHECK(event->notified_);
+  native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  bool result = false;
+  while (true) {
+    int64_t msec = (end - now).InMilliseconds();
+    if (msec >= static_cast<int64_t>(INFINITE)) {
+      result = event->WaitFor(INFINITE - 1);
+      if (result) {
+        break;
+      }
+      now = TimeTicks::Now();
+    } else {
+      result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+      break;
+    }
+  }
+
+  // Reacquire the user mutex.
+  mutex->Lock();
+
+  // Release the wait event.
+  DCHECK(!result || event->notified_);
+  native_handle_.Post(event, result);
+
+  return result;
+}
+
+#endif  // V8_OS_POSIX
+
+} }  // namespace v8::base
diff --git a/src/base/platform/condition-variable.h b/src/base/platform/condition-variable.h
new file mode 100644
index 0000000..b5a6c3f
--- /dev/null
+++ b/src/base/platform/condition-variable.h
@@ -0,0 +1,118 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs.
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
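+//
+// Because wakeups can be spurious, waiting code should recheck its predicate
+// in a loop. Illustrative sketch (assuming a bool |ready| that is guarded by
+// |mutex| and set by the notifying thread):
+//
+//   LockGuard<Mutex> lock_guard(&mutex);
+//   while (!ready) {
+//     cv.Wait(&mutex);
+//   }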
+
+class ConditionVariable FINAL {
+ public:
+  ConditionVariable();
+  ~ConditionVariable();
+
+  // If any threads are waiting on this condition variable, calling
+  // |NotifyOne()| unblocks one of the waiting threads.
+  void NotifyOne();
+
+  // Unblocks all threads currently waiting for this condition variable.
+  void NotifyAll();
+
+  // |Wait()| causes the calling thread to block until the condition variable is
+  // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+  // the current executing thread, and adds it to the list of threads waiting on
+  // this condition variable. The thread will be unblocked when |NotifyAll()| or
+  // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+  // unblocked, regardless of the reason, the lock on the mutex is reacquired
+  // and |Wait()| exits.
+  void Wait(Mutex* mutex);
+
+  // Atomically releases the mutex, blocks the current executing thread, and
+  // adds it to the list of threads waiting on this condition variable. The
+  // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+  // or when the relative timeout |rel_time| expires. It may also be unblocked
+  // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+  // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+  // was notified prior to the timeout.
+  bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) WARN_UNUSED_RESULT;
+
+  // The implementation-defined native handle type.
+#if V8_OS_POSIX
+  typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+  struct Event;
+  class NativeHandle FINAL {
+   public:
+    NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+    ~NativeHandle();
+
+    Event* Pre() WARN_UNUSED_RESULT;
+    void Post(Event* event, bool result);
+
+    Mutex* mutex() { return &mutex_; }
+    Event* waitlist() { return waitlist_; }
+
+   private:
+    Event* waitlist_;
+    Event* freelist_;
+    Mutex mutex_;
+
+    DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+  };
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+//   static LazyConditionVariable my_condvar =
+//       LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<Mutex> lock_guard(&my_mutex);
+//     my_condvar.Pointer()->Wait(&my_mutex);
+//   }
+typedef LazyStaticInstance<
+    ConditionVariable, DefaultConstructTrait<ConditionVariable>,
+    ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/src/base/platform/elapsed-timer.h b/src/base/platform/elapsed-timer.h
new file mode 100644
index 0000000..dccba3a
--- /dev/null
+++ b/src/base/platform/elapsed-timer.h
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+#define V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+
+#include "src/base/logging.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+class ElapsedTimer FINAL {
+ public:
+#ifdef DEBUG
+  ElapsedTimer() : started_(false) {}
+#endif
+
+  // Starts this timer. Once started, a timer can be checked with
+  // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+  // This method must not be called on an already started timer.
+  void Start() {
+    DCHECK(!IsStarted());
+    start_ticks_ = Now();
+#ifdef DEBUG
+    started_ = true;
+#endif
+    DCHECK(IsStarted());
+  }
+
+  // Stops this timer. Must not be called on a timer that was not
+  // started before.
+  void Stop() {
+    DCHECK(IsStarted());
+    start_ticks_ = TimeTicks();
+#ifdef DEBUG
+    started_ = false;
+#endif
+    DCHECK(!IsStarted());
+  }
+
+  // Returns |true| if this timer was started previously.
+  bool IsStarted() const {
+    DCHECK(started_ || start_ticks_.IsNull());
+    DCHECK(!started_ || !start_ticks_.IsNull());
+    return !start_ticks_.IsNull();
+  }
+
+  // Restarts the timer and returns the time elapsed since the previous start.
+  // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+  // and then starting the timer again, but does so in one single operation,
+  // avoiding the need to obtain the clock value twice. It may only be called
+  // on a previously started timer.
+  TimeDelta Restart() {
+    DCHECK(IsStarted());
+    TimeTicks ticks = Now();
+    TimeDelta elapsed = ticks - start_ticks_;
+    DCHECK(elapsed.InMicroseconds() >= 0);
+    start_ticks_ = ticks;
+    DCHECK(IsStarted());
+    return elapsed;
+  }
+
+  // Returns the time elapsed since the previous start. This method may only
+  // be called on a previously started timer.
+  TimeDelta Elapsed() const {
+    DCHECK(IsStarted());
+    TimeDelta elapsed = Now() - start_ticks_;
+    DCHECK(elapsed.InMicroseconds() >= 0);
+    return elapsed;
+  }
+
+  // Returns |true| if the specified |time_delta| has elapsed since the
+  // previous start, or |false| if not. This method may only be called on
+  // a previously started timer.
+  bool HasExpired(TimeDelta time_delta) const {
+    DCHECK(IsStarted());
+    return Elapsed() >= time_delta;
+  }
+
+ private:
+  static V8_INLINE TimeTicks Now() {
+    TimeTicks now = TimeTicks::HighResolutionNow();
+    DCHECK(!now.IsNull());
+    return now;
+  }
+
+  TimeTicks start_ticks_;
+#ifdef DEBUG
+  bool started_;
+#endif
+};
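+
+// Example usage (illustrative only; DoExpensiveWork() is hypothetical):
+//   ElapsedTimer timer;
+//   timer.Start();
+//   DoExpensiveWork();
+//   TimeDelta elapsed = timer.Elapsed();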
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_ELAPSED_TIMER_H_
diff --git a/src/base/platform/mutex-unittest.cc b/src/base/platform/mutex-unittest.cc
new file mode 100644
index 0000000..5af5efb
--- /dev/null
+++ b/src/base/platform/mutex-unittest.cc
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(Mutex, LockGuardMutex) {
+  Mutex mutex;
+  { LockGuard<Mutex> lock_guard(&mutex); }
+  { LockGuard<Mutex> lock_guard(&mutex); }
+}
+
+
+TEST(Mutex, LockGuardRecursiveMutex) {
+  RecursiveMutex recursive_mutex;
+  { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex); }
+  {
+    LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
+    LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+  }
+}
+
+
+TEST(Mutex, LockGuardLazyMutex) {
+  LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+}
+
+
+TEST(Mutex, LockGuardLazyRecursiveMutex) {
+  LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+  { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer()); }
+  {
+    LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
+    LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+  }
+}
+
+
+TEST(Mutex, MultipleMutexes) {
+  Mutex mutex1;
+  Mutex mutex2;
+  Mutex mutex3;
+  // Order 1
+  mutex1.Lock();
+  mutex2.Lock();
+  mutex3.Lock();
+  mutex1.Unlock();
+  mutex2.Unlock();
+  mutex3.Unlock();
+  // Order 2
+  mutex1.Lock();
+  mutex2.Lock();
+  mutex3.Lock();
+  mutex3.Unlock();
+  mutex2.Unlock();
+  mutex1.Unlock();
+}
+
+
+TEST(Mutex, MultipleRecursiveMutexes) {
+  RecursiveMutex recursive_mutex1;
+  RecursiveMutex recursive_mutex2;
+  // Order 1
+  recursive_mutex1.Lock();
+  recursive_mutex2.Lock();
+  EXPECT_TRUE(recursive_mutex1.TryLock());
+  EXPECT_TRUE(recursive_mutex2.TryLock());
+  recursive_mutex1.Unlock();
+  recursive_mutex1.Unlock();
+  recursive_mutex2.Unlock();
+  recursive_mutex2.Unlock();
+  // Order 2
+  recursive_mutex1.Lock();
+  EXPECT_TRUE(recursive_mutex1.TryLock());
+  recursive_mutex2.Lock();
+  EXPECT_TRUE(recursive_mutex2.TryLock());
+  recursive_mutex2.Unlock();
+  recursive_mutex1.Unlock();
+  recursive_mutex2.Unlock();
+  recursive_mutex1.Unlock();
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/mutex.cc b/src/base/platform/mutex.cc
new file mode 100644
index 0000000..8b1e305
--- /dev/null
+++ b/src/base/platform/mutex.cc
@@ -0,0 +1,191 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include <errno.h>
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+  int result;
+#if defined(DEBUG)
+  // Use an error checking mutex in debug mode.
+  pthread_mutexattr_t attr;
+  result = pthread_mutexattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+  DCHECK_EQ(0, result);
+  result = pthread_mutex_init(mutex, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_destroy(&attr);
+#else
+  // Use a fast mutex (default attributes).
+  result = pthread_mutex_init(mutex, NULL);
+#endif  // defined(DEBUG)
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+  pthread_mutexattr_t attr;
+  int result = pthread_mutexattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  DCHECK_EQ(0, result);
+  result = pthread_mutex_init(mutex, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_destroy(&attr);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_destroy(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_lock(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_unlock(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_trylock(mutex);
+  if (result == EBUSY) {
+    return false;
+  }
+  DCHECK_EQ(0, result);
+  return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+  InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+  InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+  DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+  EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+  LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+  return TryEnterCriticalSection(cs);
+}
+
+#endif  // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+  InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+  level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+  DestroyNativeHandle(&native_handle_);
+  DCHECK_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+  LockNativeHandle(&native_handle_);
+  AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+  AssertHeldAndUnmark();
+  UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+  if (!TryLockNativeHandle(&native_handle_)) {
+    return false;
+  }
+  AssertUnheldAndMark();
+  return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+  InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+  level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+  DestroyNativeHandle(&native_handle_);
+  DCHECK_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+  LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+  DCHECK_LE(0, level_);
+  level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+  DCHECK_LT(0, level_);
+  level_--;
+#endif
+  UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+  if (!TryLockNativeHandle(&native_handle_)) {
+    return false;
+  }
+#ifdef DEBUG
+  DCHECK_LE(0, level_);
+  level_++;
+#endif
+  return true;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/mutex.h b/src/base/platform/mutex.h
new file mode 100644
index 0000000..5d0e57b
--- /dev/null
+++ b/src/base/platform/mutex.h
@@ -0,0 +1,215 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_MUTEX_H_
+#define V8_BASE_PLATFORM_MUTEX_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/logging.h"
+
+#if V8_OS_POSIX
+#include <pthread.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+//   either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+//   |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+//   attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
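+//
+// Illustrative sketch of the |TryLock()| contract (not part of this header):
+//
+//   if (mutex.TryLock()) {
+//     // ... critical section ...
+//     mutex.Unlock();
+//   } else {
+//     // Another thread currently owns the mutex; do something else.
+//   }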
+
+class Mutex FINAL {
+ public:
+  Mutex();
+  ~Mutex();
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread immediately. If the mutex
+  // is already locked by another thread, suspends the calling thread until
+  // the mutex is unlocked.
+  void Lock();
+
+  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+  // the calling thread on entrance.
+  void Unlock();
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  bool TryLock() WARN_UNUSED_RESULT;
+
+  // The implementation-defined native handle type.
+#if V8_OS_POSIX
+  typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+  typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+#ifdef DEBUG
+  int level_;
+#endif
+
+  V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+    DCHECK_EQ(1, level_);
+    level_--;
+#endif
+  }
+
+  V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+    DCHECK_EQ(0, level_);
+    level_++;
+#endif
+  }
+
+  friend class ConditionVariable;
+
+  DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<Mutex> guard(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
+                           ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+//   when it successfully calls either |Lock()| or |TryLock()|. During this
+//   period, the thread may make additional calls to |Lock()| or |TryLock()|.
+//   The period of ownership ends when the thread makes a matching number of
+//   calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+//   calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+//   they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+//   unspecified, but after that number is reached, calls to |Lock()| will
+//   probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex FINAL {
+ public:
+  RecursiveMutex();
+  ~RecursiveMutex();
+
+  // Locks the mutex. If another thread has already locked the mutex, a call to
+  // |Lock()| will block execution until the lock is acquired. A thread may call
+  // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+  // after the thread makes a matching number of calls to |Unlock()|.
+  // The behavior is undefined if the mutex is not unlocked before being
+  // destroyed, i.e. some thread still owns it.
+  void Lock();
+
+  // Unlocks the mutex if its level of ownership is 1 (that is, this thread
+  // has made exactly one more call to |Lock()| than to |Unlock()|); otherwise
+  // reduces the level of ownership by 1. The mutex must be locked by the
+  // current thread of execution; if not, the behavior is undefined.
+  void Unlock();
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  bool TryLock() WARN_UNUSED_RESULT;
+
+  // The implementation-defined native handle type.
+  typedef Mutex::NativeHandle NativeHandle;
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+#ifdef DEBUG
+  int level_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
+
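+// Example (illustrative only): ownership is released only after a matching
+// number of |Unlock()| calls, so code that holds the lock may call other
+// code that acquires it again.
+//
+//   RecursiveMutex mutex;
+//
+//   void Inner() {
+//     mutex.Lock();    // Level of ownership becomes 2 when called below.
+//     // ...
+//     mutex.Unlock();  // Level of ownership drops back to 1.
+//   }
+//
+//   void Outer() {
+//     mutex.Lock();    // Level of ownership becomes 1.
+//     Inner();         // Would deadlock with a plain Mutex.
+//     mutex.Unlock();  // Level of ownership becomes 0, mutex is released.
+//   }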
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+//   static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+                           DefaultConstructTrait<RecursiveMutex>,
+                           ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard FINAL {
+ public:
+  explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+  ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+  Mutex* mutex_;
+
+  DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
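+
+// Example (illustrative only; |mutex_|, |full_|, |buffer_| and |count_| are
+// hypothetical members): the guard releases the mutex on every path out of
+// the scope, including early returns.
+//
+//   bool Enqueue(int value) {
+//     LockGuard<Mutex> lock_guard(&mutex_);  // |mutex_| is a Mutex member.
+//     if (full_) return false;               // Unlocked by ~LockGuard().
+//     buffer_[count_++] = value;
+//     return true;                           // Unlocked by ~LockGuard().
+//   }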
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_MUTEX_H_
diff --git a/src/base/platform/platform-cygwin.cc b/src/base/platform/platform-cygwin.cc
new file mode 100644
index 0000000..8a767cf
--- /dev/null
+++ b/src/base/platform/platform-cygwin.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/time.h>
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/win32-headers.h"
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return tzname[0];  // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  // On Cygwin, struct tm does not contain a tm_gmtoff field.
+  time_t utc = time(NULL);
+  DCHECK(utc != -1);
+  struct tm* loc = localtime(&utc);
+  DCHECK(loc != NULL);
+  // mktime(localtime(utc)) - utc includes any daylight savings offset, so
+  // subtract it.
+  return static_cast<double>((mktime(loc) - utc) * msPerSecond -
+                             (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF was unexpected; just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+  // Nothing to do on Cygwin.
+}
+
+
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address_, size_);
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  DCHECK(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-freebsd.cc b/src/base/platform/platform-freebsd.cc
new file mode 100644
index 0000000..507b946
--- /dev/null
+++ b/src/base/platform/platform-freebsd.cc
@@ -0,0 +1,307 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+
+#include <sys/fcntl.h>  // open
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <strings.h>    // index
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
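+// Parses a hexadecimal address; for example, the 10-character buffer
+// "0x0804a000" built below yields 0x0804a000 (strtol with base 16 accepts
+// the "0x" prefix).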
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return result;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    bytes_read = read(fd, addr_buffer + 2, 1);
+    if (bytes_read < 1) break;
+    if (addr_buffer[2] != '-') break;
+    bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      // Use a separate variable for the result of read() so the position
+      // counter in |bytes_read| is not clobbered.
+      ssize_t bytes_read1 = read(fd, buffer + bytes_read, 1);
+      if (bytes_read1 < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line.  Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    result.push_back(SharedLibraryAddress(start_of_path, start, end));
+  }
+  close(fd);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-linux.cc b/src/base/platform/platform-linux.cc
new file mode 100644
index 0000000..eff5ced
--- /dev/null
+++ b/src/base/platform/platform-linux.cc
@@ -0,0 +1,446 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Linux goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, the OS raises an exception when executing code
+// in that page.
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+    (defined(__arm__) || defined(__aarch64__)) && \
+    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>  // NOLINT
+#endif
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+#if V8_OS_NACL
+#if !defined(MAP_NORESERVE)
+// PNaCL doesn't have this, so we always grab all of the memory, which is bad.
+#define MAP_NORESERVE 0
+#endif
+#else
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+  // We use these, as well as a couple of other defines, to statically
+  // determine which FP ABI is used.
+  // GCC versions 4.4 and below don't support hard-fp.
+  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+  // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000                                          \
+                     + __GNUC_MINOR__ * 100                                    \
+                     + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+  return true;
+#else
+  return false;
+#endif
+
+#elif GCC_VERSION < 40500
+  return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+  return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+      !defined(__VFP_FP__)
+  return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for."          \
+       "Please report it on this issue"                                        \
+       "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif  // def __arm__
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+#if V8_OS_NACL
+  // Missing support for tm_zone field.
+  return "";
+#else
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+#endif
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+#if V8_OS_NACL
+  // Missing support for the tm_gmtoff field.
+  return 0;
+#else
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+#endif
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
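+  // A typical line reads (values illustrative):
+  //   00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/dbus-daemon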
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
+      if (c == EOF) break;  // EOF was unexpected; just exit.
+
+      // Process the filename if found.
+      if ((c == '/') || (c == '[')) {
+        // Push the '/' or '[' back into the stream to be read below.
+        ungetc(c, fp);
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' or '[' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+  if (f == NULL) {
+    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+    OS::Abort();
+  }
+  void* addr = mmap(OS::GetRandomMmapAddr(), size,
+#if V8_OS_NACL
+                    // The Native Client port of V8 uses an interpreter,
+                    // so code pages don't need PROT_EXEC.
+                    PROT_READ,
+#else
+                    PROT_READ | PROT_EXEC,
+#endif
+                    MAP_PRIVATE, fileno(f), 0);
+  DCHECK(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(address_, size_);
+#endif
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(result, size);
+#endif
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if V8_OS_NACL
+  // The Native Client port of V8 uses an interpreter,
+  // so code pages don't need PROT_EXEC.
+  int prot = PROT_READ | PROT_WRITE;
+#else
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(base, size);
+#endif
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return true;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-macos.cc b/src/base/platform/platform-macos.cc
new file mode 100644
index 0000000..77893ee
--- /dev/null
+++ b/src/base/platform/platform-macos.cc
@@ -0,0 +1,310 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for MacOS goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <dlfcn.h>
+#include <mach/mach_init.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <AvailabilityMacros.h>
+
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <mach/mach.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/vm_statistics.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags that tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(OS::GetRandomMmapAddr(),
+                     msize,
+                     prot,
+                     MAP_PRIVATE | MAP_ANON,
+                     kMmapFd,
+                     kMmapFdOffset);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header),
+        SEG_TEXT,
+        SECT_TEXT,
+        &size);
+#else
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+    if (code_ptr == NULL) continue;
+    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    result.push_back(
+        SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
+  }
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  return mmap(address,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-openbsd.cc b/src/base/platform/platform-openbsd.cc
new file mode 100644
index 0000000..4e706cb
--- /dev/null
+++ b/src/base/platform/platform-openbsd.cc
@@ -0,0 +1,338 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF was unexpected; just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+  if (f == NULL) {
+    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+    OS::Abort();
+  }
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  DCHECK(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
new file mode 100644
index 0000000..0fc04fc
--- /dev/null
+++ b/src/base/platform/platform-posix.cc
@@ -0,0 +1,678 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for POSIX goes here. This is not a platform on its
+// own, but contains the parts which are the same across the POSIX platforms
+// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
+
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h>  // for pthread_set_name_np
+#endif
+#include <sched.h>  // for sched_yield
+#include <time.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+    defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h>  // NOLINT, for sysctl
+#endif
+
+#undef MAP_TYPE
+
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define LOG_TAG "v8"
+#include <android/log.h>  // NOLINT
+#endif
+
+#include <cmath>
+#include <cstdlib>
+
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef V8_FAST_TLS_SUPPORTED
+#include "src/base/atomicops.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <dlfcn.h>
+#endif
+
+#if V8_OS_LINUX
+#include <sys/prctl.h>  // NOLINT, for prctl
+#endif
+
+#if !V8_OS_NACL
+#include <sys/syscall.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+// 0 is never a valid thread id.
+const pthread_t kNoThread = (pthread_t) 0;
+
+bool g_hard_abort = false;
+
+const char* g_gc_fake_mmap = NULL;
+
+}  // namespace
+
+
+int OS::ActivationFrameAlignment() {
+#if V8_TARGET_ARCH_ARM
+  // On EABI ARM targets this is required for fp correctness in the
+  // runtime system.
+  return 8;
+#elif V8_TARGET_ARCH_MIPS
+  return 8;
+#else
+  // Otherwise we just assume 16 byte alignment, i.e.:
+  // - With gcc 4.4 the tree vectorization optimizer can generate code
+  //   that requires 16 byte alignment such as movdqa on x86.
+  // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
+  //   see "Mac OS X ABI Function Call Guide"
+  return 16;
+#endif
+}
+
+
+intptr_t OS::CommitPageSize() {
+  static intptr_t page_size = getpagesize();
+  return page_size;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap has a return value which is only checked in
+  // debug builds here.
+  int result = munmap(address, size);
+  USE(result);
+  DCHECK(result == 0);
+}
+
+
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif V8_OS_NACL
+  // The Native Client port of V8 uses an interpreter, so
+  // code pages don't need PROT_EXEC.
+  mprotect(address, size, PROT_READ);
+#else
+  mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+#else
+  mprotect(address, size, PROT_NONE);
+#endif
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+                    const char* const gc_fake_mmap) {
+  if (random_seed) {
+    platform_random_number_generator.Pointer()->SetSeed(random_seed);
+  }
+  g_hard_abort = hard_abort;
+  g_gc_fake_mmap = gc_fake_mmap;
+}
+
+
+const char* OS::GetGCFakeMMapFile() {
+  return g_gc_fake_mmap;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+#if V8_OS_NACL
+  // TODO(bradchen): restore randomization once Native Client gets
+  // smarter about using mmap address hints.
+  // See http://code.google.com/p/nativeclient/issues/3341
+  return NULL;
+#endif
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(THREAD_SANITIZER)
+  // Dynamic tools do not support custom mmap addresses.
+  return NULL;
+#endif
+  uintptr_t raw_addr;
+  platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+                                                        sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+  // the hint address to 46 bits to give the kernel a fighting chance of
+  // fulfilling our placement request.
+  raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+  raw_addr &= 0x3ffff000;
+
+# ifdef __sun
+  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+  // half of the top half of the address space (that is, the third quarter).
+  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+  // system will not fail to mmap() because something else happens to already
+  // be mapped at our random address. We deliberately set the hint high enough
+  // to get well above the system's break (that is, the heap); Solaris and
+  // illumos will try the hint and if that fails allocate as if there were
+  // no hint at all. The high hint prevents the break from getting hemmed in
+  // at low values, ceding half of the address space to the system heap.
+  raw_addr += 0x80000000;
+# else
+  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on
+  // Mac OS X 10.6 and 10.7.
+  raw_addr += 0x20000000;
+# endif
+#endif
+  return reinterpret_cast<void*>(raw_addr);
+}
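+
+// Illustrative arithmetic for the 32-bit branch above (a comment-only sketch,
+// not part of the build): masking with 0x3ffff000 keeps a page-aligned value
+// below 1 GB, and adding 0x20000000 shifts the hint into the range
+// [0x20000000, 0x60000000). For example, a raw value of 0xDEADBEEF becomes
+//   (0xDEADBEEF & 0x3ffff000) + 0x20000000 == 0x3EADB000.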
+
+
+size_t OS::AllocateAlignment() {
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+}
+
+
+void OS::Sleep(int milliseconds) {
+  useconds_t ms = static_cast<useconds_t>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  if (g_hard_abort) {
+    V8_IMMEDIATE_CRASH();
+  }
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+  asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+  asm("brk 0");
+#elif V8_HOST_ARCH_MIPS
+  asm("break");
+#elif V8_HOST_ARCH_MIPS64
+  asm("break");
+#elif V8_HOST_ARCH_IA32
+#if V8_OS_NACL
+  asm("hlt");
+#else
+  asm("int $3");
+#endif  // V8_OS_NACL
+#elif V8_HOST_ARCH_X64
+  asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// Math functions
+
+double OS::nan_value() {
+  // NAN from math.h is defined in C99 and not in POSIX.
+  return NAN;
+}
+
+
+int OS::GetCurrentProcessId() {
+  return static_cast<int>(getpid());
+}
+
+
+int OS::GetCurrentThreadId() {
+#if defined(ANDROID)
+  return static_cast<int>(syscall(__NR_gettid));
+#elif defined(SYS_gettid)
+  return static_cast<int>(syscall(SYS_gettid));
+#else
+  // PNaCL doesn't have a way to get an integral thread ID, but it doesn't
+  // really matter, because we only need it in PerfJitLogger::LogRecordedBuffer.
+  return 0;
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX date/time support.
+//
+
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+#if V8_OS_NACL
+  // Optionally used in Logger::ResourceEvent.
+  return -1;
+#else
+  struct rusage usage;
+
+  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
+  *secs = usage.ru_utime.tv_sec;
+  *usecs = usage.ru_utime.tv_usec;
+  return 0;
+#endif
+}
+
+
+double OS::TimeCurrentMillis() {
+  return Time::Now().ToJsTime();
+}
+
+
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+  DCHECK(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+  DCHECK(cache == NULL);
+}
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
+  if (std::isnan(time)) return nan_value();
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return nan_value();
+  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
+}
+
+
+int OS::GetLastError() {
+  return errno;
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX stdio support.
+//
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  FILE* file = fopen(path, mode);
+  if (file == NULL) return NULL;
+  struct stat file_stat;
+  if (fstat(fileno(file), &file_stat) != 0) return NULL;
+  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+  if (is_regular_file) return file;
+  fclose(file);
+  return NULL;
+}
+
+
+bool OS::Remove(const char* path) {
+  return (remove(path) == 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+  return tmpfile();
+}
+
+
+const char* const OS::LogFileOpenMode = "w";
+
+
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vprintf(format, args);
+#endif
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vfprintf(out, format, args);
+#endif
+}
+
+
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+#else
+  vfprintf(stderr, format, args);
+#endif
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, length, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(char* str,
+                  int length,
+                  const char* format,
+                  va_list args) {
+  int n = vsnprintf(str, length, format, args);
+  if (n < 0 || n >= length) {
+    // Zero-terminate the buffer unless its length is zero.
+    if (length > 0)
+      str[length - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
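+
+// Usage sketch for the truncation contract above (illustrative only, not
+// part of the build): on overflow the function returns -1 but still leaves
+// a NUL-terminated prefix in the buffer.
+//
+//   char buf[8];
+//   int n = OS::SNPrintF(buf, sizeof(buf), "%s", "hello world");
+//   // n == -1 and buf now holds "hello w" plus the trailing '\0'.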
+
+
+// ----------------------------------------------------------------------------
+// POSIX string support.
+//
+
+char* OS::StrChr(char* str, int c) {
+  return strchr(str, c);
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+  strncpy(dest, src, n);
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX thread support.
+//
+
+class Thread::PlatformData {
+ public:
+  PlatformData() : thread_(kNoThread) {}
+  pthread_t thread_;  // Thread handle for pthread.
+  // Synchronizes thread creation
+  Mutex thread_creation_mutex_;
+};
+
+Thread::Thread(const Options& options)
+    : data_(new PlatformData),
+      stack_size_(options.stack_size()),
+      start_semaphore_(NULL) {
+  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
+    stack_size_ = PTHREAD_STACK_MIN;
+  }
+  set_name(options.name());
+}
+
+
+Thread::~Thread() {
+  delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
+  pthread_set_name_np(pthread_self(), name);
+#elif V8_OS_NETBSD
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+  pthread_setname_np(pthread_self(), "%s", name);
+#elif V8_OS_MACOSX
+  // pthread_setname_np is only available in 10.6 or later, so test
+  // for it at runtime.
+  int (*dynamic_pthread_setname_np)(const char*);
+  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+    dlsym(RTLD_DEFAULT, "pthread_setname_np");
+  if (dynamic_pthread_setname_np == NULL)
+    return;
+
+  // Mac OS X does not expose the length limit of the name, so hardcode it.
+  static const int kMaxNameLength = 63;
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+  dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+  prctl(PR_SET_NAME,
+        reinterpret_cast<unsigned long>(name),  // NOLINT
+        0, 0, 0);
+#endif
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // We take the lock here to make sure that pthread_create has finished,
+  // since we don't know which thread will run first (the original thread or
+  // the new one).
+  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
+  SetThreadName(thread->name());
+  DCHECK(thread->data()->thread_ != kNoThread);
+  thread->NotifyStartedAndRun();
+  return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+  strncpy(name_, name, sizeof(name_));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+  int result;
+  pthread_attr_t attr;
+  memset(&attr, 0, sizeof(attr));
+  result = pthread_attr_init(&attr);
+  DCHECK_EQ(0, result);
+  // Native client uses default stack size.
+#if !V8_OS_NACL
+  if (stack_size_ > 0) {
+    result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    DCHECK_EQ(0, result);
+  }
+#endif
+  {
+    LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+  }
+  DCHECK_EQ(0, result);
+  result = pthread_attr_destroy(&attr);
+  DCHECK_EQ(0, result);
+  DCHECK(data_->thread_ != kNoThread);
+  USE(result);
+}
+
+
+void Thread::Join() {
+  pthread_join(data_->thread_, NULL);
+}
+
+
+void Thread::YieldCPU() {
+  int result = sched_yield();
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if V8_OS_CYGWIN
+  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+  // because pthread_key_t is a pointer type on Cygwin. This will probably not
+  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+  return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+  return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
+}
+
+
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if V8_OS_CYGWIN
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = static_cast<intptr_t>(local_key);
+  return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+  return static_cast<pthread_key_t>(local_key);
+#endif
+}
+
+
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+  const size_t kBufferSize = 128;
+  char buffer[kBufferSize];
+  size_t buffer_size = kBufferSize;
+  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+  }
+  // The buffer now contains a string of the form XX.YY.ZZ, where
+  // XX is the major kernel version component.
+  // Make sure the buffer is 0-terminated.
+  buffer[kBufferSize - 1] = '\0';
+  char* period_pos = strchr(buffer, '.');
+  DCHECK(period_pos != NULL);
+  *period_pos = '\0';
+  int kernel_version_major =
+      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+  // The constants below are taken from pthreads.s from the XNU kernel
+  // sources archive at www.opensource.apple.com.
+  if (kernel_version_major < 11) {
+    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+    // same offsets.
+#if V8_HOST_ARCH_IA32
+    kMacTlsBaseOffset = 0x48;
+#else
+    kMacTlsBaseOffset = 0x60;
+#endif
+  } else {
+    // 11.x.x (Lion) changed the offset.
+    kMacTlsBaseOffset = 0;
+  }
+
+  Release_Store(&tls_base_offset_initialized, 1);
+}
+
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+  void* expected = reinterpret_cast<void*>(0x1234CAFE);
+  Thread::SetThreadLocal(key, expected);
+  void* actual = Thread::GetExistingThreadLocal(key);
+  if (expected != actual) {
+    V8_Fatal(__FILE__, __LINE__,
+             "V8 failed to initialize fast TLS on current kernel");
+  }
+  Thread::SetThreadLocal(key, NULL);
+}
+
+#endif  // V8_FAST_TLS_SUPPORTED
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+  bool check_fast_tls = false;
+  if (tls_base_offset_initialized == 0) {
+    check_fast_tls = true;
+    InitializeTlsBaseOffset();
+  }
+#endif
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+  // If we just initialized fast TLS support, make sure it works.
+  if (check_fast_tls) CheckFastTls(local_key);
+#endif
+  return local_key;
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_key_delete(pthread_key);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_setspecific(pthread_key, value);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-qnx.cc b/src/base/platform/platform-qnx.cc
new file mode 100644
index 0000000..2cb3228
--- /dev/null
+++ b/src/base/platform/platform-qnx.cc
@@ -0,0 +1,374 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for QNX goes here. For the POSIX-compatible
+// parts the implementation is in platform-posix.cc.
+
+#include <backtrace.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <ucontext.h>
+
+// QNX requires memory pages to be marked as executable.
+// Otherwise, the OS raises an exception when executing code in that page.
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/procfs.h>
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+// 0 is never a valid thread id on QNX since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+  // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+  // GCC versions 4.4 and below don't support hard-fp.
+  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
+  // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000                                          \
+                     + __GNUC_MINOR__ * 100                                    \
+                     + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+  return true;
+#else
+  return false;
+#endif
+
+#elif GCC_VERSION < 40500
+  return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+  return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+      !defined(__VFP_FP__)
+  return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for."          \
+       "Please report it on this issue"                                        \
+       "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif  // __arm__
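+
+// Illustrative note (not part of the build): GCC_VERSION packs the three
+// version components into one comparable integer, e.g. GCC 4.6.3 yields
+// 4 * 10000 + 6 * 100 + 3 == 40603, so the >= 40600 check above covers
+// every 4.6.x and later compiler.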
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  procfs_mapinfo *mapinfos = NULL, *mapinfo;
+  int proc_fd, num, i;
+
+  struct {
+    procfs_debuginfo info;
+    char buff[PATH_MAX];
+  } map;
+
+  char buf[PATH_MAX + 1];
+  snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
+
+  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+    return result;
+  }
+
+  /* Get the number of map entries.  */
+  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+    close(proc_fd);
+    return result;
+  }
+
+  mapinfos = reinterpret_cast<procfs_mapinfo *>(
+      malloc(num * sizeof(procfs_mapinfo)));
+  if (mapinfos == NULL) {
+    close(proc_fd);
+    return result;
+  }
+
+  /* Fill the map entries.  */
+  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
+      mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+    free(mapinfos);
+    close(proc_fd);
+    return result;
+  }
+
+  for (i = 0; i < num; i++) {
+    mapinfo = mapinfos + i;
+    if (mapinfo->flags & MAP_ELF) {
+      map.info.vaddr = mapinfo->vaddr;
+      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+        continue;
+      }
+      result.push_back(SharedLibraryAddress(
+          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+    }
+  }
+  free(mapinfos);
+  close(proc_fd);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
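+
+// Worked example for the trimming logic above (illustrative, assuming a
+// 4 KB OS::AllocateAlignment()): for size == 24 KB and alignment == 64 KB,
+// request_size starts at RoundUp(24 KB + 64 KB, 4 KB) == 88 KB. If mmap
+// returns base == 0x17000 (not 64 KB aligned), aligned_base rounds up to
+// 0x20000; the 36 KB prefix and the 28 KB suffix beyond aligned_size ==
+// 24 KB are unmapped, leaving exactly the 64 KB-aligned 24 KB block.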
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-solaris.cc b/src/base/platform/platform-solaris.cc
new file mode 100644
index 0000000..b9ef465
--- /dev/null
+++ b/src/base/platform/platform-solaris.cc
@@ -0,0 +1,279 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <dlfcn.h>  // dladdr
+#include <errno.h>
+#include <ieeefp.h>  // finite()
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>  // sigemptyset(), etc
+#include <sys/mman.h>  // mmap()
+#include <sys/regset.h>
+#include <sys/stack.h>  // for stack alignment
+#include <sys/time.h>  // gettimeofday(), timeradd()
+#include <time.h>
+#include <ucontext.h>  // walkstack(), getcontext()
+#include <unistd.h>  // getpagesize(), usleep()
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+// It seems there is a bug in some Solaris distributions (experienced in
+// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
+// access signbit() despite the availability of other C99 math functions.
+#ifndef signbit
+namespace std {
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive and negative
+  // versions of zero.
+  if (x == 0) {
+    return fpclass(x) & FP_NZERO;
+  } else {
+    // This won't detect negative NaN but that should be okay since we don't
+    // assume that behavior.
+    return x < 0;
+  }
+}
+}  // namespace std
+#endif  // signbit
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return tzname[0];  // The location of the timezone string on Solaris.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  tzset();
+  return -static_cast<double>(timezone * msPerSecond);
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  return std::vector<SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-unittest.cc b/src/base/platform/platform-unittest.cc
new file mode 100644
index 0000000..06fbee0
--- /dev/null
+++ b/src/base/platform/platform-unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/platform.h"
+
+#if V8_OS_POSIX
+#include <unistd.h>  // NOLINT
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(OS, GetCurrentProcessId) {
+#if V8_OS_POSIX
+  EXPECT_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
+#endif
+
+#if V8_OS_WIN
+  EXPECT_EQ(static_cast<int>(::GetCurrentProcessId()),
+            OS::GetCurrentProcessId());
+#endif
+}
+
+
+namespace {
+
+class SelfJoinThread FINAL : public Thread {
+ public:
+  SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
+  virtual void Run() OVERRIDE { Join(); }
+};
+
+}  // namespace
+
+
+TEST(Thread, SelfJoin) {
+  SelfJoinThread thread;
+  thread.Start();
+  thread.Join();
+}
+
+
+namespace {
+
+class ThreadLocalStorageTest : public Thread, public ::testing::Test {
+ public:
+  ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
+    for (size_t i = 0; i < arraysize(keys_); ++i) {
+      keys_[i] = Thread::CreateThreadLocalKey();
+    }
+  }
+  ~ThreadLocalStorageTest() {
+    for (size_t i = 0; i < arraysize(keys_); ++i) {
+      Thread::DeleteThreadLocalKey(keys_[i]);
+    }
+  }
+
+  virtual void Run() FINAL OVERRIDE {
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(!Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      Thread::SetThreadLocal(keys_[i], GetValue(i));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK_EQ(GetValue(i), Thread::GetThreadLocal(keys_[i]));
+      CHECK_EQ(GetValue(i), Thread::GetExistingThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      Thread::SetThreadLocal(keys_[i], GetValue(arraysize(keys_) - i - 1));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+               Thread::GetThreadLocal(keys_[i]));
+      CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+               Thread::GetExistingThreadLocal(keys_[i]));
+    }
+  }
+
+ private:
+  static void* GetValue(size_t x) {
+    return reinterpret_cast<void*>(static_cast<uintptr_t>(x + 1));
+  }
+
+  Thread::LocalStorageKey keys_[256];
+};
+
+}  // namespace
+
+
+TEST_F(ThreadLocalStorageTest, DoTest) {
+  Run();
+  Start();
+  Join();
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
new file mode 100644
index 0000000..10f89de
--- /dev/null
+++ b/src/base/platform/platform-win32.cc
@@ -0,0 +1,1406 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Win32.
+
+// Secure API functions are not available using MinGW with msvcrt.dll
+// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
+// disable definition of secure API functions in standard headers that
+// would conflict with our own implementation.
+#ifdef __MINGW32__
+#include <_mingw.h>
+#ifdef MINGW_HAS_SECURE_API
+#undef MINGW_HAS_SECURE_API
+#endif  // MINGW_HAS_SECURE_API
+#endif  // __MINGW32__
+
+#ifdef _MSC_VER
+#include <limits>
+#endif
+
+#include "src/base/win32-headers.h"
+
+#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef _MSC_VER
+
+// Case-insensitive bounded string comparisons. Use _strnicmp() on Win32.
+// Usually defined in strings.h.
+int strncasecmp(const char* s1, const char* s2, int n) {
+  return _strnicmp(s1, s2, n);
+}
+
+#endif  // _MSC_VER
+
+
+// Extra functions for MinGW. Most of these are the _s functions which are in
+// the Microsoft Visual Studio C++ CRT.
+#ifdef __MINGW32__
+
+
+#ifndef __MINGW64_VERSION_MAJOR
+
+#define _TRUNCATE 0
+#define STRUNCATE 80
+
+inline void MemoryBarrier() {
+  int barrier = 0;
+  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
+}
+
+#endif  // __MINGW64_VERSION_MAJOR
+
+
+int localtime_s(tm* out_tm, const time_t* time) {
+  tm* posix_local_time_struct = localtime(time);
+  if (posix_local_time_struct == NULL) return 1;
+  *out_tm = *posix_local_time_struct;
+  return 0;
+}
+
+
+int fopen_s(FILE** pFile, const char* filename, const char* mode) {
+  *pFile = fopen(filename, mode);
+  return *pFile != NULL ? 0 : 1;
+}
+
+int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
+                 const char* format, va_list argptr) {
+  DCHECK(count == _TRUNCATE);
+  return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
+}
+
+
+int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
+  CHECK(source != NULL);
+  CHECK(dest != NULL);
+  CHECK_GT(dest_size, 0);
+
+  if (count == _TRUNCATE) {
+    while (dest_size > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+    }
+    if (dest_size == 0) {
+      *(dest - 1) = 0;
+      return STRUNCATE;
+    }
+  } else {
+    while (dest_size > 0 && count > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+      --count;
+    }
+  }
+  CHECK_GT(dest_size, 0);
+  *dest = 0;
+  return 0;
+}
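+
+// Illustrative behavior of the shim above (not part of the build):
+//   char dst[4];
+//   strncpy_s(dst, sizeof(dst), "abcdef", _TRUNCATE);
+// copies "abc", writes the trailing '\0' into the last byte and returns
+// STRUNCATE, matching the MSVCRT contract for truncated copies.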
+
+#endif  // __MINGW32__
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+bool g_hard_abort = false;
+
+}  // namespace
+
+class TimezoneCache {
+ public:
+  TimezoneCache() : initialized_(false) { }
+
+  void Clear() {
+    initialized_ = false;
+  }
+
+  // Initialize timezone information. The timezone information is obtained from
+  // Windows. If we cannot get the timezone information we fall back to CET.
+  void InitializeIfNeeded() {
+    // Just return if timezone information has already been initialized.
+    if (initialized_) return;
+
+    // Initialize POSIX time zone data.
+    _tzset();
+    // Obtain timezone information from operating system.
+    memset(&tzinfo_, 0, sizeof(tzinfo_));
+    if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+      // If we cannot get timezone information we fall back to CET.
+      tzinfo_.Bias = -60;
+      tzinfo_.StandardDate.wMonth = 10;
+      tzinfo_.StandardDate.wDay = 5;
+      tzinfo_.StandardDate.wHour = 3;
+      tzinfo_.StandardBias = 0;
+      tzinfo_.DaylightDate.wMonth = 3;
+      tzinfo_.DaylightDate.wDay = 5;
+      tzinfo_.DaylightDate.wHour = 2;
+      tzinfo_.DaylightBias = -60;
+    }
+
+    // Make standard and DST timezone names.
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+                        std_tz_name_, kTzNameSize, NULL, NULL);
+    std_tz_name_[kTzNameSize - 1] = '\0';
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+                        dst_tz_name_, kTzNameSize, NULL, NULL);
+    dst_tz_name_[kTzNameSize - 1] = '\0';
+
+    // If the OS returned an empty string or a resource id (like
+    // "@tzres.dll,-211"), simply guess the name from the UTC bias of the
+    // timezone.
+    // To properly resolve the resource identifier requires a library load,
+    // which is not possible in a sandbox.
+    if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+      OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
+                   "%s Standard Time",
+                   GuessTimezoneNameFromBias(tzinfo_.Bias));
+    }
+    if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+      OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
+                   "%s Daylight Time",
+                   GuessTimezoneNameFromBias(tzinfo_.Bias));
+    }
+    // Timezone information initialized.
+    initialized_ = true;
+  }
+
+  // Guess the name of the timezone from the bias.
+  // The guess is very biased towards the northern hemisphere.
+  const char* GuessTimezoneNameFromBias(int bias) {
+    static const int kHour = 60;
+    switch (-bias) {
+      case -9*kHour: return "Alaska";
+      case -8*kHour: return "Pacific";
+      case -7*kHour: return "Mountain";
+      case -6*kHour: return "Central";
+      case -5*kHour: return "Eastern";
+      case -4*kHour: return "Atlantic";
+      case  0*kHour: return "GMT";
+      case +1*kHour: return "Central Europe";
+      case +2*kHour: return "Eastern Europe";
+      case +3*kHour: return "Russia";
+      case +5*kHour + 30: return "India";
+      case +8*kHour: return "China";
+      case +9*kHour: return "Japan";
+      case +12*kHour: return "New Zealand";
+      default: return "Local";
+    }
+  }
+
+
+ private:
+  static const int kTzNameSize = 128;
+  bool initialized_;
+  char std_tz_name_[kTzNameSize];
+  char dst_tz_name_[kTzNameSize];
+  TIME_ZONE_INFORMATION tzinfo_;
+  friend class Win32Time;
+};
+
+
+// ----------------------------------------------------------------------------
+// The Win32Time class represents time on Win32. A timestamp is represented as
+// a 64-bit integer in units of 100 nanoseconds since January 1, 1601 (UTC).
+// JavaScript timestamps are represented as doubles in milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+
+class Win32Time {
+ public:
+  // Constructors.
+  Win32Time();
+  explicit Win32Time(double jstime);
+  Win32Time(int year, int mon, int day, int hour, int min, int sec);
+
+  // Convert timestamp to JavaScript representation.
+  double ToJSTime();
+
+  // Set timestamp to current time.
+  void SetToCurrentTime();
+
+  // Returns the local timezone offset in milliseconds east of UTC. This is
+  // the number of milliseconds you must add to UTC to get local time, i.e.
+  // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
+  // routine also takes into account whether daylight saving is in effect
+  // at the time.
+  int64_t LocalOffset(TimezoneCache* cache);
+
+  // Returns the daylight savings time offset for the time in milliseconds.
+  int64_t DaylightSavingsOffset(TimezoneCache* cache);
+
+  // Returns a string identifying the current timezone for the
+  // timestamp taking into account daylight saving.
+  char* LocalTimezone(TimezoneCache* cache);
+
+ private:
+  // Constants for time conversion.
+  static const int64_t kTimeEpoc = 116444736000000000LL;
+  static const int64_t kTimeScaler = 10000;
+  static const int64_t kMsPerMinute = 60000;
+
+  // Constants for timezone information.
+  static const bool kShortTzNames = false;
+
+  // Return whether or not daylight savings time is in effect at this time.
+  bool InDST(TimezoneCache* cache);
+
+  // Accessor for FILETIME representation.
+  FILETIME& ft() { return time_.ft_; }
+
+  // Accessor for integer representation.
+  int64_t& t() { return time_.t_; }
+
+  // Although win32 uses 64-bit integers for representing timestamps,
+  // these are packed into a FILETIME structure. The FILETIME structure
+  // is just a struct representing a 64-bit integer. The TimeStamp union
+  // allows access to both a FILETIME and an integer representation of
+  // the timestamp.
+  union TimeStamp {
+    FILETIME ft_;
+    int64_t t_;
+  };
+
+  TimeStamp time_;
+};
+
+
+// Initialize timestamp to start of epoch.
+Win32Time::Win32Time() {
+  t() = 0;
+}
+
+
+// Initialize timestamp from a JavaScript timestamp.
+Win32Time::Win32Time(double jstime) {
+  t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
+}
+
+
+// Initialize timestamp from date/time components.
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
+  SYSTEMTIME st;
+  st.wYear = year;
+  st.wMonth = mon;
+  st.wDay = day;
+  st.wHour = hour;
+  st.wMinute = min;
+  st.wSecond = sec;
+  st.wMilliseconds = 0;
+  SystemTimeToFileTime(&st, &ft());
+}
+
+
+// Convert timestamp to JavaScript timestamp.
+double Win32Time::ToJSTime() {
+  return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
+}
+
+
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+  // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+  // Because we're fast, we like fast timers which have at least a
+  // 1ms resolution.
+  //
+  // timeGetTime() provides 1ms granularity when combined with
+  // timeBeginPeriod().  If the host application for v8 wants fast
+  // timers, it can use timeBeginPeriod to increase the resolution.
+  //
+  // Using timeGetTime() has a drawback because it is a 32-bit value
+  // and hence rolls over every ~49 days.
+  //
+  // To use the clock, we use GetSystemTimeAsFileTime as our base;
+  // and then use timeGetTime to extrapolate current time from the
+  // start time.  To deal with rollovers, we resync the clock
+  // any time when more than kMaxClockElapsedTime has passed or
+  // whenever timeGetTime creates a rollover.
+
+  static bool initialized = false;
+  static TimeStamp init_time;
+  static DWORD init_ticks;
+  static const int64_t kHundredNanosecondsPerSecond = 10000000;
+  static const int64_t kMaxClockElapsedTime =
+      60*kHundredNanosecondsPerSecond;  // 1 minute
+
+  // If we are uninitialized, we need to resync the clock.
+  bool needs_resync = !initialized;
+
+  // Get the current time.
+  TimeStamp time_now;
+  GetSystemTimeAsFileTime(&time_now.ft_);
+  DWORD ticks_now = timeGetTime();
+
+  // Check if we need to resync due to clock rollover.
+  needs_resync |= ticks_now < init_ticks;
+
+  // Check if we need to resync due to elapsed time.
+  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+  // Check if we need to resync due to backwards time change.
+  needs_resync |= time_now.t_ < init_time.t_;
+
+  // Resync the clock if necessary.
+  if (needs_resync) {
+    GetSystemTimeAsFileTime(&init_time.ft_);
+    init_ticks = ticks_now = timeGetTime();
+    initialized = true;
+  }
+
+  // Finally, compute the actual time.  Why is this so hard.
+  DWORD elapsed = ticks_now - init_ticks;
+  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
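+
+// Illustrative arithmetic for the extrapolation above (not part of the
+// build): FILETIME ticks are 100 ns units, so an elapsed timeGetTime()
+// delta of, say, 250 ms contributes 250 * 10000 == 2500000 ticks on top
+// of init_time. A DWORD tick counter wraps after 2^32 ms (~49.7 days),
+// which is what the ticks_now < init_ticks resync check catches.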
+
+
+// Return the local timezone offset in milliseconds east of UTC. This
+// takes into account whether daylight saving is in effect at the time.
+// Only times in the 32-bit Unix range may be passed to this function.
+// Also, adding the time-zone offset to the input must not overflow.
+// The function EquivalentTime() in date.js guarantees this.
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+  cache->InitializeIfNeeded();
+
+  Win32Time rounded_to_second(*this);
+  rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
+      1000 * kTimeScaler;
+  // Convert to local time using POSIX localtime function.
+  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+  // very slow.  Other browsers use localtime().
+
+  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+  // POSIX seconds past 1/1/1970 0:00:00.
+  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+    return 0;
+  }
+  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+  // Convert to local time, as struct with fields for day, hour, year, etc.
+  tm posix_local_time_struct;
+  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+  if (posix_local_time_struct.tm_isdst > 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+  } else {
+    return cache->tzinfo_.Bias * -kMsPerMinute;
+  }
+}
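+
+// Worked example for the bias arithmetic above (illustrative): for US
+// Pacific time, tzinfo_.Bias == 480 and DaylightBias == -60, so during
+// DST the offset is (480 + -60) * -kMsPerMinute == -25200000 ms (UTC-7);
+// outside DST (StandardBias == 0) it is 480 * -kMsPerMinute == -28800000
+// ms (UTC-8).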
+
+
+// Return whether or not daylight savings time is in effect at this time.
+bool Win32Time::InDST(TimezoneCache* cache) {
+  cache->InitializeIfNeeded();
+
+  // Determine if DST is in effect at the specified time.
+  bool in_dst = false;
+  if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+      cache->tzinfo_.DaylightDate.wMonth != 0) {
+    // Get the local timezone offset for the timestamp in milliseconds.
+    int64_t offset = LocalOffset(cache);
+
+    // Compute the offset for DST. The bias parameters in the timezone info
+    // are specified in minutes. These must be converted to milliseconds.
+    int64_t dstofs =
+        -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
+
+    // If the local time offset equals the timezone bias plus the daylight
+    // bias then DST is in effect.
+    in_dst = offset == dstofs;
+  }
+
+  return in_dst;
+}
+
+
+// Return the daylight savings time offset for this time.
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+  return InDST(cache) ? 60 * kMsPerMinute : 0;
+}
+
+
+// Returns a string identifying the current timezone for the
+// timestamp taking into account daylight saving.
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
+  // Return the standard or DST time zone name based on whether daylight
+  // saving is in effect at the given time.
+  return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
+}
+
+
+// Returns the accumulated user time for thread.
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  FILETIME dummy;
+  uint64_t usertime;
+
+  // Get the amount of time that the thread has executed in user mode.
+  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
+                      reinterpret_cast<FILETIME*>(&usertime))) return -1;
+
+  // Adjust the resolution to micro-seconds.
+  usertime /= 10;
+
+  // Convert to seconds and microseconds
+  *secs = static_cast<uint32_t>(usertime / 1000000);
+  *usecs = static_cast<uint32_t>(usertime % 1000000);
+  return 0;
+}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+  return Time::Now().ToJsTime();
+}
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+  delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+  cache->Clear();
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  return Win32Time(time).LocalTimezone(cache);
+}
+
+
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  // Use current time, rounded to the millisecond.
+  Win32Time t(TimeCurrentMillis());
+  // Time::LocalOffset includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t.LocalOffset(cache) -
+                             t.DaylightSavingsOffset(cache));
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given
+// time.
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+  int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
+  return static_cast<double>(offset);
+}
+
+
+int OS::GetLastError() {
+  return ::GetLastError();
+}
+
+
+int OS::GetCurrentProcessId() {
+  return static_cast<int>(::GetCurrentProcessId());
+}
+
+
+int OS::GetCurrentThreadId() {
+  return static_cast<int>(::GetCurrentThreadId());
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 console output.
+//
+// If a Win32 application is linked as a console application it has a normal
+// standard output and standard error. In this case normal printf works fine
+// for output. However, if the application is linked as a GUI application,
+// the process doesn't have a console, and therefore (debugging) output is lost.
+// This is the case if we are embedded in a windows program (like a browser).
+// In order to get debug output in this case we use the debugging facility
+// OutputDebugString. This output goes to the active debugger for the process
+// (if any). Otherwise the output can be monitored using DBMON.EXE.
+
+enum OutputMode {
+  UNKNOWN,  // Output method has not yet been determined.
+  CONSOLE,  // Output is written to stdout.
+  ODS       // Output is written to debug facility.
+};
+
+static OutputMode output_mode = UNKNOWN;  // Current output mode.
+
+
+// Determine if the process has a console for output.
+static bool HasConsole() {
+  // Only check the first time. Potential race conditions are not a problem,
+  // because all threads will eventually determine the same mode.
+  if (output_mode == UNKNOWN) {
+    // We cannot just check that the standard output is attached to a console
+    // because this would fail if output is redirected to a file. Therefore we
+    // say that a process does not have an output console if either the
+    // standard output handle is invalid or its file type is unknown.
+    if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
+        GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
+      output_mode = CONSOLE;
+    else
+      output_mode = ODS;
+  }
+  return output_mode == CONSOLE;
+}
+
+
+static void VPrintHelper(FILE* stream, const char* format, va_list args) {
+  if ((stream == stdout || stream == stderr) && !HasConsole()) {
+    // It is important to use safe print here in order to avoid
+    // overflowing the buffer. We might truncate the output, but this
+    // does not crash.
+    char buffer[4096];
+    OS::VSNPrintF(buffer, sizeof(buffer), format, args);
+    OutputDebugStringA(buffer);
+  } else {
+    vfprintf(stream, format, args);
+  }
+}
+
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  FILE* result;
+  if (fopen_s(&result, path, mode) == 0) {
+    return result;
+  } else {
+    return NULL;
+  }
+}
+
+
+bool OS::Remove(const char* path) {
+  return (DeleteFileA(path) != 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+  // tmpfile_s tries to use the root directory, so don't use it.
+  char tempPathBuffer[MAX_PATH];
+  DWORD path_result = 0;
+  path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+  if (path_result > MAX_PATH || path_result == 0) return NULL;
+  UINT name_result = 0;
+  char tempNameBuffer[MAX_PATH];
+  name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+  if (name_result == 0) return NULL;
+  FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
+  if (result != NULL) {
+    Remove(tempNameBuffer);  // Delete on close.
+  }
+  return result;
+}
+
+
+// Open log file in binary mode to avoid \n -> \r\n conversion.
+const char* const OS::LogFileOpenMode = "wb";
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+  VPrintHelper(stdout, format, args);
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+  VPrintHelper(out, format, args);
+}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+  VPrintHelper(stderr, format, args);
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, length, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
+  int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
+  // Make sure to zero-terminate the string if the output was
+  // truncated or if there was an error.
+  if (n < 0 || n >= length) {
+    if (length > 0)
+      str[length - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
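+
+// Illustrative example of the truncation contract above: with char buf[4],
+// VSNPrintF(buf, 4, "%s", args) for a six-character string stores "abc" plus
+// the terminating '\0' and returns -1, while a two-character string is
+// stored in full and 2 is returned.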
+
+
+char* OS::StrChr(char* str, int c) {
+  return const_cast<char*>(strchr(str, c));
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+  // Use _TRUNCATE or strncpy_s crashes (by design) if the buffer is too small.
+  size_t buffer_size = static_cast<size_t>(length);
+  if (n + 1 > buffer_size)  // account for trailing '\0'
+    n = _TRUNCATE;
+  int result = strncpy_s(dest, length, src, n);
+  USE(result);
+  DCHECK(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
+}
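+
+// Illustrative example of the truncation handling above: StrNCpy(dest, 4,
+// "abcdef", 6) passes _TRUNCATE to strncpy_s, which stores "abc" plus the
+// terminating '\0' in dest and reports STRUNCATE instead of invoking the
+// invalid parameter handler.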
+
+
+#undef _TRUNCATE
+#undef STRUNCATE
+
+
+// Get the system's page size used by VirtualAlloc() or the next power
+// of two. The reason for always returning a power of two is that the
+// rounding up in OS::Allocate expects that.
+static size_t GetPageSize() {
+  static size_t page_size = 0;
+  if (page_size == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
+  }
+  return page_size;
+}
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+  static size_t allocate_alignment = 0;
+  if (allocate_alignment == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    allocate_alignment = info.dwAllocationGranularity;
+  }
+  return allocate_alignment;
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+                    const char* const gc_fake_mmap) {
+  if (random_seed) {
+    platform_random_number_generator.Pointer()->SetSeed(random_seed);
+  }
+  g_hard_abort = hard_abort;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+  // The address range used to randomize RWX allocations in OS::Allocate.
+  // Try not to map pages into the default range that Windows loads DLLs into.
+  // Use a multiple of 64k to prevent committing unused memory.
+  // Note: This does not guarantee RWX regions will be within the
+  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+  uintptr_t address =
+      (platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) |
+      kAllocationRandomAddressMin;
+  address &= kAllocationRandomAddressMax;
+  return reinterpret_cast<void *>(address);
+}
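+
+// Worked illustration of the computation above: the random bits are shifted
+// left by kPageSizeBits to get a page-granular candidate, OR-ed with
+// kAllocationRandomAddressMin to push it above the minimum, and AND-ed with
+// kAllocationRandomAddressMax - the final mask can clear bits of the minimum
+// again, which is why the range is a hint rather than a guarantee.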
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  // VirtualAlloc rounds allocated size to page size automatically.
+  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
+
+  // Windows XP SP2 allows Data Execution Prevention (DEP).
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
+
+  if (mbase == NULL) return NULL;
+
+  DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
+
+  *allocated = msize;
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): VirtualFree has a return value which is ignored here.
+  VirtualFree(address, 0, MEM_RELEASE);
+  USE(size);
+}
+
+
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
+void OS::ProtectCode(void* address, const size_t size) {
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+}
+
+
+void OS::Sleep(int milliseconds) {
+  ::Sleep(milliseconds);
+}
+
+
+void OS::Abort() {
+  if (g_hard_abort) {
+    V8_IMMEDIATE_CRASH();
+  }
+  // Make the MSVCRT do a silent abort.
+  raise(SIGABRT);
+}
+
+
+void OS::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid a dependency on the Visual Studio runtime, the following code
+  // can be used instead:
+  // __asm { int 3 }
+  __debugbreak();
+#else
+  ::DebugBreak();
+#endif
+}
+
+
+class Win32MemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  Win32MemoryMappedFile(HANDLE file,
+                        HANDLE file_mapping,
+                        void* memory,
+                        int size)
+      : file_(file),
+        file_mapping_(file_mapping),
+        memory_(memory),
+        size_(size) { }
+  virtual ~Win32MemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  HANDLE file_;
+  HANDLE file_mapping_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  // Open a physical file
+  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+  if (file == INVALID_HANDLE_VALUE) return NULL;
+
+  int size = static_cast<int>(GetFileSize(file, NULL));
+
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, NULL,
+      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+  if (file_mapping == NULL) return NULL;
+
+  // Map a view of the file into memory
+  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  // Open a physical file
+  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+  if (file == INVALID_HANDLE_VALUE) return NULL;
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, NULL,
+      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+  if (file_mapping == NULL) return NULL;
+  // Map a view of the file into memory
+  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+  if (memory) memmove(memory, initial, size);
+  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+Win32MemoryMappedFile::~Win32MemoryMappedFile() {
+  if (memory_ != NULL)
+    UnmapViewOfFile(memory_);
+  CloseHandle(file_mapping_);
+  CloseHandle(file_);
+}
+
+
+// The following code loads functions defined in DbgHelp.h and TlHelp32.h
+// dynamically. This is to avoid depending on dbghelp.dll and
+// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
+// kernel32.dll at some point, so loading the functions defined in TlHelp32.h
+// dynamically might not be necessary any more - for some versions of Windows?).
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define DBGHELP_FUNCTION_LIST(V)  \
+  V(SymInitialize)                \
+  V(SymGetOptions)                \
+  V(SymSetOptions)                \
+  V(SymGetSearchPath)             \
+  V(SymLoadModule64)              \
+  V(StackWalk64)                  \
+  V(SymGetSymFromAddr64)          \
+  V(SymGetLineFromAddr64)         \
+  V(SymFunctionTableAccess64)     \
+  V(SymGetModuleBase64)
+
+// Function pointers to functions dynamically loaded from kernel32.dll
+// (formerly tlhelp32.dll).
+#define TLHELP32_FUNCTION_LIST(V)  \
+  V(CreateToolhelp32Snapshot)      \
+  V(Module32FirstW)                \
+  V(Module32NextW)
+
+// Define the decoration to use for the type and variable name used for
+// each dynamically loaded DLL function.
+#define DLL_FUNC_TYPE(name) _##name##_
+#define DLL_FUNC_VAR(name) _##name
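+// For example, DLL_FUNC_TYPE(SymInitialize) expands to _SymInitialize_ and
+// DLL_FUNC_VAR(SymInitialize) expands to _SymInitialize.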
+
+// Define the type for each dynamically loaded DLL function. The function
+// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
+// from the Windows include files are redefined here so that the function
+// definitions are as close to the ones in the original .h files as possible.
+#ifndef IN
+#define IN
+#endif
+#ifndef VOID
+#define VOID void
+#endif
+
+// DbgHelp isn't supported on MinGW yet
+#ifndef __MINGW32__
+// DbgHelp.h functions.
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
+                                                       IN PSTR UserSearchPath,
+                                                       IN BOOL fInvadeProcess);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
+    IN HANDLE hProcess,
+    OUT PSTR SearchPath,
+    IN DWORD SearchPathLength);
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
+    IN HANDLE hProcess,
+    IN HANDLE hFile,
+    IN PSTR ImageName,
+    IN PSTR ModuleName,
+    IN DWORD64 BaseOfDll,
+    IN DWORD SizeOfDll);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
+    DWORD MachineType,
+    HANDLE hProcess,
+    HANDLE hThread,
+    LPSTACKFRAME64 StackFrame,
+    PVOID ContextRecord,
+    PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+    PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+    PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+    PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD64 pdwDisplacement,
+    OUT PIMAGEHLP_SYMBOL64 Symbol);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD pdwDisplacement,
+    OUT PIMAGEHLP_LINE64 Line64);
+// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
+typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
+
+// TlHelp32.h functions.
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
+    DWORD dwFlags,
+    DWORD th32ProcessID);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
+                                                        LPMODULEENTRY32W lpme);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
+                                                       LPMODULEENTRY32W lpme);
+
+#undef IN
+#undef VOID
+
+// Declare a variable for each dynamically loaded DLL function.
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
+TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
+#undef DEF_DLL_FUNCTION
+
+// Load the functions. This function has a lot of "ugly" macros in order to
+// reduce code duplication.
+
+static bool LoadDbgHelpAndTlHelp32() {
+  static bool dbghelp_loaded = false;
+
+  if (dbghelp_loaded) return true;
+
+  HMODULE module;
+
+  // Load functions from the dbghelp.dll module.
+  module = LoadLibrary(TEXT("dbghelp.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
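+
+// For example, LOAD_DLL_FUNC(SymInitialize) expands to:
+//   _SymInitialize = reinterpret_cast<_SymInitialize_>(
+//       GetProcAddress(module, "SymInitialize"));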
+
+DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Load functions from the kernel32.dll module (the TlHelp32.h functions used
+  // to be in tlhelp32.dll but have now been moved to kernel32.dll).
+  module = LoadLibrary(TEXT("kernel32.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Check that all functions were loaded.
+  bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+
+DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+
+#undef DLL_FUNC_LOADED
+  true;
+
+  dbghelp_loaded = result;
+  return result;
+  // NOTE: The modules are never unloaded and will stay around until the
+  // application is closed.
+}
+
+#undef DBGHELP_FUNCTION_LIST
+#undef TLHELP32_FUNCTION_LIST
+#undef DLL_FUNC_VAR
+#undef DLL_FUNC_TYPE
+
+
+// Load the symbols for generating stack traces.
+static std::vector<OS::SharedLibraryAddress> LoadSymbols(
+    HANDLE process_handle) {
+  static std::vector<OS::SharedLibraryAddress> result;
+
+  static bool symbols_loaded = false;
+
+  if (symbols_loaded) return result;
+
+  BOOL ok;
+
+  // Initialize the symbol engine.
+  ok = _SymInitialize(process_handle,  // hProcess
+                      NULL,            // UserSearchPath
+                      false);          // fInvadeProcess
+  if (!ok) return result;
+
+  DWORD options = _SymGetOptions();
+  options |= SYMOPT_LOAD_LINES;
+  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
+  options = _SymSetOptions(options);
+
+  char buf[OS::kStackWalkMaxNameLen] = {0};
+  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
+  if (!ok) {
+    int err = GetLastError();
+    OS::Print("%d\n", err);
+    return result;
+  }
+
+  HANDLE snapshot = _CreateToolhelp32Snapshot(
+      TH32CS_SNAPMODULE,       // dwFlags
+      GetCurrentProcessId());  // th32ProcessId
+  if (snapshot == INVALID_HANDLE_VALUE) return result;
+  MODULEENTRY32W module_entry;
+  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
+  BOOL cont = _Module32FirstW(snapshot, &module_entry);
+  while (cont) {
+    DWORD64 base;
+    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
+    // both Unicode and ASCII strings even though the parameter is PSTR.
+    base = _SymLoadModule64(
+        process_handle,                                       // hProcess
+        0,                                                    // hFile
+        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
+        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
+        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
+        module_entry.modBaseSize);                            // SizeOfDll
+    if (base == 0) {
+      int err = GetLastError();
+      if (err != ERROR_MOD_NOT_FOUND &&
+          err != ERROR_INVALID_HANDLE) {
+        result.clear();
+        return result;
+      }
+    }
+    int lib_name_length = WideCharToMultiByte(
+        CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+    std::string lib_name(lib_name_length, 0);
+    WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
+                        lib_name_length, NULL, NULL);
+    result.push_back(OS::SharedLibraryAddress(
+        lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
+        reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
+                                       module_entry.modBaseSize)));
+    cont = _Module32NextW(snapshot, &module_entry);
+  }
+  CloseHandle(snapshot);
+
+  symbols_loaded = true;
+  return result;
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  // SharedLibraryEvents are logged when loading symbol information.
+  // Only the shared libraries loaded at the time of the call to
+  // GetSharedLibraryAddresses are logged.  DLLs loaded after
+  // initialization are not accounted for.
+  if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
+  HANDLE process_handle = GetCurrentProcess();
+  return LoadSymbols(process_handle);
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+#else  // __MINGW32__
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  return std::vector<OS::SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() { }
+#endif  // __MINGW32__
+
+
+double OS::nan_value() {
+#ifdef _MSC_VER
+  return std::numeric_limits<double>::quiet_NaN();
+#else  // _MSC_VER
+  return NAN;
+#endif  // _MSC_VER
+}
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef _WIN64
+  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#elif defined(__MINGW32__)
+  // With gcc 4.4 the tree vectorization optimizer can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
+  return 16;
+#else
+  return 8;  // Floating-point math runs faster with 8-byte alignment.
+#endif
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
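+
+// Illustrative walk-through of the aligned reservation above (sizes are
+// hypothetical): for size 64K and alignment 1M the constructor reserves
+// 64K + 1M, rounds the returned address up to the next 1M boundary, releases
+// the oversized reservation, and re-reserves exactly 64K at that boundary,
+// falling back to the bigger reservation if another allocation raced in.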
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  DCHECK(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 thread support.
+
+// Definition of invalid thread handle and id.
+static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
+
+// Entry point for threads. The supplied argument is a pointer to the thread
+// object. The entry function dispatches to the run method in the thread
+// object. It is important that this function has __stdcall calling
+// convention.
+static unsigned int __stdcall ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  thread->NotifyStartedAndRun();
+  return 0;
+}
+
+
+class Thread::PlatformData {
+ public:
+  explicit PlatformData(HANDLE thread) : thread_(thread) {}
+  HANDLE thread_;
+  unsigned thread_id_;
+};
+
+
+// Initialize a Win32 thread object. The thread has an invalid thread
+// handle until it is started.
+
+Thread::Thread(const Options& options)
+    : stack_size_(options.stack_size()),
+      start_semaphore_(NULL) {
+  data_ = new PlatformData(kNoThread);
+  set_name(options.name());
+}
+
+
+void Thread::set_name(const char* name) {
+  OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+// Close our own handle for the thread.
+Thread::~Thread() {
+  if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
+  delete data_;
+}
+
+
+// Create a new thread. It is important to use _beginthreadex() instead of
+// the Win32 function CreateThread(), because CreateThread() does not
+// initialize thread-specific structures in the C runtime library.
+void Thread::Start() {
+  data_->thread_ = reinterpret_cast<HANDLE>(
+      _beginthreadex(NULL,
+                     static_cast<unsigned>(stack_size_),
+                     ThreadEntry,
+                     this,
+                     0,
+                     &data_->thread_id_));
+}
+
+
+// Wait for thread to terminate.
+void Thread::Join() {
+  if (data_->thread_id_ != GetCurrentThreadId()) {
+    WaitForSingleObject(data_->thread_, INFINITE);
+  }
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  DWORD result = TlsAlloc();
+  DCHECK(result != TLS_OUT_OF_INDEXES);
+  return static_cast<LocalStorageKey>(result);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  BOOL result = TlsFree(static_cast<DWORD>(key));
+  USE(result);
+  DCHECK(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  return TlsGetValue(static_cast<DWORD>(key));
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
+  USE(result);
+  DCHECK(result);
+}
+
+
+
+void Thread::YieldCPU() {
+  Sleep(0);
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
new file mode 100644
index 0000000..9e20c08
--- /dev/null
+++ b/src/base/platform/platform.h
@@ -0,0 +1,516 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the platform-specific code. This makes the rest of the
+// code less dependent on operating system, compilers and runtime libraries.
+// This module specifically does not deal with differences between different
+// processor architectures.
+// The platform classes have the same definition for all platforms. The
+// implementation for a particular platform is put in platform_<os>.cc.
+// The build system then uses the implementation for the target platform.
+//
+// This design has been chosen because it is simple and fast. Alternatively,
+// the platform dependent classes could have been implemented using abstract
+// superclasses with virtual methods and having specializations for each
+// platform. This design was rejected because it was more complicated and
+// slower. It would require factory methods for selecting the right
+// implementation and the overhead of virtual method calls for
+// performance-sensitive operations like mutex locking/unlocking.
+
+#ifndef V8_BASE_PLATFORM_PLATFORM_H_
+#define V8_BASE_PLATFORM_PLATFORM_H_
+
+#include <stdarg.h>
+#include <string>
+#include <vector>
+
+#include "src/base/build_config.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+
+#ifdef __sun
+# ifndef signbit
+namespace std {
+int signbit(double x);
+}
+# endif
+#endif
+
+#if V8_OS_QNX
+#include "src/base/qnx-math.h"
+#endif
+
+// Microsoft Visual C++ specific stuff.
+#if V8_LIBC_MSVCRT
+
+#include "src/base/win32-headers.h"
+#include "src/base/win32-math.h"
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
+inline int lrint(double flt) {
+  int intgr;
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+  __asm {
+    fld flt
+    fistp intgr
+  };
+#else
+  intgr = static_cast<int>(flt + 0.5);
+  if ((intgr & 1) != 0 && intgr - flt == 0.5) {
+    // If the number is halfway between two integers, round to the even one.
+    intgr--;
+  }
+#endif
+  return intgr;
+}
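+// Illustrative values for the fallback branch above: lrint(2.5) yields 2 and
+// lrint(3.5) yields 4, because halfway cases are rounded to the even integer
+// (the x87 fistp path behaves the same under the default rounding mode).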
+#endif  // _MSC_VER < 1800
+
+#endif  // V8_LIBC_MSVCRT
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Fast TLS support
+
+#ifndef V8_NO_FAST_TLS
+
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+  const intptr_t kTibInlineTlsOffset = 0xE10;
+  const intptr_t kTibExtraTlsOffset = 0xF94;
+  const intptr_t kMaxInlineSlots = 64;
+  const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+  const intptr_t kPointerSize = sizeof(void*);
+  DCHECK(0 <= index && index < kMaxSlots);
+  if (index < kMaxInlineSlots) {
+    return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+                                               kPointerSize * index));
+  }
+  intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+  DCHECK(extra != 0);
+  return *reinterpret_cast<intptr_t*>(extra +
+                                      kPointerSize * (index - kMaxInlineSlots));
+}
+
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+extern intptr_t kMacTlsBaseOffset;
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+  intptr_t result;
+#if V8_HOST_ARCH_IA32
+  asm("movl %%gs:(%1,%2,4), %0;"
+      :"=r"(result)  // Output must be a writable register.
+      :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+  asm("movq %%gs:(%1,%2,8), %0;"
+      :"=r"(result)
+      :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+  return result;
+}
+
+#endif
+
+#endif  // V8_NO_FAST_TLS
+
+
+class TimezoneCache;
+
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
+class OS {
+ public:
+  // Initialize the OS class.
+  // - random_seed: Used for GetRandomMmapAddr() if non-zero.
+  // - hard_abort: If true, OS::Abort() will crash instead of aborting.
+  // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
+  static void Initialize(int64_t random_seed,
+                         bool hard_abort,
+                         const char* const gc_fake_mmap);
+
+  // Returns the accumulated user time for the thread. This routine
+  // can be used for profiling. The implementation should
+  // strive for high-precision timer resolution, preferably
+  // micro-second resolution.
+  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);
+
+  // Returns current time as the number of milliseconds since
+  // 00:00:00 UTC, January 1, 1970.
+  static double TimeCurrentMillis();
+
+  static TimezoneCache* CreateTimezoneCache();
+  static void DisposeTimezoneCache(TimezoneCache* cache);
+  static void ClearTimezoneCache(TimezoneCache* cache);
+
+  // Returns a string identifying the current time zone. The
+  // timestamp is used for determining if DST is in effect.
+  static const char* LocalTimezone(double time, TimezoneCache* cache);
+
+  // Returns the local time offset in milliseconds east of UTC without
+  // taking daylight savings time into account.
+  static double LocalTimeOffset(TimezoneCache* cache);
+
+  // Returns the daylight savings offset for the given time.
+  static double DaylightSavingsOffset(double time, TimezoneCache* cache);
+
+  // Returns last OS error.
+  static int GetLastError();
+
+  static FILE* FOpen(const char* path, const char* mode);
+  static bool Remove(const char* path);
+
+  // Opens a temporary file; the file is automatically removed on close.
+  static FILE* OpenTemporaryFile();
+
+  // Log file open mode is platform-dependent due to line-ending issues.
+  static const char* const LogFileOpenMode;
+
+  // Print output to console. This is mostly used for debugging output.
+  // On platforms that have standard terminal output, the output
+  // should go to stdout.
+  static void Print(const char* format, ...);
+  static void VPrint(const char* format, va_list args);
+
+  // Print output to a file. This is mostly used for debugging output.
+  static void FPrint(FILE* out, const char* format, ...);
+  static void VFPrint(FILE* out, const char* format, va_list args);
+
+  // Print error output to console. This is mostly used for error message
+  // output. On platforms that have standard terminal output, the output
+  // should go to stderr.
+  static void PrintError(const char* format, ...);
+  static void VPrintError(const char* format, va_list args);
+
+  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
+  // they are not guaranteed to be executable unless 'executable' is true.
+  // Returns the address of the allocated memory, or NULL on failure.
+  static void* Allocate(const size_t requested,
+                        size_t* allocated,
+                        bool is_executable);
+  static void Free(void* address, const size_t size);
+
+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
+  // Mark code segments non-writable.
+  static void ProtectCode(void* address, const size_t size);
+
+  // Assign memory as a guard page so that access will cause an exception.
+  static void Guard(void* address, const size_t size);
+
+  // Generate a random address to be used for hinting mmap().
+  static void* GetRandomMmapAddr();
+
+  // Get the alignment guaranteed by Allocate().
+  static size_t AllocateAlignment();
+
+  // Sleep for a number of milliseconds.
+  static void Sleep(const int milliseconds);
+
+  // Abort the current process.
+  static void Abort();
+
+  // Debug break.
+  static void DebugBreak();
+
+  // Walk the stack.
+  static const int kStackWalkError = -1;
+  static const int kStackWalkMaxNameLen = 256;
+  static const int kStackWalkMaxTextLen = 256;
+  struct StackFrame {
+    void* address;
+    char text[kStackWalkMaxTextLen];
+  };
+
+  class MemoryMappedFile {
+   public:
+    static MemoryMappedFile* open(const char* name);
+    static MemoryMappedFile* create(const char* name, int size, void* initial);
+    virtual ~MemoryMappedFile() { }
+    virtual void* memory() = 0;
+    virtual int size() = 0;
+  };
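+
+  // Illustrative use of the interface above (error handling elided;
+  // DoSomethingWith is a hypothetical helper):
+  //
+  //   OS::MemoryMappedFile* file = OS::MemoryMappedFile::open("data.bin");
+  //   if (file != NULL) {
+  //     DoSomethingWith(file->memory(), file->size());
+  //     delete file;  // Unmaps the view and closes the handles.
+  //   }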
+
+  // Safe formatting print. Ensures that str is always null-terminated.
+  // Returns the number of chars written, or -1 if output was truncated.
+  static int SNPrintF(char* str, int length, const char* format, ...);
+  static int VSNPrintF(char* str,
+                       int length,
+                       const char* format,
+                       va_list args);
+
+  static char* StrChr(char* str, int c);
+  static void StrNCpy(char* dest, int length, const char* src, size_t n);
+
+  // Support for the profiler.  Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted for.
+  struct SharedLibraryAddress {
+    SharedLibraryAddress(
+        const std::string& library_path, uintptr_t start, uintptr_t end)
+        : library_path(library_path), start(start), end(end) {}
+
+    std::string library_path;
+    uintptr_t start;
+    uintptr_t end;
+  };
+
+  static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
+
+  // Support for the profiler.  Notifies the external profiling
+  // process that a code moving garbage collection starts.  Can do
+  // nothing, in which case the code objects must not move (e.g., by
+  // using --never-compact) if accurate profiling is desired.
+  static void SignalCodeMovingGC();
+
+  // Returns the double constant NaN.
+  static double nan_value();
+
+  // Support runtime detection of whether the hard float option of the
+  // EABI is used.
+  static bool ArmUsingHardFloat();
+
+  // Returns the activation frame alignment constraint or zero if
+  // the platform doesn't care. Guaranteed to be a power of two.
+  static int ActivationFrameAlignment();
+
+  static int GetCurrentProcessId();
+
+  static int GetCurrentThreadId();
+
+ private:
+  static const int msPerSecond = 1000;
+
+#if V8_OS_POSIX
+  static const char* GetGCFakeMMapFile();
+#endif
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
+};
+
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
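+//
+// A minimal usage sketch (illustrative):
+//
+//   VirtualMemory vm(OS::AllocateAlignment());
+//   if (vm.IsReserved() &&
+//       vm.Commit(vm.address(), vm.size(), false)) {
+//     // ... use the committed pages ...
+//   }  // ~VirtualMemory releases the reservation.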
+class VirtualMemory {
+ public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
+  // Reserves virtual memory with size.
+  explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that is
+  // aligned to the given alignment. This may not be at the position
+  // returned by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
+  ~VirtualMemory();
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved();
+
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
+  // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
+  void* address() {
+    DCHECK(IsReserved());
+    return address_;
+  }
+
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
+  size_t size() { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address, size_t size, bool is_executable);
+
+  // Uncommit real memory.  Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size);
+
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  void Release() {
+    DCHECK(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    DCHECK(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    DCHECK(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
+  // Returns true if OS performs lazy commits, i.e. the memory allocation call
+  // defers actual physical memory allocation till the first memory access.
+  // Otherwise returns false.
+  static bool HasLazyCommits();
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;  // Size of the virtual memory.
+};
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the Start()
+// method is called the new thread starts running the Run() method in the new
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
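+//
+// A minimal subclass sketch (illustrative):
+//
+//   class WorkerThread : public Thread {
+//    public:
+//     WorkerThread() : Thread(Options("WorkerThread")) {}
+//     virtual void Run() { /* do the work */ }
+//   };
+//
+//   WorkerThread worker;
+//   worker.Start();
+//   worker.Join();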
+
+class Thread {
+ public:
+  // Opaque data type for thread-local storage keys.
+  typedef int32_t LocalStorageKey;
+
+  class Options {
+   public:
+    Options() : name_("v8:<unknown>"), stack_size_(0) {}
+    explicit Options(const char* name, int stack_size = 0)
+        : name_(name), stack_size_(stack_size) {}
+
+    const char* name() const { return name_; }
+    int stack_size() const { return stack_size_; }
+
+   private:
+    const char* name_;
+    int stack_size_;
+  };
+
+  // Create new thread.
+  explicit Thread(const Options& options);
+  virtual ~Thread();
+
+  // Start new thread by calling the Run() method on the new thread.
+  void Start();
+
+  // Start new thread and wait until Run() method is called on the new thread.
+  void StartSynchronously() {
+    start_semaphore_ = new Semaphore(0);
+    Start();
+    start_semaphore_->Wait();
+    delete start_semaphore_;
+    start_semaphore_ = NULL;
+  }
+
+  // Wait until thread terminates.
+  void Join();
+
+  inline const char* name() const {
+    return name_;
+  }
+
+  // Abstract method for run handler.
+  virtual void Run() = 0;
+
+  // Thread-local storage.
+  static LocalStorageKey CreateThreadLocalKey();
+  static void DeleteThreadLocalKey(LocalStorageKey key);
+  static void* GetThreadLocal(LocalStorageKey key);
+  static int GetThreadLocalInt(LocalStorageKey key) {
+    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+  }
+  static void SetThreadLocal(LocalStorageKey key, void* value);
+  static void SetThreadLocalInt(LocalStorageKey key, int value) {
+    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+  }
+  static bool HasThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key) != NULL;
+  }
+
+#ifdef V8_FAST_TLS_SUPPORTED
+  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+    void* result = reinterpret_cast<void*>(
+        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
+    DCHECK(result == GetThreadLocal(key));
+    return result;
+  }
+#else
+  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key);
+  }
+#endif
+
+  // A hint to the scheduler to let another thread run.
+  static void YieldCPU();
+
+
+  // The thread name length is limited to 16 based on Linux's implementation of
+  // prctl().
+  static const int kMaxThreadNameLength = 16;
+
+  class PlatformData;
+  PlatformData* data() { return data_; }
+
+  void NotifyStartedAndRun() {
+    if (start_semaphore_) start_semaphore_->Signal();
+    Run();
+  }
+
+ private:
+  void set_name(const char* name);
+
+  PlatformData* data_;
+
+  char name_[kMaxThreadNameLength];
+  int stack_size_;
+  Semaphore* start_semaphore_;
+
+  DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_PLATFORM_H_
diff --git a/src/base/platform/semaphore-unittest.cc b/src/base/platform/semaphore-unittest.cc
new file mode 100644
index 0000000..c68435f
--- /dev/null
+++ b/src/base/platform/semaphore-unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+static const char kAlphabet[] = "XKOAD";
+static const size_t kAlphabetSize = sizeof(kAlphabet) - 1;
+static const size_t kBufferSize = 987;  // GCD(buffer size, alphabet size) = 1
+static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
+
+
+class ProducerThread FINAL : public Thread {
+ public:
+  ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
+      : Thread(Options("ProducerThread")),
+        buffer_(buffer),
+        free_space_(free_space),
+        used_space_(used_space) {}
+  virtual ~ProducerThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (size_t n = 0; n < kDataSize; ++n) {
+      free_space_->Wait();
+      buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
+      used_space_->Signal();
+    }
+  }
+
+ private:
+  char* buffer_;
+  Semaphore* const free_space_;
+  Semaphore* const used_space_;
+};
+
+
+class ConsumerThread FINAL : public Thread {
+ public:
+  ConsumerThread(const char* buffer, Semaphore* free_space,
+                 Semaphore* used_space)
+      : Thread(Options("ConsumerThread")),
+        buffer_(buffer),
+        free_space_(free_space),
+        used_space_(used_space) {}
+  virtual ~ConsumerThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (size_t n = 0; n < kDataSize; ++n) {
+      used_space_->Wait();
+      EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
+      free_space_->Signal();
+    }
+  }
+
+ private:
+  const char* buffer_;
+  Semaphore* const free_space_;
+  Semaphore* const used_space_;
+};
+
+
+class WaitAndSignalThread FINAL : public Thread {
+ public:
+  explicit WaitAndSignalThread(Semaphore* semaphore)
+      : Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
+  virtual ~WaitAndSignalThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (int n = 0; n < 100; ++n) {
+      semaphore_->Wait();
+      ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
+      semaphore_->Signal();
+    }
+  }
+
+ private:
+  Semaphore* const semaphore_;
+};
+
+}  // namespace
+
+
+TEST(Semaphore, ProducerConsumer) {
+  char buffer[kBufferSize];
+  std::memset(buffer, 0, sizeof(buffer));
+  Semaphore free_space(kBufferSize);
+  Semaphore used_space(0);
+  ProducerThread producer_thread(buffer, &free_space, &used_space);
+  ConsumerThread consumer_thread(buffer, &free_space, &used_space);
+  producer_thread.Start();
+  consumer_thread.Start();
+  producer_thread.Join();
+  consumer_thread.Join();
+}
+
+
+TEST(Semaphore, WaitAndSignal) {
+  Semaphore semaphore(0);
+  WaitAndSignalThread t1(&semaphore);
+  WaitAndSignalThread t2(&semaphore);
+
+  t1.Start();
+  t2.Start();
+
+  // Make something available.
+  semaphore.Signal();
+
+  t1.Join();
+  t2.Join();
+
+  semaphore.Wait();
+
+  EXPECT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1)));
+}
+
+
+TEST(Semaphore, WaitFor) {
+  Semaphore semaphore(0);
+
+  // Semaphore not signalled - timeout.
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+
+  // Semaphore signalled - no timeout.
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/semaphore.cc b/src/base/platform/semaphore.cc
new file mode 100644
index 0000000..0679c00
--- /dev/null
+++ b/src/base/platform/semaphore.cc
@@ -0,0 +1,204 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <errno.h>
+
+#include "src/base/logging.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+  kern_return_t result = semaphore_create(
+      mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+  kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  kern_return_t result = semaphore_signal(native_handle_);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  while (true) {
+    kern_return_t result = semaphore_wait(native_handle_);
+    if (result == KERN_SUCCESS) return;  // Semaphore was signalled.
+    DCHECK_EQ(KERN_ABORTED, result);
+  }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  while (true) {
+    mach_timespec_t ts;
+    if (now >= end) {
+      // Return immediately if semaphore was not signalled.
+      ts.tv_sec = 0;
+      ts.tv_nsec = 0;
+    } else {
+      ts = (end - now).ToMachTimespec();
+    }
+    kern_return_t result = semaphore_timedwait(native_handle_, ts);
+    if (result == KERN_SUCCESS) return true;  // Semaphore was signalled.
+    if (result == KERN_OPERATION_TIMED_OUT) return false;  // Timeout.
+    DCHECK_EQ(KERN_ABORTED, result);
+    now = TimeTicks::Now();
+  }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+  DCHECK(count >= 0);
+  int result = sem_init(&native_handle_, 0, count);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+  int result = sem_destroy(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  int result = sem_post(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&native_handle_);
+    if (result == 0) return;  // Semaphore was signalled.
+    // Signal caused spurious wakeup.
+    DCHECK_EQ(-1, result);
+    DCHECK_EQ(EINTR, errno);
+  }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+#if V8_OS_NACL
+  // PNaCL doesn't support sem_timedwait, do ugly busy waiting.
+  ElapsedTimer timer;
+  timer.Start();
+  do {
+    int result = sem_trywait(&native_handle_);
+    if (result == 0) return true;
+    DCHECK(errno == EAGAIN || errno == EINTR);
+  } while (!timer.HasExpired(rel_time));
+  return false;
+#else
+  // Compute the time for end of timeout.
+  const Time time = Time::NowFromSystemTime() + rel_time;
+  const struct timespec ts = time.ToTimespec();
+
+  // Wait for semaphore signalled or timeout.
+  while (true) {
+    int result = sem_timedwait(&native_handle_, &ts);
+    if (result == 0) return true;  // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+    if (result > 0) {
+      // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
+      errno = result;
+      result = -1;
+    }
+#endif
+    if (result == -1 && errno == ETIMEDOUT) {
+      // Timed out while waiting for semaphore.
+      return false;
+    }
+    // Signal caused spurious wakeup.
+    DCHECK_EQ(-1, result);
+    DCHECK_EQ(EINTR, errno);
+  }
+#endif
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+  DCHECK(count >= 0);
+  native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+  DCHECK(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+  BOOL result = CloseHandle(native_handle_);
+  DCHECK(result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  LONG dummy;
+  BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+  DCHECK(result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+  DCHECK(result == WAIT_OBJECT_0);
+  USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  while (true) {
+    int64_t msec = (end - now).InMilliseconds();
+    if (msec >= static_cast<int64_t>(INFINITE)) {
+      DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+      if (result == WAIT_OBJECT_0) {
+        return true;
+      }
+      DCHECK(result == WAIT_TIMEOUT);
+      now = TimeTicks::Now();
+    } else {
+      DWORD result = WaitForSingleObject(
+          native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+      if (result == WAIT_TIMEOUT) {
+        return false;
+      }
+      DCHECK(result == WAIT_OBJECT_0);
+      return true;
+    }
+  }
+}
+
+#endif  // V8_OS_MACOSX
+
+} }  // namespace v8::base
diff --git a/src/base/platform/semaphore.h b/src/base/platform/semaphore.h
new file mode 100644
index 0000000..cbf8df2
--- /dev/null
+++ b/src/base/platform/semaphore.h
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
+#define V8_BASE_PLATFORM_SEMAPHORE_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h>  // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
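+//
+// A minimal usage sketch (illustrative):
+//
+//   Semaphore sem(0);      // Nothing available yet.
+//   // Consumer thread:  sem.Wait();    // Blocks until a Signal().
+//   // Producer thread:  sem.Signal();  // Wakes one waiting thread.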
+
+class Semaphore FINAL {
+ public:
+  explicit Semaphore(int count);
+  ~Semaphore();
+
+  // Increments the semaphore counter.
+  void Signal();
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+  // and then decrements the semaphore counter.
+  void Wait();
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // time has passed. If a timeout happens the return value is false and the
+  // counter is unchanged. Otherwise the semaphore counter is decremented and
+  // true is returned.
+  bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+  typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+  typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+  typedef HANDLE NativeHandle;
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   // The following semaphore starts at 0.
+//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+//   void my_function() {
+//     // Do something with my_semaphore.Pointer().
+//   }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+  static Semaphore* Create() {
+    return new Semaphore(N);
+  }
+};
+
+template <int N>
+struct LazySemaphore {
+  typedef typename LazyDynamicInstance<Semaphore, CreateSemaphoreTrait<N>,
+                                       ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_SEMAPHORE_H_
diff --git a/src/base/platform/time-unittest.cc b/src/base/platform/time-unittest.cc
new file mode 100644
index 0000000..b3bfbab
--- /dev/null
+++ b/src/base/platform/time-unittest.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#include "src/base/platform/elapsed-timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(TimeDelta, FromAndIn) {
+  EXPECT_EQ(TimeDelta::FromDays(2), TimeDelta::FromHours(48));
+  EXPECT_EQ(TimeDelta::FromHours(3), TimeDelta::FromMinutes(180));
+  EXPECT_EQ(TimeDelta::FromMinutes(2), TimeDelta::FromSeconds(120));
+  EXPECT_EQ(TimeDelta::FromSeconds(2), TimeDelta::FromMilliseconds(2000));
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2), TimeDelta::FromMicroseconds(2000));
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
+  EXPECT_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
+  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
+  EXPECT_EQ(static_cast<int64_t>(13),
+            TimeDelta::FromMilliseconds(13).InMilliseconds());
+  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
+  EXPECT_EQ(static_cast<int64_t>(13),
+            TimeDelta::FromMicroseconds(13).InMicroseconds());
+}
+
+
+#if V8_OS_MACOSX
+TEST(TimeDelta, MachTimespec) {
+  TimeDelta null = TimeDelta();
+  EXPECT_EQ(null, TimeDelta::FromMachTimespec(null.ToMachTimespec()));
+  TimeDelta delta1 = TimeDelta::FromMilliseconds(42);
+  EXPECT_EQ(delta1, TimeDelta::FromMachTimespec(delta1.ToMachTimespec()));
+  TimeDelta delta2 = TimeDelta::FromDays(42);
+  EXPECT_EQ(delta2, TimeDelta::FromMachTimespec(delta2.ToMachTimespec()));
+}
+#endif
+
+
+TEST(Time, JsTime) {
+  Time t = Time::FromJsTime(700000.3);
+  EXPECT_DOUBLE_EQ(700000.3, t.ToJsTime());
+}
+
+
+#if V8_OS_POSIX
+TEST(Time, Timespec) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromTimespec(null.ToTimespec()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromTimespec(now.ToTimespec()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromTimespec(now_sys.ToTimespec()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromTimespec(unix_epoch.ToTimespec()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromTimespec(max.ToTimespec()));
+}
+
+
+TEST(Time, Timeval) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromTimeval(null.ToTimeval()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromTimeval(now.ToTimeval()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromTimeval(now_sys.ToTimeval()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromTimeval(unix_epoch.ToTimeval()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromTimeval(max.ToTimeval()));
+}
+#endif
+
+
+#if V8_OS_WIN
+TEST(Time, Filetime) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromFiletime(null.ToFiletime()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromFiletime(now.ToFiletime()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromFiletime(now_sys.ToFiletime()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromFiletime(unix_epoch.ToFiletime()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromFiletime(max.ToFiletime()));
+}
+#endif
+
+
+namespace {
+
+template <typename T>
+static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
+  // We're trying to measure that intervals increment in a VERY small amount
+  // of time -- according to the specified target granularity. Unfortunately,
+  // if we happen to have a context switch in the middle of our test, the
+  // context switch could easily exceed our limit. So, we iterate on this
+  // several times. As long as we're able to detect the fine-granularity
+  // timers at least once, then the test has succeeded.
+  static const TimeDelta kExpirationTimeout = TimeDelta::FromSeconds(1);
+  ElapsedTimer timer;
+  timer.Start();
+  TimeDelta delta;
+  do {
+    T start = Now();
+    T now = start;
+    // Loop until we can detect that the clock has changed. Non-HighRes timers
+    // will increment in chunks, i.e. 15ms. By spinning until we see a clock
+    // change, we detect the minimum time between measurements.
+    do {
+      now = Now();
+      delta = now - start;
+    } while (now <= start);
+    EXPECT_NE(static_cast<int64_t>(0), delta.InMicroseconds());
+  } while (delta > target_granularity && !timer.HasExpired(kExpirationTimeout));
+  EXPECT_LE(delta, target_granularity);
+}
+
+}  // namespace
+
+
+TEST(Time, NowResolution) {
+  // We assume that Time::Now() has at least 16ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+  ResolutionTest<Time>(&Time::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, NowResolution) {
+  // We assume that TimeTicks::Now() has at least 16ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+  ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, HighResolutionNowResolution) {
+  if (!TimeTicks::IsHighResolutionClockWorking()) return;
+
+  // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
+  ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, IsMonotonic) {
+  TimeTicks previous_normal_ticks;
+  TimeTicks previous_highres_ticks;
+  ElapsedTimer timer;
+  timer.Start();
+  while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
+    TimeTicks normal_ticks = TimeTicks::Now();
+    TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
+    EXPECT_GE(normal_ticks, previous_normal_ticks);
+    EXPECT_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
+    EXPECT_GE(highres_ticks, previous_highres_ticks);
+    EXPECT_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
+    previous_normal_ticks = normal_ticks;
+    previous_highres_ticks = highres_ticks;
+  }
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
new file mode 100644
index 0000000..d47ccaf
--- /dev/null
+++ b/src/base/platform/time.cc
@@ -0,0 +1,654 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_POSIX
+#include <fcntl.h>  // for O_RDONLY
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <string.h>
+
+#if V8_OS_WIN
+#include "src/base/lazy-instance.h"
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+TimeDelta TimeDelta::FromDays(int days) {
+  return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+  return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+  return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+  return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+  return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+  DCHECK_GE(ts.tv_nsec, 0);
+  DCHECK_LT(ts.tv_nsec,
+            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+  struct mach_timespec ts;
+  DCHECK(delta_ >= 0);
+  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+      Time::kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+#endif  // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+  DCHECK_GE(ts.tv_nsec, 0);
+  DCHECK_LT(ts.tv_nsec,
+            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+  struct timespec ts;
+  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+      Time::kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+#endif  // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts which are smaller than 10-15ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock FINAL {
+ public:
+  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
+
+  Time Now() {
+    // Time between resamplings of the coarse system clock for this API
+    // (1 minute).
+    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
+
+    LockGuard<Mutex> lock_guard(&mutex_);
+
+    // Determine current time and ticks.
+    TimeTicks ticks = GetSystemTicks();
+    Time time = GetSystemTime();
+
+    // Check if we need to synchronize with the system clock due to a backwards
+    // time change or the amount of time elapsed.
+    TimeDelta elapsed = ticks - initial_ticks_;
+    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+      initial_ticks_ = ticks;
+      initial_time_ = time;
+      return time;
+    }
+
+    return initial_time_ + elapsed;
+  }
+
+  Time NowFromSystemTime() {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    initial_ticks_ = GetSystemTicks();
+    initial_time_ = GetSystemTime();
+    return initial_time_;
+  }
+
+ private:
+  static TimeTicks GetSystemTicks() {
+    return TimeTicks::Now();
+  }
+
+  static Time GetSystemTime() {
+    FILETIME ft;
+    ::GetSystemTimeAsFileTime(&ft);
+    return Time::FromFiletime(ft);
+  }
+
+  TimeTicks initial_ticks_;
+  Time initial_time_;
+  Mutex mutex_;
+};
+
+
+static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
+                          ThreadSafeInitOnceTrait>::type clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+  return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+  return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01):
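+// 134774 days * 86400 seconds/day * 10^6 microseconds/second
+// = 11644473600000000.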
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+    return Time();
+  }
+  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+    return Max();
+  }
+  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+  return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+  DCHECK(us_ >= 0);
+  FILETIME ft;
+  if (IsNull()) {
+    ft.dwLowDateTime = 0;
+    ft.dwHighDateTime = 0;
+    return ft;
+  }
+  if (IsMax()) {
+    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+    return ft;
+  }
+  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+  ft.dwLowDateTime = static_cast<DWORD>(us);
+  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+  return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+  struct timeval tv;
+  int result = gettimeofday(&tv, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+  return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+  DCHECK(ts.tv_nsec >= 0);
+  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
+  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+    return Time();
+  }
+  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
+      ts.tv_sec == std::numeric_limits<time_t>::max()) {
+    return Max();
+  }
+  return Time(ts.tv_sec * kMicrosecondsPerSecond +
+              ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+  struct timespec ts;
+  if (IsNull()) {
+    ts.tv_sec = 0;
+    ts.tv_nsec = 0;
+    return ts;
+  }
+  if (IsMax()) {
+    ts.tv_sec = std::numeric_limits<time_t>::max();
+    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
+    return ts;
+  }
+  ts.tv_sec = us_ / kMicrosecondsPerSecond;
+  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+  DCHECK(tv.tv_usec >= 0);
+  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+    return Time();
+  }
+  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+      tv.tv_sec == std::numeric_limits<time_t>::max()) {
+    return Max();
+  }
+  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+  struct timeval tv;
+  if (IsNull()) {
+    tv.tv_sec = 0;
+    tv.tv_usec = 0;
+    return tv;
+  }
+  if (IsMax()) {
+    tv.tv_sec = std::numeric_limits<time_t>::max();
+    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+    return tv;
+  }
+  tv.tv_sec = us_ / kMicrosecondsPerSecond;
+  tv.tv_usec = us_ % kMicrosecondsPerSecond;
+  return tv;
+}
+
+#endif  // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+  // The epoch is a valid time, so this constructor doesn't interpret
+  // 0 as the null time.
+  if (ms_since_epoch == std::numeric_limits<double>::max()) {
+    return Max();
+  }
+  return Time(
+      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+  if (IsNull()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (IsMax()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::max();
+  }
+  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+  virtual ~TickClock() {}
+  virtual int64_t Now() = 0;
+  virtual bool IsHighResolution() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10
+// to 55 milliseconds) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock FINAL : public TickClock {
+ public:
+  explicit HighResolutionTickClock(int64_t ticks_per_second)
+      : ticks_per_second_(ticks_per_second) {
+    DCHECK_LT(0, ticks_per_second);
+  }
+  virtual ~HighResolutionTickClock() {}
+
+  virtual int64_t Now() OVERRIDE {
+    LARGE_INTEGER now;
+    BOOL result = QueryPerformanceCounter(&now);
+    DCHECK(result);
+    USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+    // overflow and precision issues: multiplying QuadPart by
+    // kMicrosecondsPerSecond directly can overflow int64. Think twice before
+    // simplifying!
+    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
+    // will never return 0.
+    return ticks + 1;
+  }
+
+  virtual bool IsHighResolution() OVERRIDE {
+    return true;
+  }
+
+ private:
+  int64_t ticks_per_second_;
+};
+
+
+class RolloverProtectedTickClock FINAL : public TickClock {
+ public:
+  // We initialize rollover_ms_ to 1 to ensure that we will never
+  // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
+  RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+  virtual ~RolloverProtectedTickClock() {}
+
+  virtual int64_t Now() OVERRIDE {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+    // every ~49.7 days. We try to track rollover ourselves, which works if
+    // TimeTicks::Now() is called at least every 49 days.
+    // Note that we do not use GetTickCount() here, since timeGetTime() gives
+    // more predictable delta values, as described here:
+    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+    // timeGetTime() provides 1ms granularity when combined with
+    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+    // can use timeBeginPeriod() to increase the resolution.
+    DWORD now = timeGetTime();
+    if (now < last_seen_now_) {
+      rollover_ms_ += V8_INT64_C(0x100000000);  // 2^32 ms, i.e. ~49.7 days.
+    }
+    last_seen_now_ = now;
+    return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+  }
+
+  virtual bool IsHighResolution() OVERRIDE {
+    return false;
+  }
+
+ private:
+  Mutex mutex_;
+  DWORD last_seen_now_;
+  int64_t rollover_ms_;
+};
+
+
+static LazyStaticInstance<RolloverProtectedTickClock,
+                          DefaultConstructTrait<RolloverProtectedTickClock>,
+                          ThreadSafeInitOnceTrait>::type tick_clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+  static TickClock* Create() {
+    // Check if the installed hardware supports a high-resolution performance
+    // counter, and if not, fall back to the low-resolution tick clock.
+    LARGE_INTEGER ticks_per_second;
+    if (!QueryPerformanceFrequency(&ticks_per_second)) {
+      return tick_clock.Pointer();
+    }
+
+    // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+    // is unreliable, so fall back to the low-resolution tick clock.
+    CPU cpu;
+    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+      return tick_clock.Pointer();
+    }
+
+    return new HighResolutionTickClock(ticks_per_second.QuadPart);
+  }
+};
+
+
+static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
+                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+    LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+  // Make sure we never return 0 here.
+  TimeTicks ticks(tick_clock.Pointer()->Now());
+  DCHECK(!ticks.IsNull());
+  return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+  // Make sure we never return 0 here.
+  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+  DCHECK(!ticks.IsNull());
+  return ticks;
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+  return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
+
+// static
+TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }
+
+
+// static
+bool TimeTicks::KernelTimestampAvailable() { return false; }
+
+#else  // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+  return HighResolutionNow();
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+  int64_t ticks;
+#if V8_OS_MACOSX
+  static struct mach_timebase_info info;
+  if (info.denom == 0) {
+    kern_return_t result = mach_timebase_info(&info);
+    DCHECK_EQ(KERN_SUCCESS, result);
+    USE(result);
+  }
+  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+           info.numer / info.denom);
+#elif V8_OS_SOLARIS
+  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+  // TODO(bmeurer): This is a temporary hack to support cross-compiling
+  // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+  // cleanup the tools/gyp/v8.gyp file.
+  struct timeval tv;
+  int result = gettimeofday(&tv, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+  struct timespec ts;
+  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+  DCHECK_EQ(0, result);
+  USE(result);
+  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif  // V8_OS_MACOSX
+  // Make sure we never return 0 here.
+  return TimeTicks(ticks + 1);
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+  return true;
+}
+
+
+#if V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
+
+class KernelTimestampClock {
+ public:
+  KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
+    clock_fd_ = open(kTraceClockDevice, O_RDONLY);
+    if (clock_fd_ == -1) {
+      return;
+    }
+    clock_id_ = get_clockid(clock_fd_);
+  }
+
+  virtual ~KernelTimestampClock() {
+    if (clock_fd_ != -1) {
+      close(clock_fd_);
+    }
+  }
+
+  int64_t Now() {
+    if (clock_id_ == kClockInvalid) {
+      return 0;
+    }
+
+    struct timespec ts;
+
+    clock_gettime(clock_id_, &ts);
+    return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
+  }
+
+  bool Available() { return clock_id_ != kClockInvalid; }
+
+ private:
+  static const clockid_t kClockInvalid = -1;
+  static const char kTraceClockDevice[];
+  static const uint64_t kNsecPerSec = 1000000000;
+
+  int clock_fd_;
+  clockid_t clock_id_;
+
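+  // Derives a dynamic POSIX clock id from an open file descriptor, mirroring
+  // the kernel's FD_TO_CLOCKID() idiom (the low bits, CLOCKFD == 3, mark the
+  // id as fd-based).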
+  static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
+};
+
+
+// Device file that exposes the kernel trace clock.
+const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";
+
+#else
+
+class KernelTimestampClock {
+ public:
+  KernelTimestampClock() {}
+
+  int64_t Now() { return 0; }
+  bool Available() { return false; }
+};
+
+#endif  // V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
+
+static LazyStaticInstance<KernelTimestampClock,
+                          DefaultConstructTrait<KernelTimestampClock>,
+                          ThreadSafeInitOnceTrait>::type kernel_tick_clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+// static
+TimeTicks TimeTicks::KernelTimestampNow() {
+  return TimeTicks(kernel_tick_clock.Pointer()->Now());
+}
+
+
+// static
+bool TimeTicks::KernelTimestampAvailable() {
+  return kernel_tick_clock.Pointer()->Available();
+}
+
+#endif  // V8_OS_WIN
+
+} }  // namespace v8::base
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
new file mode 100644
index 0000000..9dfa47d
--- /dev/null
+++ b/src/base/platform/time.h
@@ -0,0 +1,400 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_TIME_H_
+#define V8_BASE_PLATFORM_TIME_H_
+
+#include <time.h>
+#include <limits>
+
+#include "src/base/macros.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace base {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
+
+class TimeDelta FINAL {
+ public:
+  TimeDelta() : delta_(0) {}
+
+  // Converts units of time to TimeDeltas.
+  static TimeDelta FromDays(int days);
+  static TimeDelta FromHours(int hours);
+  static TimeDelta FromMinutes(int minutes);
+  static TimeDelta FromSeconds(int64_t seconds);
+  static TimeDelta FromMilliseconds(int64_t milliseconds);
+  static TimeDelta FromMicroseconds(int64_t microseconds) {
+    return TimeDelta(microseconds);
+  }
+  static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+  // Returns the time delta in some unit. The F versions return a floating
+  // point value, the "regular" versions return a rounded-down value.
+  //
+  // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+  // to the next full millisecond.
+  int InDays() const;
+  int InHours() const;
+  int InMinutes() const;
+  double InSecondsF() const;
+  int64_t InSeconds() const;
+  double InMillisecondsF() const;
+  int64_t InMilliseconds() const;
+  int64_t InMillisecondsRoundedUp() const;
+  int64_t InMicroseconds() const { return delta_; }
+  int64_t InNanoseconds() const;
+
+  // Converts to/from Mach time specs.
+  static TimeDelta FromMachTimespec(struct mach_timespec ts);
+  struct mach_timespec ToMachTimespec() const;
+
+  // Converts to/from POSIX time specs.
+  static TimeDelta FromTimespec(struct timespec ts);
+  struct timespec ToTimespec() const;
+
+  TimeDelta& operator=(const TimeDelta& other) {
+    delta_ = other.delta_;
+    return *this;
+  }
+
+  // Computations with other deltas.
+  TimeDelta operator+(const TimeDelta& other) const {
+    return TimeDelta(delta_ + other.delta_);
+  }
+  TimeDelta operator-(const TimeDelta& other) const {
+    return TimeDelta(delta_ - other.delta_);
+  }
+
+  TimeDelta& operator+=(const TimeDelta& other) {
+    delta_ += other.delta_;
+    return *this;
+  }
+  TimeDelta& operator-=(const TimeDelta& other) {
+    delta_ -= other.delta_;
+    return *this;
+  }
+  TimeDelta operator-() const {
+    return TimeDelta(-delta_);
+  }
+
+  double TimesOf(const TimeDelta& other) const {
+    return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+  }
+  double PercentOf(const TimeDelta& other) const {
+    return TimesOf(other) * 100.0;
+  }
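+  // For example, TimeDelta::FromSeconds(1).PercentOf(
+  //     TimeDelta::FromSeconds(4)) evaluates to 25.0.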
+
+  // Computations with ints. Note that we only allow multiplicative operations
+  // with ints, and additive operations with other deltas.
+  TimeDelta operator*(int64_t a) const {
+    return TimeDelta(delta_ * a);
+  }
+  TimeDelta operator/(int64_t a) const {
+    return TimeDelta(delta_ / a);
+  }
+  TimeDelta& operator*=(int64_t a) {
+    delta_ *= a;
+    return *this;
+  }
+  TimeDelta& operator/=(int64_t a) {
+    delta_ /= a;
+    return *this;
+  }
+  int64_t operator/(const TimeDelta& other) const {
+    return delta_ / other.delta_;
+  }
+
+  // Comparison operators.
+  bool operator==(const TimeDelta& other) const {
+    return delta_ == other.delta_;
+  }
+  bool operator!=(const TimeDelta& other) const {
+    return delta_ != other.delta_;
+  }
+  bool operator<(const TimeDelta& other) const {
+    return delta_ < other.delta_;
+  }
+  bool operator<=(const TimeDelta& other) const {
+    return delta_ <= other.delta_;
+  }
+  bool operator>(const TimeDelta& other) const {
+    return delta_ > other.delta_;
+  }
+  bool operator>=(const TimeDelta& other) const {
+    return delta_ >= other.delta_;
+  }
+
+ private:
+  // Constructs a delta given the duration in microseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // FromSeconds, FromMilliseconds, etc. instead.
+  explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+  // Delta in microseconds.
+  int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
+
+class Time FINAL {
+ public:
+  static const int64_t kMillisecondsPerSecond = 1000;
+  static const int64_t kMicrosecondsPerMillisecond = 1000;
+  static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+                                                kMillisecondsPerSecond;
+  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+  static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static const int64_t kNanosecondsPerMicrosecond = 1000;
+  static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+                                               kMicrosecondsPerSecond;
+
+  // Contains the NULL time. Use Time::Now() to get the current time.
+  Time() : us_(0) {}
+
+  // Returns true if the time object has not been initialized.
+  bool IsNull() const { return us_ == 0; }
+
+  // Returns true if the time object is the maximum time.
+  bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // Returns the current time. Watch out, the system might adjust its clock
+  // in which case time will actually go backwards. We don't guarantee that
+  // times are increasing, or that two calls to Now() won't be the same.
+  static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time so that there are no discrepancies between the returned
+  // time and system time even on virtual environments including our test bot.
+  // Timing-sensitive unit tests should use this function.
+  static Time NowFromSystemTime();
+
+  // Returns the epoch used by Unix-like systems (Jan 1, 1970).
+  static Time UnixEpoch() { return Time(0); }
+
+  // Returns the maximum time, which should be greater than any reasonable time
+  // with which we might compare it.
+  static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+  // Converts to/from internal values. The meaning of the "internal value" is
+  // completely up to the implementation, so it should be treated as opaque.
+  static Time FromInternalValue(int64_t value) {
+    return Time(value);
+  }
+  int64_t ToInternalValue() const {
+    return us_;
+  }
+
+  // Converts to/from POSIX time specs.
+  static Time FromTimespec(struct timespec ts);
+  struct timespec ToTimespec() const;
+
+  // Converts to/from POSIX time values.
+  static Time FromTimeval(struct timeval tv);
+  struct timeval ToTimeval() const;
+
+  // Converts to/from Windows file times.
+  static Time FromFiletime(struct _FILETIME ft);
+  struct _FILETIME ToFiletime() const;
+
+  // Converts to/from the JavaScript convention for times, a number of
+  // milliseconds since the epoch.
+  static Time FromJsTime(double ms_since_epoch);
+  double ToJsTime() const;
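+  // For example (illustrative), Time::FromJsTime(1388534400000.0) corresponds
+  // to 2014-01-01T00:00:00Z.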
+
+  Time& operator=(const Time& other) {
+    us_ = other.us_;
+    return *this;
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(const Time& other) const {
+    return TimeDelta::FromMicroseconds(us_ - other.us_);
+  }
+
+  // Modify by some time delta.
+  Time& operator+=(const TimeDelta& delta) {
+    us_ += delta.InMicroseconds();
+    return *this;
+  }
+  Time& operator-=(const TimeDelta& delta) {
+    us_ -= delta.InMicroseconds();
+    return *this;
+  }
+
+  // Return a new time modified by some delta.
+  Time operator+(const TimeDelta& delta) const {
+    return Time(us_ + delta.InMicroseconds());
+  }
+  Time operator-(const TimeDelta& delta) const {
+    return Time(us_ - delta.InMicroseconds());
+  }
+
+  // Comparison operators
+  bool operator==(const Time& other) const {
+    return us_ == other.us_;
+  }
+  bool operator!=(const Time& other) const {
+    return us_ != other.us_;
+  }
+  bool operator<(const Time& other) const {
+    return us_ < other.us_;
+  }
+  bool operator<=(const Time& other) const {
+    return us_ <= other.us_;
+  }
+  bool operator>(const Time& other) const {
+    return us_ > other.us_;
+  }
+  bool operator>=(const Time& other) const {
+    return us_ >= other.us_;
+  }
+
+ private:
+  explicit Time(int64_t us) : us_(us) {}
+
+  // Time in microseconds in UTC.
+  int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+  return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract time that is almost always incrementing,
+// for use in measuring time durations. It is internally represented in
+// microseconds. It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (if the user changes the computer clock,
+// Time::Now() may actually decrease or jump). But note that TimeTicks may
+// "stand still", for example if the computer is suspended.
+
+class TimeTicks FINAL {
+ public:
+  TimeTicks() : ticks_(0) {}
+
+  // Platform-dependent tick count representing "right now."
+  // The resolution of this clock is ~1-15ms.  Resolution varies depending
+  // on hardware/operating system configuration.
+  // This method never returns a null TimeTicks.
+  static TimeTicks Now();
+
+  // Returns a platform-dependent high-resolution tick count. Implementation
+  // is hardware dependent and may or may not return sub-millisecond
+  // resolution.  THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+  // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+  // This method never returns a null TimeTicks.
+  static TimeTicks HighResolutionNow();
+
+  // Returns true if the high-resolution clock is working on this system.
+  static bool IsHighResolutionClockWorking();
+
+  // Returns a Linux kernel timestamp for generating profiler events. This
+  // method returns null TimeTicks if the kernel cannot provide the timestamps
+  // (e.g. on a non-Linux OS, or if the kernel module for timestamps is not
+  // loaded).
+  static TimeTicks KernelTimestampNow();
+  static bool KernelTimestampAvailable();
+
+  // Returns true if this object has not been initialized.
+  bool IsNull() const { return ticks_ == 0; }
+
+  // Converts to/from internal values. The meaning of the "internal value" is
+  // completely up to the implementation, so it should be treated as opaque.
+  static TimeTicks FromInternalValue(int64_t value) {
+    return TimeTicks(value);
+  }
+  int64_t ToInternalValue() const {
+    return ticks_;
+  }
+
+  TimeTicks& operator=(const TimeTicks other) {
+    ticks_ = other.ticks_;
+    return *this;
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(const TimeTicks other) const {
+    return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+  }
+
+  // Modify by some time delta.
+  TimeTicks& operator+=(const TimeDelta& delta) {
+    ticks_ += delta.InMicroseconds();
+    return *this;
+  }
+  TimeTicks& operator-=(const TimeDelta& delta) {
+    ticks_ -= delta.InMicroseconds();
+    return *this;
+  }
+
+  // Return a new TimeTicks modified by some delta.
+  TimeTicks operator+(const TimeDelta& delta) const {
+    return TimeTicks(ticks_ + delta.InMicroseconds());
+  }
+  TimeTicks operator-(const TimeDelta& delta) const {
+    return TimeTicks(ticks_ - delta.InMicroseconds());
+  }
+
+  // Comparison operators
+  bool operator==(const TimeTicks& other) const {
+    return ticks_ == other.ticks_;
+  }
+  bool operator!=(const TimeTicks& other) const {
+    return ticks_ != other.ticks_;
+  }
+  bool operator<(const TimeTicks& other) const {
+    return ticks_ < other.ticks_;
+  }
+  bool operator<=(const TimeTicks& other) const {
+    return ticks_ <= other.ticks_;
+  }
+  bool operator>(const TimeTicks& other) const {
+    return ticks_ > other.ticks_;
+  }
+  bool operator>=(const TimeTicks& other) const {
+    return ticks_ >= other.ticks_;
+  }
+
+ private:
+  // Please use Now() to create a new object. This is for internal use
+  // and testing. Ticks are in microseconds.
+  explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+  // Tick count in microseconds.
+  int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+  return ticks + delta;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_TIME_H_
diff --git a/src/base/qnx-math.h b/src/base/qnx-math.h
new file mode 100644
index 0000000..6ff18f8
--- /dev/null
+++ b/src/base/qnx-math.h
@@ -0,0 +1,19 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_QNX_MATH_H_
+#define V8_BASE_QNX_MATH_H_
+
+#include <cmath>
+
+#undef fpclassify
+#undef isfinite
+#undef isinf
+#undef isnan
+#undef isnormal
+#undef signbit
+
+using std::lrint;
+
+#endif  // V8_BASE_QNX_MATH_H_
diff --git a/src/base/safe_conversions.h b/src/base/safe_conversions.h
new file mode 100644
index 0000000..0a1bd69
--- /dev/null
+++ b/src/base/safe_conversions.h
@@ -0,0 +1,67 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_H_
+#define V8_BASE_SAFE_CONVERSIONS_H_
+
+#include <limits>
+
+#include "src/base/safe_conversions_impl.h"
+
+namespace v8 {
+namespace base {
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+inline bool IsValueInRangeForNumericType(Src value) {
+  return internal::DstRangeRelationToSrcRange<Dst>(value) ==
+         internal::RANGE_VALID;
+}
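+
+// For example, IsValueInRangeForNumericType<uint8_t>(-1) is false, since a
+// negative value underflows an unsigned destination type.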
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+  CHECK(IsValueInRangeForNumericType<Dst>(value));
+  return static_cast<Dst>(value);
+}
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will trigger a CHECK condition.
+template <typename Dst, typename Src>
+inline Dst saturated_cast(Src value) {
+  // Optimization for floating point values, which already saturate.
+  if (std::numeric_limits<Dst>::is_iec559)
+    return static_cast<Dst>(value);
+
+  switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
+    case internal::RANGE_VALID:
+      return static_cast<Dst>(value);
+
+    case internal::RANGE_UNDERFLOW:
+      return std::numeric_limits<Dst>::min();
+
+    case internal::RANGE_OVERFLOW:
+      return std::numeric_limits<Dst>::max();
+
+    // Should fail only on attempting to assign NaN to a saturated integer.
+    case internal::RANGE_INVALID:
+      CHECK(false);
+      return std::numeric_limits<Dst>::max();
+  }
+
+  UNREACHABLE();
+  return static_cast<Dst>(value);
+}
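+
+// For example (illustrative): checked_cast<uint8_t>(300) fails a CHECK, while
+// saturated_cast<uint8_t>(300) returns 255 and saturated_cast<uint8_t>(-5)
+// returns 0.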
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SAFE_CONVERSIONS_H_
diff --git a/src/base/safe_conversions_impl.h b/src/base/safe_conversions_impl.h
new file mode 100644
index 0000000..90c8e19
--- /dev/null
+++ b/src/base/safe_conversions_impl.h
@@ -0,0 +1,220 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers; however,
+// we can compute one by adding one to the number of non-sign bits. This allows
+// for accurate range comparisons between floating point and integer types.
+template <typename NumericType>
+struct MaxExponent {
+  static const int value = std::numeric_limits<NumericType>::is_iec559
+                               ? std::numeric_limits<NumericType>::max_exponent
+                               : (sizeof(NumericType) * 8 + 1 -
+                                  std::numeric_limits<NumericType>::is_signed);
+};
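+
+// For example, MaxExponent<int32_t>::value evaluates to 32 (31 value bits
+// plus one), while MaxExponent<double>::value is the IEC 559 max_exponent,
+// i.e. 1024.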
+
+enum IntegerRepresentation {
+  INTEGER_REPRESENTATION_UNSIGNED,
+  INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+  NUMERIC_RANGE_NOT_CONTAINED,
+  NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+    typename Dst,
+    typename Src,
+    IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    IntegerRepresentation SrcSign =
+        std::numeric_limits<Src>::is_signed
+            ? INTEGER_REPRESENTATION_SIGNED
+            : INTEGER_REPRESENTATION_UNSIGNED >
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value >= MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_SIGNED,
+                                        INTEGER_REPRESENTATION_UNSIGNED> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value > MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_UNSIGNED,
+                                        INTEGER_REPRESENTATION_SIGNED> {
+  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+enum RangeConstraint {
+  RANGE_VALID = 0x0,  // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
+  RANGE_OVERFLOW = 0x2,  // Value would overflow.
+  RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW  // Invalid (i.e. NaN).
+};
+
+// Helper function for coercing an int back to a RangeConstraint.
+inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+  DCHECK(integer_range_constraint >= RANGE_VALID &&
+         integer_range_constraint <= RANGE_INVALID);
+  return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+                                          bool is_in_lower_bound) {
+  return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+                            (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
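+
+// For example, GetRangeConstraint(false, false) yields RANGE_INVALID; only a
+// NaN source can be out of range in both directions at once.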
+
+template <
+    typename Dst,
+    typename Src,
+    IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    NumericRangeRepresentation DstRange =
+        StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split the checks based on signedness to avoid confusing casts and compiler
+// warnings on signed and unsigned comparisons.
+
+// Dst range is statically determined to contain Src: Nothing to check.
+template <typename Dst,
+          typename Src,
+          IntegerRepresentation DstSign,
+          IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      DstSign,
+                                      SrcSign,
+                                      NUMERIC_RANGE_CONTAINED> {
+  static RangeConstraint Check(Src value) { return RANGE_VALID; }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static RangeConstraint Check(Src value) {
+    return std::numeric_limits<Dst>::is_iec559
+               ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+                                    value >= -std::numeric_limits<Dst>::max())
+               : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+                                    value >= std::numeric_limits<Dst>::min());
+  }
+};
+
+// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static RangeConstraint Check(Src value) {
+    return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true);
+  }
+};
+
+// Unsigned to signed: The upper boundary may be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static RangeConstraint Check(Src value) {
+    return sizeof(Dst) > sizeof(Src)
+               ? RANGE_VALID
+               : GetRangeConstraint(
+                     value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+                     true);
+  }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static RangeConstraint Check(Src value) {
+    return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
+               ? GetRangeConstraint(true, value >= static_cast<Src>(0))
+               : GetRangeConstraint(
+                     value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+                     value >= static_cast<Src>(0));
+  }
+};
+
+template <typename Dst, typename Src>
+inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
+  // Both source and destination must be numeric.
+  STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+  STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized);
+  return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+}
+
+}  // namespace internal
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SAFE_CONVERSIONS_IMPL_H_
diff --git a/src/base/safe_math.h b/src/base/safe_math.h
new file mode 100644
index 0000000..62a2f72
--- /dev/null
+++ b/src/base/safe_math.h
@@ -0,0 +1,276 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_H_
+#define V8_BASE_SAFE_MATH_H_
+
+#include "src/base/safe_math_impl.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// CheckedNumeric implements all the logic and operators for detecting integer
+// boundary conditions such as overflow, underflow, and invalid conversions.
+// The CheckedNumeric type implicitly converts from floating point and integer
+// data types, and contains overloads for basic arithmetic operations (i.e.: +,
+// -, *, /, %).
+//
+// The following methods convert from CheckedNumeric to standard numeric values:
+// IsValid() - Returns true if the underlying numeric value is valid (i.e. it
+//             has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+//                call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+//                    state is not valid.
+// ValueFloating() - Returns the underlying floating point value (valid only
+//                   for floating point CheckedNumeric types).
+//
+// Bitwise operations are explicitly not supported, because correct
+// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
+// operations are explicitly not supported because they could result in a crash
+// on a CHECK condition. You should use patterns like the following for these
+// operations:
+// Bitwise operation:
+//     CheckedNumeric<int> checked_int = untrusted_input_value;
+//     int x = checked_int.ValueOrDefault(0) | kFlagValues;
+// Comparison:
+//   CheckedNumeric<size_t> checked_size = untrusted_input_value;
+//   checked_size = checked_size + HEADER_LENGTH;
+//   if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
+//     Do stuff...
+template <typename T>
+class CheckedNumeric {
+ public:
+  typedef T type;
+
+  CheckedNumeric() {}
+
+  // Copy constructor.
+  template <typename Src>
+  CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+
+  template <typename Src>
+  CheckedNumeric(Src value, RangeConstraint validity)
+      : state_(value, validity) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+  CheckedNumeric(Src value)  // NOLINT
+      : state_(value) {
+    // Argument must be numeric.
+    STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+  }
+
+  // IsValid() is the public API to test if a CheckedNumeric is currently valid.
+  bool IsValid() const { return validity() == RANGE_VALID; }
+
+  // ValueOrDie() The primary accessor for the underlying value. If the current
+  // state is not valid it will CHECK and crash.
+  T ValueOrDie() const {
+    CHECK(IsValid());
+    return state_.value();
+  }
+
+  // ValueOrDefault(T default_value) A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  T ValueOrDefault(T default_value) const {
+    return IsValid() ? state_.value() : default_value;
+  }
+
+  // ValueFloating() - Since floating point values include their validity state,
+  // we provide an easy method for extracting them directly, without a risk of
+  // crashing on a CHECK.
+  T ValueFloating() const {
+    // Argument must be a floating-point value.
+    STATIC_ASSERT(std::numeric_limits<T>::is_iec559);
+    return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+  }
+
+  // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
+  // tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: current validity state (i.e. valid, overflow, underflow, nan).
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  RangeConstraint validity() const { return state_.validity(); }
+
+  // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
+  // for tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: the raw numeric value, regardless of the current state.
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  T ValueUnsafe() const { return state_.value(); }
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src> CheckedNumeric& operator+=(Src rhs);
+  template <typename Src> CheckedNumeric& operator-=(Src rhs);
+  template <typename Src> CheckedNumeric& operator*=(Src rhs);
+  template <typename Src> CheckedNumeric& operator/=(Src rhs);
+  template <typename Src> CheckedNumeric& operator%=(Src rhs);
+
+  CheckedNumeric operator-() const {
+    RangeConstraint validity;
+    T value = CheckedNeg(state_.value(), &validity);
+    // Negation is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
+
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
+  }
+
+  CheckedNumeric Abs() const {
+    RangeConstraint validity;
+    T value = CheckedAbs(state_.value(), &validity);
+    // Absolute value is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
+
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
+  }
+
+  CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  CheckedNumeric operator--(int) {
+    CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These static methods behave like a convenience cast operator targeting
+  // the desired CheckedNumeric type. As an optimization, a reference is
+  // returned when Src is the same type as T.
+  template <typename Src>
+  static CheckedNumeric<T> cast(
+      Src u,
+      typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+          0) {
+    return u;
+  }
+
+  template <typename Src>
+  static CheckedNumeric<T> cast(
+      const CheckedNumeric<Src>& u,
+      typename enable_if<!is_same<Src, T>::value, int>::type = 0) {
+    return u;
+  }
+
+  static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+
+ private:
+  CheckedNumericState<T> state_;
+};
+
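+// Example usage (an illustrative sketch, not part of the original header; it
+// assumes the IsValid() and ValueOrDie() accessors defined earlier in this
+// class, and untrusted_input/kHeaderSize are hypothetical names):
+//   CheckedNumeric<int32_t> size = untrusted_input;
+//   size += kHeaderSize;
+//   if (!size.IsValid()) return;        // Overflow was detected.
+//   int32_t total = size.ValueOrDie();  // CHECKs only on an invalid state.
+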
+// This is the boilerplate for the standard arithmetic operator overloads. A
+// macro isn't the prettiest solution, but it beats rewriting these five times.
+// Some details worth noting are:
+//  * We apply the standard arithmetic promotions.
+//  * We skip range checks for floating points.
+//  * We skip range checks for destination integers with sufficient range.
+// TODO(jschuh): extract these out into templates.
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP)              \
+  /* Binary arithmetic operator for CheckedNumerics of the same type. */      \
+  template <typename T>                                                       \
+  CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP(          \
+      const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) {           \
+    typedef typename ArithmeticPromotion<T>::type Promotion;                  \
+    /* Floating point always takes the fast path */                           \
+    if (std::numeric_limits<T>::is_iec559)                                    \
+      return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe());       \
+    if (IsIntegerArithmeticSafe<Promotion, T, T>::value)                      \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    RangeConstraint validity = RANGE_VALID;                                   \
+    T result = Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()),       \
+                             static_cast<Promotion>(rhs.ValueUnsafe()),       \
+                             &validity);                                      \
+    return CheckedNumeric<Promotion>(                                         \
+        result,                                                               \
+        GetRangeConstraint(validity | lhs.validity() | rhs.validity()));      \
+  }                                                                           \
+  /* Assignment arithmetic operator implementation from CheckedNumeric. */    \
+  template <typename T>                                                       \
+  template <typename Src>                                                     \
+  CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) {       \
+    *this = CheckedNumeric<T>::cast(*this) OP CheckedNumeric<Src>::cast(rhs); \
+    return *this;                                                             \
+  }                                                                           \
+  /* Binary arithmetic operator for CheckedNumerics of different types. */   \
+  template <typename T, typename Src>                                         \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) {         \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
+  template <typename T, typename Src>                                         \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<T>& lhs, Src rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs,              \
+                                       lhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \
+  template <typename T, typename Src>                                         \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      Src lhs, const CheckedNumeric<T>& rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(),              \
+                                       rhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+
+#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
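+
+// For instance (illustrative): adding a CheckedNumeric<int32_t> to a uint32_t
+// via the operators above yields a CheckedNumeric<uint32_t>, because the
+// ArithmeticPromotion rules in safe_math_impl.h favor the type with the
+// larger maximum exponent.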
+
+}  // namespace internal
+
+using internal::CheckedNumeric;
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SAFE_MATH_H_
diff --git a/src/base/safe_math_impl.h b/src/base/safe_math_impl.h
new file mode 100644
index 0000000..055e2a0
--- /dev/null
+++ b/src/base/safe_math_impl.h
@@ -0,0 +1,531 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_IMPL_H_
+#define V8_BASE_SAFE_MATH_IMPL_H_
+
+#include <stdint.h>
+
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+
+#include "src/base/macros.h"
+#include "src/base/safe_conversions.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+
+// From Chromium's base/template_util.h:
+
+template<class T, T v>
+struct integral_constant {
+  static const T value = v;
+  typedef T value_type;
+  typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+template <class T, class U> struct is_same : public false_type {};
+template <class T> struct is_same<T, T> : true_type {};
+
+template<bool B, class T = void>
+struct enable_if {};
+
+template<class T>
+struct enable_if<true, T> { typedef T type; };
+
+// </template_util.h>
+
+
+// Everything from here up to the floating point operations is portable C++,
+// but it may not be fast. This code could be split based on
+// platform/architecture and replaced with potentially faster implementations.
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForSizeAndSign;
+template <>
+struct IntegerForSizeAndSign<1, true> {
+  typedef int8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<1, false> {
+  typedef uint8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, true> {
+  typedef int16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, false> {
+  typedef uint16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, true> {
+  typedef int32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, false> {
+  typedef uint32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, true> {
+  typedef int64_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, false> {
+  typedef uint64_t type;
+};
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+
+template <typename Integer>
+struct UnsignedIntegerForSize {
+  typedef typename enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
+};
+
+template <typename Integer>
+struct SignedIntegerForSize {
+  typedef typename enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
+};
+
+template <typename Integer>
+struct TwiceWiderInteger {
+  typedef typename enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<
+          sizeof(Integer) * 2,
+          std::numeric_limits<Integer>::is_signed>::type>::type type;
+};
+
+template <typename Integer>
+struct PositionOfSignBit {
+  static const typename enable_if<std::numeric_limits<Integer>::is_integer,
+                                  size_t>::type value = 8 * sizeof(Integer) - 1;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename T>
+bool HasSignBit(T x) {
+  // Cast to unsigned since right shift on signed is undefined.
+  return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
+            PositionOfSignBit<T>::value);
+}
+
+// This wrapper undoes the standard integer promotions.
+template <typename T>
+T BinaryComplement(T x) {
+  return ~x;
+}
+
+// Here are the actual portable checked integer math implementations.
+// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
+// way to coalesce things into the CheckedNumericState specializations below.
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedAdd(T x, T y, RangeConstraint* validity) {
+  // Since the value of x+y is undefined if we have a signed type, we compute
+  // it using the unsigned type of the same size.
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = ux + uy;
+  // Addition is valid if the sign of (x + y) is equal to either that of x or
+  // that of y.
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy))))
+      *validity = RANGE_VALID;
+    else  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+  } else {  // Unsigned is either valid or overflow.
+    *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+  return static_cast<T>(uresult);
+}
+
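+// Worked example for the signed branch above (illustrative): with T = int8_t,
+// CheckedAdd(100, 100, &v) computes uresult = 200 (0xC8). The sign bit of
+// uresult matches neither operand's sign bit, so the validity test fails,
+// and because uresult has its sign bit set (the sum wrapped negative), v is
+// set to RANGE_OVERFLOW. On two's-complement hardware the truncated return
+// value is static_cast<int8_t>(200) == -56.
+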
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedSub(T x, T y, RangeConstraint* validity) {
+  // Since the value of x - y is undefined if we have a signed type, we
+  // compute it using the unsigned type of the same size.
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = ux - uy;
+  // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+  // the same sign.
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy))))
+      *validity = RANGE_VALID;
+    else  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+  } else {  // Unsigned is either valid or underflow.
+    *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+  }
+  return static_cast<T>(uresult);
+}
+
+// Integer multiplication is a bit complicated. In the fast case we just
+// promote to a twice wider type and range check the result. In the slow case
+// we need to manually check that the result won't be truncated by checking
+// with division against the appropriate bound.
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && sizeof(T) * 2 <= sizeof(uintmax_t),
+    T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  typedef typename TwiceWiderInteger<T>::type IntermediateType;
+  IntermediateType tmp =
+      static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
+  *validity = DstRangeRelationToSrcRange<T>(tmp);
+  return static_cast<T>(tmp);
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+                       std::numeric_limits<T>::is_signed &&
+                       (sizeof(T) * 2 > sizeof(uintmax_t)),
+                   T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  // If either side is zero then the result will be zero.
+  if (!x || !y) {
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
+
+  } else if (x > 0) {
+    if (y > 0)
+      *validity =
+          x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
+    else
+      *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+
+  } else {
+    if (y > 0)
+      *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+    else
+      *validity =
+          y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+
+  return x * y;
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+                       !std::numeric_limits<T>::is_signed &&
+                       (sizeof(T) * 2 > sizeof(uintmax_t)),
+                   T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
+                  ? RANGE_VALID
+                  : RANGE_OVERFLOW;
+  return x * y;
+}
+
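+// Worked example for the slow signed path above (illustrative): with
+// T = int64_t on a platform where uintmax_t is 64 bits wide,
+// CheckedMul(INT64_MAX / 2, 3, &v) sets v to RANGE_OVERFLOW because
+// INT64_MAX / 2 > INT64_MAX / 3, and the wrapped product is still returned.
+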
+// Division just requires a check for an invalid negation on signed min/-1.
+template <typename T>
+T CheckedDiv(
+    T x,
+    T y,
+    RangeConstraint* validity,
+    typename enable_if<std::numeric_limits<T>::is_integer, int>::type = 0) {
+  if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
+      y == static_cast<T>(-1)) {
+    *validity = RANGE_OVERFLOW;
+    return std::numeric_limits<T>::min();
+  }
+
+  *validity = RANGE_VALID;
+  return x / y;
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
+  return x % y;
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = RANGE_VALID;
+  return x % y;
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  // The negation of signed min is min, so catch that one.
+  return -value;
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  // The only legal unsigned negation is zero.
+  *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
+  return static_cast<T>(
+      -static_cast<typename SignedIntegerForSize<T>::type>(value));
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  return std::abs(value);
+}
+
+template <typename T>
+typename enable_if<
+    std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+    T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // The absolute value of an unsigned value is just its identity.
+  *validity = RANGE_VALID;
+  return value;
+}
+
+// These are the floating point stubs that the compiler needs to see. Only the
+// negation operation is ever called.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME)                        \
+  template <typename T>                                          \
+  typename enable_if<std::numeric_limits<T>::is_iec559, T>::type \
+  Checked##NAME(T, T, RangeConstraint*) {                        \
+    UNREACHABLE();                                               \
+    return 0;                                                    \
+  }
+
+BASE_FLOAT_ARITHMETIC_STUBS(Add)
+BASE_FLOAT_ARITHMETIC_STUBS(Sub)
+BASE_FLOAT_ARITHMETIC_STUBS(Mul)
+BASE_FLOAT_ARITHMETIC_STUBS(Div)
+BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+
+#undef BASE_FLOAT_ARITHMETIC_STUBS
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+    T value,
+    RangeConstraint*) {
+  return -value;
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+    T value,
+    RangeConstraint*) {
+  return std::abs(value);
+}
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+  NUMERIC_INTEGER,
+  NUMERIC_FLOATING,
+  NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+  static const NumericRepresentation value =
+      std::numeric_limits<NumericType>::is_integer
+          ? NUMERIC_INTEGER
+          : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
+                                                         : NUMERIC_UNKNOWN);
+};
+
+template <typename T, NumericRepresentation type =
+                          GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+  T value_;
+  RangeConstraint validity_;
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+
+  template <typename Src>
+  CheckedNumericState(Src value, RangeConstraint validity)
+      : value_(value),
+        validity_(GetRangeConstraint(validity |
+                                     DstRangeRelationToSrcRange<T>(value))) {
+    // Argument must be numeric.
+    STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+  }
+
+  // Copy/conversion constructor from another CheckedNumericState.
+  template <typename Src>
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())),
+        validity_(GetRangeConstraint(
+            rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+
+  template <typename Src>
+  explicit CheckedNumericState(
+      Src value,
+      typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+          0)
+      : value_(static_cast<T>(value)),
+        validity_(DstRangeRelationToSrcRange<T>(value)) {}
+
+  RangeConstraint validity() const { return validity_; }
+  T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+  T value_;
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  CheckedNumericState() : value_(0.0) {}
+
+  template <typename Src>
+  CheckedNumericState(
+      Src value,
+      RangeConstraint validity,
+      typename enable_if<std::numeric_limits<Src>::is_integer, int>::type = 0) {
+    switch (DstRangeRelationToSrcRange<T>(value)) {
+      case RANGE_VALID:
+        value_ = static_cast<T>(value);
+        break;
+
+      case RANGE_UNDERFLOW:
+        value_ = -std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_OVERFLOW:
+        value_ = std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_INVALID:
+        value_ = std::numeric_limits<T>::quiet_NaN();
+        break;
+    }
+  }
+
+  template <typename Src>
+  explicit CheckedNumericState(
+      Src value,
+      typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+          0)
+      : value_(static_cast<T>(value)) {}
+
+  // Copy/conversion constructor from another CheckedNumericState.
+  template <typename Src>
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())) {}
+
+  RangeConstraint validity() const {
+    return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
+                              value_ >= -std::numeric_limits<T>::max());
+  }
+  T value() const { return value_; }
+};
+
+// For integers less than 128-bit and floats 32-bit or larger, we can distil
+// C/C++ arithmetic promotions down to two simple rules:
+// 1. The type with the larger maximum exponent always takes precedence.
+// 2. The resulting type must be promoted to at least an int.
+// The following template specializations implement that promotion logic.
+enum ArithmeticPromotionCategory {
+  LEFT_PROMOTION,
+  RIGHT_PROMOTION,
+  DEFAULT_PROMOTION
+};
+
+template <typename Lhs,
+          typename Rhs = Lhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? (MaxExponent<Lhs>::value > MaxExponent<int>::value
+                         ? LEFT_PROMOTION
+                         : DEFAULT_PROMOTION)
+                  : (MaxExponent<Rhs>::value > MaxExponent<int>::value
+                         ? RIGHT_PROMOTION
+                         : DEFAULT_PROMOTION) >
+struct ArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  typedef Lhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  typedef Rhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> {
+  typedef int type;
+};
+
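+// A few illustrative instantiations of the rules above:
+//   ArithmeticPromotion<int16_t, int16_t>::type is int       (rule 2).
+//   ArithmeticPromotion<uint32_t, int32_t>::type is uint32_t (rule 1).
+//   ArithmeticPromotion<int64_t, int32_t>::type is int64_t   (rule 1).
+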
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+  static const bool value = !std::numeric_limits<T>::is_iec559 &&
+                            StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Lhs)) &&
+                            StaticDstRangeRelationToSrcRange<T, Rhs>::value ==
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Rhs));
+};
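+
+// For example, IsIntegerArithmeticSafe<int64_t, int32_t, int32_t>::value is
+// true: int64_t preserves the sign of, and is twice as wide as, both operand
+// types, so int32_t arithmetic carried out in an int64_t cannot wrap.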
+
+}  // namespace internal
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SAFE_MATH_IMPL_H_
diff --git a/src/base/sys-info-unittest.cc b/src/base/sys-info-unittest.cc
new file mode 100644
index 0000000..a760f94
--- /dev/null
+++ b/src/base/sys-info-unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_NACL
+#define DISABLE_ON_NACL(Name) DISABLED_##Name
+#else
+#define DISABLE_ON_NACL(Name) Name
+#endif
+
+namespace v8 {
+namespace base {
+
+TEST(SysInfoTest, NumberOfProcessors) {
+  EXPECT_LT(0, SysInfo::NumberOfProcessors());
+}
+
+
+TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+  EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
+}
+
+
+TEST(SysInfoTest, AmountOfVirtualMemory) {
+  EXPECT_LE(0, SysInfo::AmountOfVirtualMemory());
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/sys-info.cc b/src/base/sys-info.cc
new file mode 100644
index 0000000..06c4f24
--- /dev/null
+++ b/src/base/sys-info.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+
+#if V8_OS_POSIX
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if V8_OS_BSD
+#include <sys/sysctl.h>
+#endif
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+#if V8_OS_OPENBSD
+  int mib[2] = {CTL_HW, HW_NCPU};
+  int ncpu = 0;
+  size_t len = sizeof(ncpu);
+  if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
+    UNREACHABLE();
+    return 1;
+  }
+  return ncpu;
+#elif V8_OS_POSIX
+  long result = sysconf(_SC_NPROCESSORS_ONLN);  // NOLINT(runtime/int)
+  if (result == -1) {
+    UNREACHABLE();
+    return 1;
+  }
+  return static_cast<int>(result);
+#elif V8_OS_WIN
+  SYSTEM_INFO system_info = {0};
+  ::GetNativeSystemInfo(&system_info);
+  return static_cast<int>(system_info.dwNumberOfProcessors);
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+#if V8_OS_MACOSX
+  int mib[2] = {CTL_HW, HW_MEMSIZE};
+  int64_t memsize = 0;
+  size_t len = sizeof(memsize);
+  if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return memsize;
+#elif V8_OS_FREEBSD
+  int pages, page_size;
+  size_t size = sizeof(pages);
+  sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+  sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+  if (pages == -1 || page_size == -1) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN || V8_OS_WIN
+  MEMORYSTATUSEX memory_info;
+  memory_info.dwLength = sizeof(memory_info);
+  if (!GlobalMemoryStatusEx(&memory_info)) {
+    UNREACHABLE();
+    return 0;
+  }
+  int64_t result = static_cast<int64_t>(memory_info.ullTotalPhys);
+  if (result < 0) result = std::numeric_limits<int64_t>::max();
+  return result;
+#elif V8_OS_QNX
+  struct stat stat_buf;
+  if (stat("/proc", &stat_buf) != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(stat_buf.st_size);
+#elif V8_OS_NACL
+  // No support for _SC_PHYS_PAGES, assume 2GB.
+  return static_cast<int64_t>(1) << 31;
+#elif V8_OS_POSIX
+  long pages = sysconf(_SC_PHYS_PAGES);    // NOLINT(runtime/int)
+  long page_size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
+  if (pages == -1 || page_size == -1) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+#if V8_OS_NACL || V8_OS_WIN
+  return 0;
+#elif V8_OS_POSIX
+  struct rlimit rlim;
+  int result = getrlimit(RLIMIT_DATA, &rlim);
+  if (result != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return (rlim.rlim_cur == RLIM_INFINITY) ? 0 : rlim.rlim_cur;
+#endif
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/sys-info.h b/src/base/sys-info.h
new file mode 100644
index 0000000..d1658fc
--- /dev/null
+++ b/src/base/sys-info.h
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SYS_INFO_H_
+#define V8_BASE_SYS_INFO_H_
+
+#include "include/v8stdint.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+class SysInfo FINAL {
+ public:
+  // Returns the number of logical processors/cores on the current machine.
+  static int NumberOfProcessors();
+
+  // Returns the number of bytes of physical memory on the current machine.
+  static int64_t AmountOfPhysicalMemory();
+
+  // Returns the number of bytes of virtual memory available to this process.
+  // A return value of zero means that there is no limit on the available
+  // virtual memory.
+  static int64_t AmountOfVirtualMemory();
+};
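+
+// Usage sketch (illustrative, not part of the original header):
+//   int cpus = SysInfo::NumberOfProcessors();
+//   int64_t ram_bytes = SysInfo::AmountOfPhysicalMemory();
+//   int64_t vm_limit = SysInfo::AmountOfVirtualMemory();  // 0 means no limit.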
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SYS_INFO_H_
diff --git a/src/base/utils/random-number-generator-unittest.cc b/src/base/utils/random-number-generator-unittest.cc
new file mode 100644
index 0000000..7c533db
--- /dev/null
+++ b/src/base/utils/random-number-generator-unittest.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <climits>
+
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+class RandomNumberGeneratorTest : public ::testing::TestWithParam<int> {};
+
+
+static const int kMaxRuns = 12345;
+
+
+TEST_P(RandomNumberGeneratorTest, NextIntWithMaxValue) {
+  RandomNumberGenerator rng(GetParam());
+  for (int max = 1; max <= kMaxRuns; ++max) {
+    int n = rng.NextInt(max);
+    EXPECT_LE(0, n);
+    EXPECT_LT(n, max);
+  }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextBooleanReturnsFalseOrTrue) {
+  RandomNumberGenerator rng(GetParam());
+  for (int k = 0; k < kMaxRuns; ++k) {
+    bool b = rng.NextBool();
+    EXPECT_TRUE(b == false || b == true);
+  }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextDoubleReturnsValueBetween0And1) {
+  RandomNumberGenerator rng(GetParam());
+  for (int k = 0; k < kMaxRuns; ++k) {
+    double d = rng.NextDouble();
+    EXPECT_LE(0.0, d);
+    EXPECT_LT(d, 1.0);
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(RandomSeeds, RandomNumberGeneratorTest,
+                        ::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
+                                          1234567890, 987654321, INT_MAX));
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/utils/random-number-generator.cc b/src/base/utils/random-number-generator.cc
new file mode 100644
index 0000000..9454936
--- /dev/null
+++ b/src/base/utils/random-number-generator.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <new>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+  LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+  entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+  // Check if embedder supplied an entropy source.
+  { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+    if (entropy_source != NULL) {
+      int64_t seed;
+      if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+                         sizeof(seed))) {
+        SetSeed(seed);
+        return;
+      }
+    }
+  }
+
+#if V8_OS_CYGWIN || V8_OS_WIN
+  // Use rand_s() to gather entropy on Windows. See:
+  // https://code.google.com/p/v8/issues/detail?id=2905
+  unsigned first_half, second_half;
+  errno_t result = rand_s(&first_half);
+  DCHECK_EQ(0, result);
+  result = rand_s(&second_half);
+  DCHECK_EQ(0, result);
+  SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
+  // Gather entropy from /dev/urandom if available.
+  FILE* fp = fopen("/dev/urandom", "rb");
+  if (fp != NULL) {
+    int64_t seed;
+    size_t n = fread(&seed, sizeof(seed), 1, fp);
+    fclose(fp);
+    if (n == 1) {
+      SetSeed(seed);
+      return;
+    }
+  }
+
+  // We cannot assume that random() or rand() were seeded properly, so
+  // instead we just seed our PRNG using timing data as a fallback. This is
+  // weak entropy, but it's sufficient, because it is the responsibility of
+  // the embedder to install an entropy source using
+  // v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+  // https://code.google.com/p/v8/issues/detail?id=2905
+  int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+  seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
+  seed ^= TimeTicks::Now().ToInternalValue() << 8;
+  SetSeed(seed);
+#endif  // V8_OS_CYGWIN || V8_OS_WIN
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+  DCHECK_LT(0, max);
+
+  // Fast path if max is a power of 2.
+  if (IS_POWER_OF_TWO(max)) {
+    return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+  }
+
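+  // max is not a power of two, so fall back to rejection sampling to keep the
+  // result uniform: rnd - val is the base of rnd's bucket of size max, and if
+  // adding (max - 1) to it overflows int, rnd fell into the truncated final
+  // bucket and is redrawn to avoid modulo bias.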
+  while (true) {
+    int rnd = Next(31);
+    int val = rnd % max;
+    if (rnd - val + (max - 1) >= 0) {
+      return val;
+    }
+  }
+}
+
+
+double RandomNumberGenerator::NextDouble() {
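+  // Concatenate 26 + 27 random bits into a 53-bit integer (the precision of
+  // an IEEE-754 double) and divide by 2^53 to get a uniform value in [0, 1).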
+  return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+      static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+  for (size_t n = 0; n < buflen; ++n) {
+    static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+  }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+  DCHECK_LT(0, bits);
+  DCHECK_GE(32, bits);
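+  // Advance the 48-bit linear congruential generator (the same parameters as
+  // java.util.Random and POSIX drand48): seed' = (seed * 0x5deece66d + 0xb)
+  // mod 2^48; the high-order bits of the new seed form the result.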
+  // Do unsigned multiplication, which has the intended modulo semantics, while
+  // signed multiplication would expose undefined behavior.
+  uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
+  // Assigning a uint64_t to an int64_t is implementation defined, but this
+  // should be OK. Use a static_cast to explicitly state that we know what we're
+  // doing. (Famous last words...)
+  int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
+  seed_ = seed;
+  return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+  initial_seed_ = seed;
+  seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/utils/random-number-generator.h b/src/base/utils/random-number-generator.h
new file mode 100644
index 0000000..479423d
--- /dev/null
+++ b/src/base/utils/random-number-generator.h
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
+// This class is neither reentrant nor threadsafe.
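+//
+// Usage sketch (illustrative only; the fixed seed 42 is a hypothetical
+// choice):
+//   RandomNumberGenerator rng(42);  // Identical seeds replay the sequence.
+//   int die = rng.NextInt(6) + 1;   // Uniform in [1, 6].
+//   double u = rng.NextDouble();    // Uniform in [0.0, 1.0).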
+
+class RandomNumberGenerator FINAL {
+ public:
+  // EntropySource is used as a callback function when V8 needs a source of
+  // entropy.
+  typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+  static void SetEntropySource(EntropySource entropy_source);
+
+  RandomNumberGenerator();
+  explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+  // Returns the next pseudorandom, uniformly distributed int value from this
+  // random number generator's sequence. The general contract of |NextInt()| is
+  // that one int value is pseudorandomly generated and returned.
+  // All 2^32 possible integer values are produced with (approximately) equal
+  // probability.
+  V8_INLINE int NextInt() WARN_UNUSED_RESULT {
+    return Next(32);
+  }
+
+  // Returns a pseudorandom, uniformly distributed int value between 0
+  // (inclusive) and the specified max value (exclusive), drawn from this random
+  // number generator's sequence. The general contract of |NextInt(int)| is that
+  // one int value in the specified range is pseudorandomly generated and
+  // returned. All max possible int values are produced with (approximately)
+  // equal probability.
+  int NextInt(int max) WARN_UNUSED_RESULT;
+
+  // Returns the next pseudorandom, uniformly distributed boolean value from
+  // this random number generator's sequence. The general contract of
+  // |NextBoolean()| is that one boolean value is pseudorandomly generated and
+  // returned. The values true and false are produced with (approximately) equal
+  // probability.
+  V8_INLINE bool NextBool() WARN_UNUSED_RESULT {
+    return Next(1) != 0;
+  }
+
+  // Returns the next pseudorandom, uniformly distributed double value between
+  // 0.0 and 1.0 from this random number generator's sequence.
+  // The general contract of |NextDouble()| is that one double value, chosen
+  // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+  // (exclusive), is pseudorandomly generated and returned.
+  double NextDouble() WARN_UNUSED_RESULT;
+
+  // Fills the elements of a specified array of bytes with random numbers.
+  void NextBytes(void* buffer, size_t buflen);
+
+  // Override the current seed.
+  void SetSeed(int64_t seed);
+
+  int64_t initial_seed() const { return initial_seed_; }
+
+ private:
+  static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+  static const int64_t kAddend = 0xb;
+  static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+  int Next(int bits) WARN_UNUSED_RESULT;
+
+  int64_t initial_seed_;
+  int64_t seed_;
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
diff --git a/src/base/win32-headers.h b/src/base/win32-headers.h
new file mode 100644
index 0000000..2d94abd
--- /dev/null
+++ b/src/base/win32-headers.h
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_WIN32_HEADERS_H_
+#define V8_BASE_WIN32_HEADERS_H_
+
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+// Require Windows XP or higher (this is required for the RtlCaptureContext
+// function to be present).
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif
+
+#include <windows.h>
+
+#include <mmsystem.h>  // For timeGetTime().
+#include <signal.h>  // For raise().
+#include <time.h>  // For LocalOffset() implementation.
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is for MinGW
+// header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif  // __MINGW32__
+#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#include <dbghelp.h>  // For SymLoadModule64 et al.
+#include <errno.h>  // For STRUNCATE
+#endif  // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#include <limits.h>  // For INT_MAX et al.
+#include <tlhelp32.h>  // For Module32First et al.
+
+// These additional WIN32 includes have to be right here, as the #undef's
+// below make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#ifndef __MINGW32__
+#include <wspiapi.h>
+#endif  // __MINGW32__
+#include <process.h>  // For _beginthreadex().
+#include <stdlib.h>
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef UNKNOWN
+#undef NONE
+#undef ANY
+#undef IGNORE
+#undef STRICT
+#undef GetObject
+#undef CreateSemaphore
+#undef Yield
+#undef RotateRight32
+#undef RotateRight64
+
+#endif  // V8_BASE_WIN32_HEADERS_H_
diff --git a/src/base/win32-math.cc b/src/base/win32-math.cc
new file mode 100644
index 0000000..d6fc78b
--- /dev/null
+++ b/src/base/win32-math.cc
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
+
+#include "src/base/win32-headers.h"
+#include <float.h>         // Required for DBL_MAX and on Win32 for finite()
+#include <limits.h>        // Required for INT_MAX etc.
+#include <cmath>
+#include "src/base/win32-math.h"
+
+#include "src/base/logging.h"
+
+
+namespace std {
+
+// Test for a NaN (not a number) value - usually defined in math.h
+int isnan(double x) {
+  return _isnan(x);
+}
+
+
+// Test for infinity - usually defined in math.h
+int isinf(double x) {
+  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
+}
+
+
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+  return _finite(x);
+}
+
+
+// Test if x is less than y and neither is NaN - usually defined in math.h
+int isless(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x < y;
+}
+
+
+// Test if x is greater than y and neither is NaN - usually defined in math.h
+int isgreater(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x > y;
+}
+
+
+// Classify floating point number - usually defined in math.h
+int fpclassify(double x) {
+  // Use the MS-specific _fpclass() for classification.
+  int flags = _fpclass(x);
+
+  // Determine class. We cannot use a switch statement because
+  // the _FPCLASS_ constants are defined as flags.
+  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
+  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
+  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
+  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
+
+  // All cases should be covered by the code above.
+  DCHECK(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
+  return FP_NAN;
+}
+
+
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive
+  // and negative versions of zero.
+  if (x == 0)
+    return _fpclass(x) & _FPCLASS_NZ;
+  else
+    return x < 0;
+}
+
+}  // namespace std
+
+#endif  // _MSC_VER
diff --git a/src/base/win32-math.h b/src/base/win32-math.h
new file mode 100644
index 0000000..e1c0350
--- /dev/null
+++ b/src/base/win32-math.h
@@ -0,0 +1,42 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+
+#ifndef V8_BASE_WIN32_MATH_H_
+#define V8_BASE_WIN32_MATH_H_
+
+#ifndef _MSC_VER
+#error Wrong environment, expected MSVC.
+#endif  // _MSC_VER
+
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
+enum {
+  FP_NAN,
+  FP_INFINITE,
+  FP_ZERO,
+  FP_SUBNORMAL,
+  FP_NORMAL
+};
+
+
+namespace std {
+
+int isfinite(double x);
+int isinf(double x);
+int isnan(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+}  // namespace std
+
+#endif  // _MSC_VER < 1800
+
+#endif  // V8_BASE_WIN32_MATH_H_