Version 3.16.2

Added Makefile options to build for the Raspberry Pi (armv7=false, vfp2=off).

Performance and stability improvements on all platforms.
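
For example (a sketch, not part of the original commit: it assumes the
standard arm.release target and a configured ARM cross-toolchain), a
Raspberry Pi build could be driven with:

    make arm.release armv7=false vfp2=off vfp3=off

armv7=false feeds -Darmv7=0 to GYP through the new Makefile clause in the
diff below.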

git-svn-id: http://v8.googlecode.com/svn/trunk@13309 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/AUTHORS b/AUTHORS
index c279e7c..d25fc5a 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -34,6 +34,7 @@
 John Jozwiak <jjozwiak@codeaurora.org>
 Jonathan Liu <net147@gmail.com>
 Kun Zhang <zhangk@codeaurora.org>
+Luis Reis <luis.m.reis@gmail.com>
 Martyn Capewell <martyn.capewell@arm.com>
 Mathias Bynens <mathias@qiwi.be>
 Matt Hanselman <mjhanselman@gmail.com>
diff --git a/ChangeLog b/ChangeLog
index 91724a6..000b04e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2013-01-04: Version 3.16.2
+
+        Added Makefile options to build for the Raspberry Pi (armv7=false,
+        vfp2=off).
+
+        Performance and stability improvements on all platforms.
+
+
 2012-12-27: Version 3.16.1
 
         Fixed x64 MathMinMax for negative untagged int32 arguments.
diff --git a/Makefile b/Makefile
index 63cfbf4..db32a95 100644
--- a/Makefile
+++ b/Makefile
@@ -91,13 +91,13 @@
 ifeq ($(vfp2), off)
   GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
 else
-  GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
+  GYPFLAGS += -Dv8_can_use_vfp2_instructions=true -Darm_fpu=vfpv2
 endif
 # vfp3=off
 ifeq ($(vfp3), off)
   GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
 else
-  GYPFLAGS += -Dv8_can_use_vfp3_instructions=true
+  GYPFLAGS += -Dv8_can_use_vfp3_instructions=true -Darm_fpu=vfpv3
 endif
 # debuggersupport=off
 ifeq ($(debuggersupport), off)
@@ -127,6 +127,10 @@
 ifeq ($(hardfp), on)
   GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
 endif
+# armv7=false
+ifeq ($(armv7), false)
+  GYPFLAGS += -Darmv7=0
+endif
 
 # ----------------- available targets: --------------------
 # - "dependencies": pulls in external dependencies (currently: GYP)
diff --git a/src/api.cc b/src/api.cc
index c2bbc18..b926c22 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3873,102 +3873,6 @@
   return str->length();
 }
 
-
-int String::Utf8Length() const {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
-  return i::Utf8Length(str);
-}
-
-
-// Will fail with a negative answer if the recursion depth is too high.
-static int RecursivelySerializeToUtf8(i::String* string,
-                                      char* buffer,
-                                      int start,
-                                      int end,
-                                      int recursion_budget,
-                                      int32_t previous_character,
-                                      int32_t* last_character) {
-  int utf8_bytes = 0;
-  while (true) {
-    if (string->IsOneByteRepresentation()) {
-      i::String::WriteToFlat(string, buffer, start, end);
-      *last_character = unibrow::Utf16::kNoPreviousCharacter;
-      return utf8_bytes + end - start;
-    }
-    switch (i::StringShape(string).representation_tag()) {
-      case i::kExternalStringTag: {
-        const uint16_t* data = i::ExternalTwoByteString::cast(string)->
-          ExternalTwoByteStringGetData(0);
-        char* current = buffer;
-        for (int i = start; i < end; i++) {
-          uint16_t character = data[i];
-          current +=
-              unibrow::Utf8::Encode(current, character, previous_character);
-          previous_character = character;
-        }
-        *last_character = previous_character;
-        return static_cast<int>(utf8_bytes + current - buffer);
-      }
-      case i::kSeqStringTag: {
-        const uint16_t* data =
-            i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
-        char* current = buffer;
-        for (int i = start; i < end; i++) {
-          uint16_t character = data[i];
-          current +=
-              unibrow::Utf8::Encode(current, character, previous_character);
-          previous_character = character;
-        }
-        *last_character = previous_character;
-        return static_cast<int>(utf8_bytes + current - buffer);
-      }
-      case i::kSlicedStringTag: {
-        i::SlicedString* slice = i::SlicedString::cast(string);
-        unsigned offset = slice->offset();
-        string = slice->parent();
-        start += offset;
-        end += offset;
-        continue;
-      }
-      case i::kConsStringTag: {
-        i::ConsString* cons_string = i::ConsString::cast(string);
-        i::String* first = cons_string->first();
-        int boundary = first->length();
-        if (start >= boundary) {
-          // Only need RHS.
-          string = cons_string->second();
-          start -= boundary;
-          end -= boundary;
-          continue;
-        } else if (end <= boundary) {
-          // Only need LHS.
-          string = first;
-        } else {
-          if (recursion_budget == 0) return -1;
-          int extra_utf8_bytes =
-              RecursivelySerializeToUtf8(first,
-                                         buffer,
-                                         start,
-                                         boundary,
-                                         recursion_budget - 1,
-                                         previous_character,
-                                         &previous_character);
-          if (extra_utf8_bytes < 0) return extra_utf8_bytes;
-          buffer += extra_utf8_bytes;
-          utf8_bytes += extra_utf8_bytes;
-          string = cons_string->second();
-          start = 0;
-          end -= boundary;
-        }
-      }
-    }
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-
 bool String::MayContainNonAscii() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
@@ -3978,6 +3882,222 @@
 }
 
 
+class Utf8LengthVisitor {
+ public:
+  explicit Utf8LengthVisitor()
+    : utf8_length_(0),
+      last_character_(unibrow::Utf16::kNoPreviousCharacter) {}
+
+  inline int GetLength() {
+    return utf8_length_;
+  }
+
+  template<typename Char>
+  inline void Visit(const Char* chars, unsigned length) {
+    ASSERT(length > 0);
+    // TODO(dcarney): Add back ascii fast path.
+    int utf8_length = 0;
+    int last_character = last_character_;
+    for (unsigned i = 0; i < length; i++) {
+      uint16_t c = chars[i];
+      utf8_length += unibrow::Utf8::Length(c, last_character);
+      last_character = c;
+    }
+    last_character_ = last_character;
+    utf8_length_ += utf8_length;
+  }
+
+  inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+    Visit(chars, length);
+  }
+
+  inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+    Visit(chars, length);
+  }
+
+ private:
+  int utf8_length_;
+  int last_character_;
+  DISALLOW_COPY_AND_ASSIGN(Utf8LengthVisitor);
+};
+
+
+static int Utf8Length(i::String* str, i::Isolate* isolate) {
+  unsigned length = static_cast<unsigned>(str->length());
+  if (length == 0) return 0;
+  int32_t type = str->map()->instance_type();
+  Utf8LengthVisitor visitor;
+  // Non-ConsString branch.
+  if ((type & i::kStringRepresentationMask) != i::kConsStringTag) {
+    i::ConsStringNullOp null_op;
+    i::String::Visit(str, 0, visitor, null_op, type, length);
+    return visitor.GetLength();
+  }
+  i::ConsStringIteratorOp* op = isolate->write_iterator();
+  unsigned offset = 0;
+  i::String* leaf = op->Operate(str, &offset, &type, &length);
+  ASSERT(leaf != NULL);
+  while (leaf != NULL) {
+    i::ConsStringNullOp null_op;
+    ASSERT(offset == 0);
+    i::String::Visit(leaf, 0, visitor, null_op, type, length);
+    leaf = op->ContinueOperation(&type, &length);
+  }
+  return visitor.GetLength();
+}
+
+
+int String::Utf8Length() const {
+  i::Handle<i::String> str = Utils::OpenHandle(this);
+  i::Isolate* isolate = str->GetIsolate();
+  if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
+  return v8::Utf8Length(*str, isolate);
+}
+
+
+class Utf8WriterVisitor {
+ public:
+  Utf8WriterVisitor(char* buffer, int capacity)
+    : early_termination_(false),
+      last_character_(unibrow::Utf16::kNoPreviousCharacter),
+      buffer_(buffer),
+      start_(buffer),
+      capacity_(capacity),
+      utf16_chars_read_(0) {
+  }
+
+  static int WriteEndCharacter(uint16_t character,
+                               int last_character,
+                               int remaining,
+                               char* const buffer) {
+    using namespace unibrow;
+    ASSERT(remaining > 0);
+    // We can't use a local buffer here because Encode needs to modify
+    // previous characters in the stream.  We know, however, that
+    // exactly one character will be advanced.
+    if (Utf16::IsTrailSurrogate(character) &&
+        Utf16::IsLeadSurrogate(last_character)) {
+      int written = Utf8::Encode(buffer, character, last_character);
+      ASSERT(written == 1);
+      return written;
+    }
+    // Use a scratch buffer to check the required characters.
+    char temp_buffer[Utf8::kMaxEncodedSize];
+    // Can't encode using last_character as gcc has array bounds issues.
+    int written = Utf8::Encode(temp_buffer,
+                               character,
+                               unibrow::Utf16::kNoPreviousCharacter);
+    // Won't fit.
+    if (written > remaining) return 0;
+    // Copy over the character from temp_buffer.
+    for (int j = 0; j < written; j++) {
+      buffer[j] = temp_buffer[j];
+    }
+    return written;
+  }
+
+  template<typename Char>
+  void Visit(const Char* chars, const int length) {
+    using namespace unibrow;
+    // TODO(dcarney): Add back ascii fast path.
+    ASSERT(!early_termination_);
+    ASSERT(length > 0);
+    // Copy state to stack.
+    char* buffer = buffer_;
+    int last_character = last_character_;
+    int i = 0;
+    // Do a fast loop where there is no exit capacity check.
+    while (true) {
+      int fast_length;
+      if (capacity_ == -1) {
+        fast_length = length;
+      } else {
+        int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+        // Need enough space to write everything but one character.
+        STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+        int writable_length = (remaining_capacity - 3)/3;
+        // Need to drop into slow loop.
+        if (writable_length <= 0) break;
+        fast_length = i + writable_length;
+        if (fast_length > length) fast_length = length;
+      }
+      // Write the characters to the stream.
+      for (; i < fast_length; i++) {
+        uint16_t character = *chars++;
+        buffer += Utf8::Encode(buffer, character, last_character);
+        last_character = character;
+        ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+      }
+      // Array is fully written. Exit.
+      if (fast_length == length) {
+        // Write state back out to object.
+        last_character_ = last_character;
+        buffer_ = buffer;
+        utf16_chars_read_ += i;
+        return;
+      }
+    }
+    ASSERT(capacity_ != -1);
+    // Slow loop. Must check capacity on each iteration.
+    int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+    ASSERT(remaining_capacity >= 0);
+    for (; i < length && remaining_capacity > 0; i++) {
+      uint16_t character = *chars++;
+      int written = WriteEndCharacter(character,
+                                      last_character,
+                                      remaining_capacity,
+                                      buffer);
+      if (written == 0) {
+        early_termination_ = true;
+        break;
+      }
+      buffer += written;
+      remaining_capacity -= written;
+      last_character = character;
+    }
+    // Write state back out to object.
+    last_character_ = last_character;
+    buffer_ = buffer;
+    utf16_chars_read_ += i;
+  }
+
+  inline bool IsDone() {
+    return early_termination_;
+  }
+
+  inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+    Visit(chars, static_cast<int>(length));
+  }
+
+  inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+    Visit(chars, static_cast<int>(length));
+  }
+
+  inline int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
+    // Write out number of utf16 characters written to the stream.
+    if (utf16_chars_read_out != NULL) {
+      *utf16_chars_read_out = utf16_chars_read_;
+    }
+    // Only null terminate if all of the string was written and there's space.
+    if (write_null &&
+        !early_termination_ &&
+        (capacity_ == -1 || (buffer_ - start_) < capacity_)) {
+      *buffer_++ = '\0';
+    }
+    return static_cast<int>(buffer_ - start_);
+  }
+
+ private:
+  bool early_termination_;
+  int last_character_;
+  char* buffer_;
+  char* const start_;
+  int capacity_;
+  int utf16_chars_read_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
+};
+
+
 int String::WriteUtf8(char* buffer,
                       int capacity,
                       int* nchars_ref,
@@ -3990,123 +4110,23 @@
   if (options & HINT_MANY_WRITES_EXPECTED) {
     FlattenString(str);  // Flatten the string for efficiency.
   }
-  int string_length = str->length();
-  if (str->IsOneByteRepresentation()) {
-    int len;
-    if (capacity == -1) {
-      capacity = str->length() + 1;
-      len = string_length;
-    } else {
-      len = i::Min(capacity, str->length());
-    }
-    i::String::WriteToFlat(*str, buffer, 0, len);
-    if (nchars_ref != NULL) *nchars_ref = len;
-    if (!(options & NO_NULL_TERMINATION) && capacity > len) {
-      buffer[len] = '\0';
-      return len + 1;
-    }
-    return len;
-  }
-
-  if (capacity == -1 || capacity / 3 >= string_length) {
-    int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
-    const int kMaxRecursion = 100;
-    int utf8_bytes =
-        RecursivelySerializeToUtf8(*str,
-                                   buffer,
-                                   0,
-                                   string_length,
-                                   kMaxRecursion,
-                                   previous,
-                                   &previous);
-    if (utf8_bytes >= 0) {
-      // Success serializing with recursion.
-      if ((options & NO_NULL_TERMINATION) == 0 &&
-          (capacity > utf8_bytes || capacity == -1)) {
-        buffer[utf8_bytes++] = '\0';
-      }
-      if (nchars_ref != NULL) *nchars_ref = string_length;
-      return utf8_bytes;
-    }
-    FlattenString(str);
-    // Recurse once.  This time around the string is flat and the serializing
-    // with recursion will certainly succeed.
-    return WriteUtf8(buffer, capacity, nchars_ref, options);
-  } else if (capacity >= string_length) {
-    // First check that the buffer is large enough.  If it is, then recurse
-    // once without a capacity limit, which will get into the other branch of
-    // this 'if'.
-    int utf8_bytes = i::Utf8Length(str);
-    if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
-    if (utf8_bytes <= capacity) {
-      return WriteUtf8(buffer, -1, nchars_ref, options);
+  Utf8WriterVisitor writer(buffer, capacity);
+  i::ConsStringIteratorOp* op = isolate->write_iterator();
+  op->Reset();
+  int32_t type = str->map()->instance_type();
+  unsigned str_length = static_cast<unsigned>(str->length());
+  if (str_length != 0) {
+    i::String::Visit(*str, 0, writer, *op, type, str_length);
+    while (!writer.IsDone()) {
+      unsigned length_out;
+      i::String* next = op->ContinueOperation(&type, &length_out);
+      if (next == NULL) break;
+      // TODO(dcarney): need an asserting null op.
+      i::ConsStringNullOp null_op;
+      i::String::Visit(next, 0, writer, null_op, type, length_out);
     }
   }
-
-  // Slow case.
-  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
-  isolate->string_tracker()->RecordWrite(str);
-
-  write_input_buffer.Reset(0, *str);
-  int len = str->length();
-  // Encode the first K - 3 bytes directly into the buffer since we
-  // know there's room for them.  If no capacity is given we copy all
-  // of them here.
-  int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
-  int i;
-  int pos = 0;
-  int nchars = 0;
-  int previous = unibrow::Utf16::kNoPreviousCharacter;
-  for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
-    i::uc32 c = write_input_buffer.GetNext();
-    int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
-    pos += written;
-    nchars++;
-    previous = c;
-  }
-  if (i < len) {
-    // For the last characters we need to check the length for each one
-    // because they may be longer than the remaining space in the
-    // buffer.
-    char intermediate[unibrow::Utf8::kMaxEncodedSize];
-    for (; i < len && pos < capacity; i++) {
-      i::uc32 c = write_input_buffer.GetNext();
-      if (unibrow::Utf16::IsTrailSurrogate(c) &&
-          unibrow::Utf16::IsLeadSurrogate(previous)) {
-        // We can't use the intermediate buffer here because the encoding
-        // of surrogate pairs is done under assumption that you can step
-        // back and fix the UTF8 stream.  Luckily we only need space for one
-        // more byte, so there is always space.
-        ASSERT(pos < capacity);
-        int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
-        ASSERT(written == 1);
-        pos += written;
-        nchars++;
-      } else {
-        int written =
-            unibrow::Utf8::Encode(intermediate,
-                                  c,
-                                  unibrow::Utf16::kNoPreviousCharacter);
-        if (pos + written <= capacity) {
-          for (int j = 0; j < written; j++) {
-            buffer[pos + j] = intermediate[j];
-          }
-          pos += written;
-          nchars++;
-        } else {
-          // We've reached the end of the buffer
-          break;
-        }
-      }
-      previous = c;
-    }
-  }
-  if (nchars_ref != NULL) *nchars_ref = nchars;
-  if (!(options & NO_NULL_TERMINATION) &&
-      (i == len && (capacity == -1 || pos < capacity))) {
-    buffer[pos++] = '\0';
-  }
-  return pos;
+  return writer.CompleteWrite(!(options & NO_NULL_TERMINATION), nchars_ref);
 }
 
 
@@ -4126,7 +4146,7 @@
   }
 
   if (str->IsOneByteRepresentation()) {
-    // WriteToFlat is faster than using the StringInputBuffer.
+    // WriteToFlat is faster than using the StringCharacterStream.
     if (length == -1) length = str->length() + 1;
     int len = i::Min(length, str->length() - start);
     i::String::WriteToFlat(*str, buffer, start, start + len);
@@ -4141,16 +4161,15 @@
     return len;
   }
 
-  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   int end = length;
   if ((length == -1) || (length > str->length() - start)) {
     end = str->length() - start;
   }
   if (end < 0) return 0;
-  write_input_buffer.Reset(start, *str);
+  i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
   int i;
   for (i = 0; i < end; i++) {
-    char c = static_cast<char>(write_input_buffer.GetNext());
+    char c = static_cast<char>(write_stream.GetNext());
     if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
     buffer[i] = c;
   }
@@ -4174,7 +4193,7 @@
   isolate->string_tracker()->RecordWrite(str);
   if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
-    // using StringInputBuffer or Get(i) to access the characters.
+    // using StringCharacterStream or Get(i) to access the characters.
     FlattenString(str);
   }
   int end = start + length;
@@ -5639,7 +5658,7 @@
   Handle<String> str = obj->ToString();
   if (str.IsEmpty()) return;
   i::Handle<i::String> i_str = Utils::OpenHandle(*str);
-  length_ = i::Utf8Length(i_str);
+  length_ = v8::Utf8Length(*i_str, isolate);
   str_ = i::NewArray<char>(length_ + 1);
   str->WriteUtf8(str_);
 }
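
A standalone illustration of the byte-count arithmetic behind the new
Utf8WriterVisitor above (a sketch under standard UTF-8 size rules, not V8
code):

    #include <cassert>

    int main() {
      // A lone UTF-16 surrogate code unit encodes to 3 UTF-8 bytes, while a
      // complete surrogate pair encodes to a single 4-byte sequence. So when
      // a trail surrogate follows an already-written lead surrogate, Encode()
      // rewrites the previous bytes and the stream advances by exactly one
      // byte, matching ASSERT(written == 1) in WriteEndCharacter.
      assert(4 - 3 == 1);
      // One UTF-16 code unit never costs more than 3 UTF-8 bytes
      // (kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3), so with N bytes of
      // capacity left the fast loop can emit (N - 3) / 3 code units without
      // per-character checks, keeping room for one final worst-case unit.
      const int remaining_capacity = 12;
      assert((remaining_capacity - 3) / 3 == 3);
      return 0;
    }
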
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 52edb39..9cd8675 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -309,8 +309,11 @@
 // mov lr, pc
 const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
 // ldr rd, [pc, #offset]
-const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// vldr dd, [pc, #offset]
+const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -351,6 +354,7 @@
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
   num_pending_reloc_info_ = 0;
+  num_pending_64_bit_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
@@ -369,6 +373,7 @@
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
   ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_64_bit_reloc_info_ == 0);
 
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -415,6 +420,11 @@
 }
 
 
+bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
+  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
+}
+
+
 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
   ASSERT(IsLdrRegisterImmediate(instr));
   bool positive = (instr & B23) == B23;
@@ -423,6 +433,15 @@
 }
 
 
+int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
+  ASSERT(IsVldrDRegisterImmediate(instr));
+  bool positive = (instr & B23) == B23;
+  int offset = instr & kOff8Mask;  // Zero extended offset.
+  offset <<= 2;
+  return positive ? offset : -offset;
+}
+
+
 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
   ASSERT(IsLdrRegisterImmediate(instr));
   bool positive = offset >= 0;
@@ -435,6 +454,19 @@
 }
 
 
+Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
+  ASSERT(IsVldrDRegisterImmediate(instr));
+  ASSERT((offset & ~3) == offset);  // Offset must be a multiple of 4.
+  bool positive = offset >= 0;
+  if (!positive) offset = -offset;
+  ASSERT(is_uint10(offset));
+  // Set bit indicating whether the offset should be added.
+  instr = (instr & ~B23) | (positive ? B23 : 0);
+  // Set the actual offset. Its bottom 2 bits are zero.
+  return (instr & ~kOff8Mask) | (offset >> 2);
+}
+
+
 bool Assembler::IsStrRegisterImmediate(Instr instr) {
   return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
 }
@@ -520,7 +552,14 @@
 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pc +/- offset_12].
-  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
+  return (instr & kLdrPCMask) == kLdrPCPattern;
+}
+
+
+bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
+  // Check the instruction is indeed a
+  // vldr<cond> <Dd>, [pc +/- offset_10].
+  return (instr & kVldrDPCMask) == kVldrDPCPattern;
 }
 
 
@@ -796,7 +835,7 @@
 #endif  // def DEBUG
     if (assembler != NULL && assembler->predictable_code_size()) return true;
     return Serializer::enabled();
-  } else if (rmode_ == RelocInfo::NONE) {
+  } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
   return true;
@@ -2027,9 +2066,26 @@
   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.
     emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+  } else if (FLAG_enable_vldr_imm) {
+    // TODO(jfb) Temporarily turned off until we have constant blinding or
+    //           some equivalent mitigation: an attacker can otherwise control
+    //           generated data which also happens to be executable, a Very Bad
+    //           Thing indeed.
+    //           Blinding gets tricky because we don't have xor, we probably
+    //           need to add/subtract without losing precision, which requires a
+    //           cookie value that Lithium is probably better positioned to
+    //           choose.
+    //           We could also add a few peepholes here like detecting 0.0 and
+    //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
+    //           to zero (we set flush-to-zero), and normalizing NaN values.
+    //           We could also detect redundant values.
+    //           The code could also randomize the order of values, though
+    //           that's tricky because vldr has a limited reach. Furthermore
+    //           it breaks load locality.
+    RecordRelocInfo(imm);
+    vldr(dst, MemOperand(pc, 0), cond);
   } else {
-    // Synthesise the double from ARM immediates. This could be implemented
-    // using vldr from a constant pool.
+    // Synthesise the double from ARM immediates.
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);
     mov(ip, Operand(lo));
@@ -2592,6 +2648,7 @@
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
   ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -2603,6 +2660,7 @@
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
   ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -2626,16 +2684,9 @@
            || mode == DONT_USE_CONSTANT_POOL);
     // These modes do not need an entry in the constant pool.
   } else {
-    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
-    if (num_pending_reloc_info_ == 0) {
-      first_const_pool_use_ = pc_offset();
-    }
-    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
-    // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info.
-    BlockConstPoolFor(1);
+    RecordRelocInfoConstantPoolEntryHelper(rinfo);
   }
-  if (rinfo.rmode() != RelocInfo::NONE) {
+  if (!RelocInfo::IsNone(rinfo.rmode())) {
     // Don't record external references unless the heap will be serialized.
     if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
@@ -2661,14 +2712,38 @@
   }
 }
 
+void Assembler::RecordRelocInfo(double data) {
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, data);
+  RecordRelocInfoConstantPoolEntryHelper(rinfo);
+}
+
+
+void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
+  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+  if (num_pending_reloc_info_ == 0) {
+    first_const_pool_use_ = pc_offset();
+  }
+  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+  if (rinfo.rmode() == RelocInfo::NONE64) {
+    ++num_pending_64_bit_reloc_info_;
+  }
+  ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
+  // Make sure the constant pool is not emitted in place of the next
+  // instruction for which we just recorded relocation info.
+  BlockConstPoolFor(1);
+}
+
 
 void Assembler::BlockConstPoolFor(int instructions) {
   int pc_limit = pc_offset() + instructions * kInstrSize;
   if (no_const_pool_before_ < pc_limit) {
     // If there are some pending entries, the constant pool cannot be blocked
-    // further than first_const_pool_use_ + kMaxDistToPool
+    // further than the constant pool instruction's reach.
     ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+           (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
+    // TODO(jfb) Also check 64-bit entries are in range (requires splitting
+    //           them up from 32-bit entries).
     no_const_pool_before_ = pc_limit;
   }
 
@@ -2690,29 +2765,60 @@
 
   // There is nothing to do if there are no pending constant pool entries.
   if (num_pending_reloc_info_ == 0)  {
+    ASSERT(num_pending_64_bit_reloc_info_ == 0);
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
   }
 
-  // We emit a constant pool when:
-  //  * requested to do so by parameter force_emit (e.g. after each function).
-  //  * the distance to the first instruction accessing the constant pool is
-  //    kAvgDistToPool or more.
-  //  * no jump is required and the distance to the first instruction accessing
-  //    the constant pool is at least kMaxDistToPool / 2.
-  ASSERT(first_const_pool_use_ >= 0);
-  int dist = pc_offset() - first_const_pool_use_;
-  if (!force_emit && dist < kAvgDistToPool &&
-      (require_jump || (dist < (kMaxDistToPool / 2)))) {
-    return;
-  }
-
   // Check that the code buffer is large enough before emitting the constant
   // pool (include the jump over the pool and the constant pool marker and
   // the gap to the relocation information).
+  // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
   int jump_instr = require_jump ? kInstrSize : 0;
-  int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+  int size_up_to_marker = jump_instr + kInstrSize;
+  int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+  // 64-bit values must be 64-bit aligned.
+  // We'll start emitting at PC: branch+marker, then (after any alignment
+  // word) the 64-bit values, followed by the 32-bit values.
+  bool require_64_bit_align = has_fp_values &&
+      (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
+  if (require_64_bit_align) {
+    size_after_marker += kInstrSize;
+  }
+  // num_pending_reloc_info_ also counts 64-bit entries, so the code above
+  // has already accounted for half of each 64-bit entry's size. Add the
+  // remaining half.
+  STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
+  size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
+
+  int size = size_up_to_marker + size_after_marker;
+
+  // We emit a constant pool when:
+  //  * requested to do so by parameter force_emit (e.g. after each function).
+  //  * the distance from the first instruction accessing the constant pool to
+  //    any of the constant pool entries will exceed its limit the next
+  //    time the pool is checked. This is overly restrictive, but we don't emit
+  //    constant pool entries in-order so it's conservatively correct.
+  //  * the instruction doesn't require a jump after itself to jump over the
+  //    constant pool, and we're getting close to running out of range.
+  if (!force_emit) {
+    ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
+    int dist = pc_offset() + size - first_const_pool_use_;
+    if (has_fp_values) {
+      if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
+          (require_jump || (dist < kMaxDistToFPPool / 2))) {
+        return;
+      }
+    } else {
+      if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
+          (require_jump || (dist < kMaxDistToIntPool / 2))) {
+        return;
+      }
+    }
+  }
+
   int needed_space = size + kGap;
   while (buffer_space() <= needed_space) GrowBuffer();
 
@@ -2729,10 +2835,43 @@
     }
 
     // Put down constant pool marker "Undefined instruction".
-    emit(kConstantPoolMarker |
-         EncodeConstantPoolLength(num_pending_reloc_info_));
+    // The data size helps disassembly know what to print.
+    emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker));
 
-    // Emit constant pool entries.
+    if (require_64_bit_align) {
+      emit(kConstantPoolMarker);
+    }
+
+    // Emit 64-bit constant pool entries first: their range is smaller than
+    // 32-bit entries.
+    for (int i = 0; i < num_pending_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_reloc_info_[i];
+
+      if (rinfo.rmode() != RelocInfo::NONE64) {
+        // 32-bit values emitted later.
+        continue;
+      }
+
+      ASSERT(!((uintptr_t)pc_ & 0x3));  // Check 64-bit alignment.
+
+      Instr instr = instr_at(rinfo.pc());
+      // Instruction to patch must be 'vldr dd, [pc, #offset]' with offset == 0.
+      ASSERT((IsVldrDPcImmediateOffset(instr) &&
+              GetVldrDRegisterImmediateOffset(instr) == 0));
+
+      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      ASSERT(is_uint10(delta));
+
+      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
+
+      const double double_data = rinfo.data64();
+      uint64_t uint_data = 0;
+      memcpy(&uint_data, &double_data, sizeof(double_data));
+      emit(uint_data & 0xFFFFFFFF);
+      emit(uint_data >> 32);
+    }
+
+    // Emit 32-bit constant pool entries.
     for (int i = 0; i < num_pending_reloc_info_; i++) {
       RelocInfo& rinfo = pending_reloc_info_[i];
       ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -2740,25 +2879,35 @@
              rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
              rinfo.rmode() != RelocInfo::CONST_POOL);
 
+      if (rinfo.rmode() == RelocInfo::NONE64) {
+        // 64-bit values emitted earlier.
+        continue;
+      }
+
       Instr instr = instr_at(rinfo.pc());
-      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+
+      // 64-bit loads shouldn't get here.
+      ASSERT(!IsVldrDPcImmediateOffset(instr));
+
+      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      // 0 is the smallest delta:
+      //   ldr rd, [pc, #0]
+      //   constant pool marker
+      //   data
+
       if (IsLdrPcImmediateOffset(instr) &&
           GetLdrRegisterImmediateOffset(instr) == 0) {
-        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-        // 0 is the smallest delta:
-        //   ldr rd, [pc, #0]
-        //   constant pool marker
-        //   data
         ASSERT(is_uint12(delta));
-
         instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+        emit(rinfo.data());
       } else {
         ASSERT(IsMovW(instr));
+        emit(rinfo.data());
       }
-      emit(rinfo.data());
     }
 
     num_pending_reloc_info_ = 0;
+    num_pending_64_bit_reloc_info_ = 0;
     first_const_pool_use_ = -1;
 
     RecordComment("]");
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index a361c7e..ca63288 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1278,8 +1278,11 @@
   static bool IsBranch(Instr instr);
   static int GetBranchOffset(Instr instr);
   static bool IsLdrRegisterImmediate(Instr instr);
+  static bool IsVldrDRegisterImmediate(Instr instr);
   static int GetLdrRegisterImmediateOffset(Instr instr);
+  static int GetVldrDRegisterImmediateOffset(Instr instr);
   static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
   static bool IsStrRegisterImmediate(Instr instr);
   static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
   static bool IsAddRegisterImmediate(Instr instr);
@@ -1294,6 +1297,7 @@
   static bool IsStrRegFpNegOffset(Instr instr);
   static bool IsLdrRegFpNegOffset(Instr instr);
   static bool IsLdrPcImmediateOffset(Instr instr);
+  static bool IsVldrDPcImmediateOffset(Instr instr);
   static bool IsTstImmediate(Instr instr);
   static bool IsCmpRegister(Instr instr);
   static bool IsCmpImmediate(Instr instr);
@@ -1304,12 +1308,13 @@
   static bool IsMovW(Instr instr);
 
   // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant.
-  static const int kMaxDistToPool = 4*KB;
-  static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
-  STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
-                kMaxNumPendingRelocInfo);
+  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+  // PC-relative loads, thereby defining a maximum distance between the
+  // instruction and the accessed constant.
+  static const int kMaxDistToIntPool = 4*KB;
+  static const int kMaxDistToFPPool = 1*KB;
+  // All relocations could be integer, so the integer pool reach is the limit.
+  static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
 
   // Postpone the generation of the constant pool for the specified number of
   // instructions.
@@ -1349,7 +1354,9 @@
     if (--const_pool_blocked_nesting_ == 0) {
       // Check the constant pool hasn't been blocked for too long.
       ASSERT((num_pending_reloc_info_ == 0) ||
-             (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+             (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+      ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+             (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
       // Two cases:
       //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
       //    still blocked
@@ -1392,13 +1399,6 @@
   static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
 
 
-  // Average distance beetween a constant pool and the first instruction
-  // accessing the constant pool. Longer distance should result in less I-cache
-  // pollution.
-  // In practice the distance will be smaller since constant pool emission is
-  // forced after function return and sometimes after unconditional branches.
-  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
-
   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.
@@ -1423,6 +1423,9 @@
   RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
   // number of pending reloc info entries in the buffer
   int num_pending_reloc_info_;
+  // Number of pending reloc info entries included above which also happen to
+  // be 64-bit.
+  int num_pending_64_bit_reloc_info_;
 
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
@@ -1459,6 +1462,8 @@
   // Record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
                        UseConstantPoolMode mode = USE_CONSTANT_POOL);
+  void RecordRelocInfo(double data);
+  void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
 
   friend class RegExpMacroAssemblerARM;
   friend class RelocInfo;
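
Where the two pool-reach constants above come from (a sketch of the offset
encodings: ldr carries a 12-bit byte offset, vldr an 8-bit offset scaled by
4, per kOff12Mask and kOff8Mask in constants-arm.h):

    #include <cassert>

    int main() {
      const int KB = 1024;
      const int ldr_reach = 1 << 12;        // 12-bit byte offset: 4096 bytes
      const int vldr_reach = (1 << 8) * 4;  // 8-bit word offset: 1024 bytes
      assert(ldr_reach == 4 * KB);          // kMaxDistToIntPool
      assert(vldr_reach == 1 * KB);         // kMaxDistToFPPool
      return 0;
    }
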
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index d8cbd08..3ab09a2 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -361,12 +361,11 @@
 
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        fail,
-                        TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 5e8739c..3f1791c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -193,27 +193,10 @@
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
-  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.
 
-  // Align the array conveniently for doubles.
-  // Store a filler value in the unused memory.
-  Label aligned, aligned_done;
-  __ tst(r6, Operand(kDoubleAlignmentMask));
-  __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ b(eq, &aligned);
-  // Store at the beginning of the allocated memory and update the base pointer.
-  __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
-  __ b(&aligned_done);
-
-  __ bind(&aligned);
-  // Store the filler at the end of the allocated memory.
-  __ sub(lr, lr, Operand(kPointerSize));
-  __ str(ip, MemOperand(r6, lr));
-
-  __ bind(&aligned_done);
-
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
   __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index a569383..3676032 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -267,7 +267,8 @@
   kCoprocessorMask = 15 << 8,
   kOpCodeMask = 15 << 21,  // In data-processing instructions.
   kImm24Mask  = (1 << 24) - 1,
-  kOff12Mask  = (1 << 12) - 1
+  kOff12Mask  = (1 << 12) - 1,
+  kOff8Mask  = (1 << 8) - 1
 };
 
 
@@ -464,6 +465,9 @@
 // ldr rd, [pc, #offset]
 extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;
+// vldr dd, [pc, #offset]
+extern const Instr kVldrDPCMask;
+extern const Instr kVldrDPCPattern;
 // blxcc rm
 extern const Instr kBlxRegMask;
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 4182f8c..b6f3e67 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -675,7 +675,7 @@
                                Label* if_false,
                                Label* fall_through) {
   ToBooleanStub stub(result_register());
-  __ CallStub(&stub);
+  __ CallStub(&stub, condition->test_id());
   __ tst(result_register(), result_register());
   Split(ne, if_true, if_false, fall_through);
 }
@@ -2330,7 +2330,7 @@
 
   CallFunctionStub stub(arg_count, flags);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
+  __ CallStub(&stub, expr->CallFeedbackId());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 38092a7..f194720 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -112,7 +112,11 @@
   stream->Add("= ");
   for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
   }
 }
 
@@ -999,7 +1003,14 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  // If there is a non-return use, the context must be allocated in a register.
+  for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      return DefineAsRegister(new(zone()) LContext);
+    }
+  }
+
+  return NULL;
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 0ac64fd..e2a0f2d 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2820,9 +2820,6 @@
     __ ldm(ia_w, sp, fp.bit() | lr.bit());
     __ add(sp, sp, Operand(sp_delta));
   }
-  if (info()->IsStub()) {
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
   __ Jump(lr);
 }
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 33b557e..0764e09 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1617,6 +1617,18 @@
     ldr(ip, MemOperand(topaddr, limit - top));
   }
 
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
   if (obj_size_operand.is_single_instruction(this)) {
@@ -1702,6 +1714,18 @@
     ldr(ip, MemOperand(topaddr, limit - top));
   }
 
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
@@ -2186,9 +2210,11 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
+void MacroAssembler::CallStub(CodeStub* stub,
+                              TypeFeedbackId ast_id,
+                              Condition cond) {
   ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
 }
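
A standalone sketch of the DOUBLE_ALIGNMENT handling added above
(hypothetical filler value, not V8's heap types): when the allocation top is
4- but not 8-byte aligned, a one-word filler is stored there and the object
starts one word later, keeping the heap walkable.

    #include <cassert>
    #include <cstdint>

    const uint32_t kOnePointerFillerMap = 0xFEEDFACEu;  // placeholder map word

    uint32_t* AlignForDouble(uint32_t* top) {
      if (reinterpret_cast<uintptr_t>(top) & 0x7) {
        *top++ = kOnePointerFillerMap;  // plug the 4-byte hole
      }
      return top;
    }

    int main() {
      alignas(8) static uint32_t space[4] = {0, 0, 0, 0};
      uint32_t* top = space + 1;  // 4-byte aligned, not 8-byte aligned
      top = AlignForDouble(top);
      assert(top == space + 2 && space[1] == kOnePointerFillerMap);
      return 0;
    }
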
 
 
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 3dedd29..454e645 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -54,20 +54,6 @@
 const Register cp = { 8 };  // JavaScript context pointer
 const Register kRootRegister = { 10 };  // Roots array pointer.
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1,
-  // Specify that the requested size of the space to allocate is specified in
-  // words instead of bytes.
-  SIZE_IN_WORDS = 1 << 2
-};
-
 // Flags used for AllocateHeapNumber
 enum TaggingMode {
   // Tag the result.
@@ -1015,7 +1001,9 @@
   // Runtime calls
 
   // Call a code stub.
-  void CallStub(CodeStub* stub, Condition cond = al);
+  void CallStub(CodeStub* stub,
+                TypeFeedbackId ast_id = TypeFeedbackId::None(),
+                Condition cond = al);
 
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
diff --git a/src/assembler.cc b/src/assembler.cc
index ccaf290..f1d5de1 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -693,7 +693,9 @@
 const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
   switch (rmode) {
     case RelocInfo::NONE:
-      return "no reloc";
+      return "no reloc 32";
+    case RelocInfo::NONE64:
+      return "no reloc 64";
     case RelocInfo::EMBEDDED_OBJECT:
       return "embedded object";
     case RelocInfo::CONSTRUCT_CALL:
@@ -817,6 +819,7 @@
     case CONST_POOL:
     case DEBUG_BREAK_SLOT:
     case NONE:
+    case NONE64:
       break;
     case NUMBER_OF_MODES:
       UNREACHABLE();
diff --git a/src/assembler.h b/src/assembler.h
index 111c1d9..cebd60a 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -248,7 +248,8 @@
     // add more as needed
     // Pseudo-types
     NUMBER_OF_MODES,  // There are at most 15 modes with noncompact encoding.
-    NONE,  // never recorded
+    NONE,  // never recorded 32-bit value
+    NONE64,  // never recorded 64-bit value
     CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explicitly by
                         // code aging.
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
@@ -268,6 +269,9 @@
   RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
       : pc_(pc), rmode_(rmode), data_(data), host_(host) {
   }
+  RelocInfo(byte* pc, double data64)
+      : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
+  }
 
   static inline bool IsRealRelocMode(Mode mode) {
     return mode >= FIRST_REAL_RELOC_MODE &&
@@ -315,6 +319,9 @@
   static inline bool IsDebugBreakSlot(Mode mode) {
     return mode == DEBUG_BREAK_SLOT;
   }
+  static inline bool IsNone(Mode mode) {
+    return mode == NONE || mode == NONE64;
+  }
   static inline bool IsCodeAgeSequence(Mode mode) {
     return mode == CODE_AGE_SEQUENCE;
   }
@@ -325,6 +332,7 @@
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
+  double data64() const { return data64_; }
   Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
@@ -423,7 +431,10 @@
   // comment).
   byte* pc_;
   Mode rmode_;
-  intptr_t data_;
+  union {
+    intptr_t data_;
+    double data64_;
+  };
   Code* host_;
   // Code and Embedded Object pointers on some platforms are stored split
   // across two consecutive 32-bit instructions. Heap management
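
The union added above lets one RelocInfo slot carry either an integer payload
or, for the new NONE64 mode, a raw 64-bit double; a standalone sketch:

    #include <cassert>
    #include <cstdint>

    struct Payload {
      union {
        intptr_t data;    // integer payloads (pointers, immediates)
        double data64;    // 64-bit double payloads (NONE64 entries)
      };
    };

    int main() {
      Payload p;
      p.data64 = 2.5;     // store the double payload
      assert(p.data64 == 2.5);
      return 0;
    }
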
diff --git a/src/builtins.cc b/src/builtins.cc
index 97fcaeb..3143dd9 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -576,7 +576,7 @@
 
       ElementsAccessor* accessor = array->GetElementsAccessor();
       MaybeObject* maybe_failure = accessor->CopyElements(
-           NULL, 0, new_elms, kind, 0,
+           NULL, 0, kind, new_elms, 0,
            ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
       ASSERT(!maybe_failure->IsFailure());
       USE(maybe_failure);
@@ -623,7 +623,7 @@
 
       ElementsAccessor* accessor = array->GetElementsAccessor();
       MaybeObject* maybe_failure = accessor->CopyElements(
-              NULL, 0, new_elms, kind, 0,
+              NULL, 0, kind, new_elms, 0,
               ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
       ASSERT(!maybe_failure->IsFailure());
       USE(maybe_failure);
@@ -785,7 +785,7 @@
     ElementsKind kind = array->GetElementsKind();
     ElementsAccessor* accessor = array->GetElementsAccessor();
     MaybeObject* maybe_failure = accessor->CopyElements(
-            NULL, 0, new_elms, kind, to_add,
+            NULL, 0, kind, new_elms, to_add,
             ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
     ASSERT(!maybe_failure->IsFailure());
     USE(maybe_failure);
@@ -934,9 +934,8 @@
   if (!maybe_array->To(&result_array)) return maybe_array;
 
   ElementsAccessor* accessor = object->GetElementsAccessor();
-  MaybeObject* maybe_failure =
-      accessor->CopyElements(NULL, k, result_array->elements(),
-                             kind, 0, result_len, elms);
+  MaybeObject* maybe_failure = accessor->CopyElements(
+      NULL, k, kind, result_array->elements(), 0, result_len, elms);
   ASSERT(!maybe_failure->IsFailure());
   USE(maybe_failure);
 
@@ -1037,9 +1036,9 @@
   if (actual_delete_count > 0) {
     AssertNoAllocation no_gc;
     ElementsAccessor* accessor = array->GetElementsAccessor();
-    MaybeObject* maybe_failure =
-        accessor->CopyElements(NULL, actual_start, result_array->elements(),
-                               elements_kind, 0, actual_delete_count, elms_obj);
+    MaybeObject* maybe_failure = accessor->CopyElements(
+        NULL, actual_start, elements_kind, result_array->elements(),
+        0, actual_delete_count, elms_obj);
     // Cannot fail since the origin and target array are of the same elements
     // kind.
     ASSERT(!maybe_failure->IsFailure());
@@ -1105,12 +1104,12 @@
       if (actual_start > 0) {
         // Copy the part before actual_start as is.
         MaybeObject* maybe_failure = accessor->CopyElements(
-            NULL, 0, new_elms, kind, 0, actual_start, elms);
+            NULL, 0, kind, new_elms, 0, actual_start, elms);
         ASSERT(!maybe_failure->IsFailure());
         USE(maybe_failure);
       }
       MaybeObject* maybe_failure = accessor->CopyElements(
-          NULL, actual_start + actual_delete_count, new_elms, kind,
+          NULL, actual_start + actual_delete_count, kind, new_elms,
           actual_start + item_count,
           ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
       ASSERT(!maybe_failure->IsFailure());
@@ -1220,13 +1219,14 @@
 
   int j = 0;
   FixedArrayBase* storage = result_array->elements();
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
   for (int i = 0; i < n_arguments; i++) {
     JSArray* array = JSArray::cast(args[i]);
     int len = Smi::cast(array->length())->value();
+    ElementsKind from_kind = array->GetElementsKind();
     if (len > 0) {
-      ElementsAccessor* accessor = array->GetElementsAccessor();
       MaybeObject* maybe_failure =
-          accessor->CopyElements(array, 0, storage, elements_kind, j, len);
+          accessor->CopyElements(array, 0, from_kind, storage, j, len);
       if (maybe_failure->IsFailure()) return maybe_failure;
       j += len;
     }
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 74bd93f..58e17f4 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -51,17 +51,19 @@
 class CodeStubGraphBuilderBase : public HGraphBuilder {
  public:
   CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
-      : HGraphBuilder(&info_), info_(stub, isolate) {}
+      : HGraphBuilder(&info_), info_(stub, isolate), context_(NULL) {}
   virtual bool BuildGraph();
 
  protected:
   virtual void BuildCodeStub() = 0;
   HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
   HydrogenCodeStub* stub() { return info_.code_stub(); }
+  HContext* context() { return context_; }
 
  private:
   SmartArrayPointer<HParameter*> parameters_;
   CompilationInfoWithZone info_;
+  HContext* context_;
 };
 
 
@@ -77,6 +79,9 @@
   graph()->entry_block()->Finish(jump);
   set_current_block(next_block);
 
+  context_ = new(zone()) HContext();
+  AddInstruction(context_);
+
   int major_key = stub()->MajorKey();
   CodeStubInterfaceDescriptor* descriptor =
       info_.isolate()->code_stub_interface_descriptor(major_key);
@@ -121,7 +126,7 @@
       casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
   AddInstruction(load);
 
-  HReturn* ret = new(zone) HReturn(load);
+  HReturn* ret = new(zone) HReturn(load, context());
   current_block()->Finish(ret);
 }
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 7112f36..47eddb0 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -131,14 +131,16 @@
       Handle<Script> script = info->script();
       if (!script->IsUndefined() && !script->source()->IsUndefined()) {
         PrintF("--- Raw source ---\n");
-        StringInputBuffer stream(String::cast(script->source()));
-        stream.Seek(function->start_position());
+        ConsStringIteratorOp op;
+        StringCharacterStream stream(String::cast(script->source()),
+                                     &op,
+                                     function->start_position());
         // fun->end_position() points to the last character in the stream. We
         // need to compensate by adding one to calculate the length.
         int source_len =
             function->end_position() - function->start_position() + 1;
         for (int i = 0; i < source_len; i++) {
-          if (stream.has_more()) PrintF("%c", stream.GetNext());
+          if (stream.HasMore()) PrintF("%c", stream.GetNext());
         }
         PrintF("\n\n");
       }
diff --git a/src/elements.cc b/src/elements.cc
index 8e1bf3e..5b454e5 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -686,7 +686,7 @@
   MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                                        uint32_t from_start,
                                                        FixedArrayBase* to,
-                                                       ElementsKind to_kind,
+                                                       ElementsKind from_kind,
                                                        uint32_t to_start,
                                                        int packed_size,
                                                        int copy_size) {
@@ -696,8 +696,8 @@
 
   MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
                                                     uint32_t from_start,
+                                                    ElementsKind from_kind,
                                                     FixedArrayBase* to,
-                                                    ElementsKind to_kind,
                                                     uint32_t to_start,
                                                     int copy_size,
                                                     FixedArrayBase* from) {
@@ -707,8 +707,7 @@
     }
 
     if (from_holder) {
-      ElementsKind elements_kind = from_holder->GetElementsKind();
-      bool is_packed = IsFastPackedElementsKind(elements_kind) &&
+      bool is_packed = IsFastPackedElementsKind(from_kind) &&
           from_holder->IsJSArray();
       if (is_packed) {
         packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
@@ -718,7 +717,7 @@
       }
     }
     return ElementsAccessorSubclass::CopyElementsImpl(
-        from, from_start, to, to_kind, to_start, packed_size, copy_size);
+        from, from_start, to, from_kind, to_start, packed_size, copy_size);
   }
 
   MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -1003,6 +1002,41 @@
 };
 
 
+static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
+  switch (array->map()->instance_type()) {
+    case FIXED_ARRAY_TYPE:
+      if (array->IsDictionary()) {
+        return DICTIONARY_ELEMENTS;
+      } else {
+        return FAST_HOLEY_ELEMENTS;
+      }
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      return FAST_HOLEY_DOUBLE_ELEMENTS;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      return EXTERNAL_BYTE_ELEMENTS;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      return EXTERNAL_SHORT_ELEMENTS;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      return EXTERNAL_INT_ELEMENTS;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      return EXTERNAL_FLOAT_ELEMENTS;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      return EXTERNAL_DOUBLE_ELEMENTS;
+    case EXTERNAL_PIXEL_ARRAY_TYPE:
+      return EXTERNAL_PIXEL_ELEMENTS;
+    default:
+      UNREACHABLE();
+  }
+  return FAST_HOLEY_ELEMENTS;
+}
+
+
 template<typename FastElementsAccessorSubclass,
          typename KindTraits>
 class FastSmiOrObjectElementsAccessor
@@ -1018,29 +1052,49 @@
   static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                        uint32_t from_start,
                                        FixedArrayBase* to,
-                                       ElementsKind to_kind,
+                                       ElementsKind from_kind,
                                        uint32_t to_start,
                                        int packed_size,
                                        int copy_size) {
-    if (IsFastSmiOrObjectElementsKind(to_kind)) {
-      CopyObjectToObjectElements(
-          from, KindTraits::Kind, from_start, to, to_kind, to_start, copy_size);
-    } else if (IsFastDoubleElementsKind(to_kind)) {
-      if (IsFastSmiElementsKind(KindTraits::Kind)) {
-        if (IsFastPackedElementsKind(KindTraits::Kind) &&
-            packed_size != kPackedSizeNotKnown) {
-          CopyPackedSmiToDoubleElements(
-              from, from_start, to, to_start, packed_size, copy_size);
-        } else {
-          CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
-        }
-      } else {
-        CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+    ElementsKind to_kind = KindTraits::Kind;
+    switch (from_kind) {
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+        CopyObjectToObjectElements(
+            from, from_kind, from_start, to, to_kind, to_start, copy_size);
+        return to->GetHeap()->undefined_value();
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+        return CopyDoubleToObjectElements(
+            from, from_start, to, to_kind, to_start, copy_size);
+      case DICTIONARY_ELEMENTS:
+        CopyDictionaryToObjectElements(
+            from, from_start, to, to_kind, to_start, copy_size);
+        return to->GetHeap()->undefined_value();
+      case NON_STRICT_ARGUMENTS_ELEMENTS: {
+        // TODO(verwaest): This is a temporary hack to support extending
+        // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+        // This case should be UNREACHABLE().
+        FixedArray* parameter_map = FixedArray::cast(from);
+        FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+        ElementsKind from_kind = ElementsKindForArray(arguments);
+        return CopyElementsImpl(arguments, from_start, to, from_kind,
+                                to_start, packed_size, copy_size);
       }
-    } else {
-      UNREACHABLE();
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case EXTERNAL_PIXEL_ELEMENTS:
+        UNREACHABLE();
     }
-    return to->GetHeap()->undefined_value();
+    return NULL;
   }
 
 
@@ -1129,22 +1183,40 @@
   static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                        uint32_t from_start,
                                        FixedArrayBase* to,
-                                       ElementsKind to_kind,
+                                       ElementsKind from_kind,
                                        uint32_t to_start,
                                        int packed_size,
                                        int copy_size) {
-    switch (to_kind) {
+    switch (from_kind) {
       case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
+        CopyPackedSmiToDoubleElements(
+            from, from_start, to, to_start, packed_size, copy_size);
+        break;
       case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-        return CopyDoubleToObjectElements(
-            from, from_start, to, to_kind, to_start, copy_size);
+        CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+        break;
       case FAST_DOUBLE_ELEMENTS:
       case FAST_HOLEY_DOUBLE_ELEMENTS:
         CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
-        return from;
-      default:
+        break;
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+        CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+        break;
+      case DICTIONARY_ELEMENTS:
+        CopyDictionaryToDoubleElements(
+            from, from_start, to, to_start, copy_size);
+        break;
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case EXTERNAL_PIXEL_ELEMENTS:
         UNREACHABLE();
     }
     return to->GetHeap()->undefined_value();
@@ -1460,27 +1532,12 @@
   MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                                        uint32_t from_start,
                                                        FixedArrayBase* to,
-                                                       ElementsKind to_kind,
+                                                       ElementsKind from_kind,
                                                        uint32_t to_start,
                                                        int packed_size,
                                                        int copy_size) {
-    switch (to_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-        CopyDictionaryToObjectElements(
-            from, from_start, to, to_kind, to_start, copy_size);
-        return from;
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-        CopyDictionaryToDoubleElements(
-            from, from_start, to, to_start, copy_size);
-        return from;
-      default:
-        UNREACHABLE();
-    }
-    return to->GetHeap()->undefined_value();
+    UNREACHABLE();
+    return NULL;
   }
 
 
@@ -1707,15 +1764,12 @@
   MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                                        uint32_t from_start,
                                                        FixedArrayBase* to,
-                                                       ElementsKind to_kind,
+                                                       ElementsKind from_kind,
                                                        uint32_t to_start,
                                                        int packed_size,
                                                        int copy_size) {
-    FixedArray* parameter_map = FixedArray::cast(from);
-    FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
-    ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
-    return accessor->CopyElements(NULL, from_start, to, to_kind,
-                                  to_start, copy_size, arguments);
+    UNREACHABLE();
+    return NULL;
   }
 
   static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
@@ -1761,35 +1815,7 @@
 
 
 ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
-  switch (array->map()->instance_type()) {
-    case FIXED_ARRAY_TYPE:
-      if (array->IsDictionary()) {
-        return elements_accessors_[DICTIONARY_ELEMENTS];
-      } else {
-        return elements_accessors_[FAST_HOLEY_ELEMENTS];
-      }
-    case EXTERNAL_BYTE_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
-    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_UNSIGNED_BYTE_ELEMENTS];
-    case EXTERNAL_SHORT_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_SHORT_ELEMENTS];
-    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_UNSIGNED_SHORT_ELEMENTS];
-    case EXTERNAL_INT_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_INT_ELEMENTS];
-    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_UNSIGNED_INT_ELEMENTS];
-    case EXTERNAL_FLOAT_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_FLOAT_ELEMENTS];
-    case EXTERNAL_DOUBLE_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_DOUBLE_ELEMENTS];
-    case EXTERNAL_PIXEL_ARRAY_TYPE:
-      return elements_accessors_[EXTERNAL_PIXEL_ELEMENTS];
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
+  return elements_accessors_[ElementsKindForArray(array)];
 }
 
 
diff --git a/src/elements.h b/src/elements.h
index ffd6428..e25076b 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -143,17 +143,17 @@
   MUST_USE_RESULT virtual MaybeObject* CopyElements(
       JSObject* source_holder,
       uint32_t source_start,
+      ElementsKind source_kind,
       FixedArrayBase* destination,
-      ElementsKind destination_kind,
       uint32_t destination_start,
       int copy_size,
       FixedArrayBase* source = NULL) = 0;
 
   MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
                                             FixedArrayBase* to,
-                                            ElementsKind to_kind,
+                                            ElementsKind from_kind,
                                             FixedArrayBase* from = NULL) {
-    return CopyElements(from_holder, 0, to, to_kind, 0,
+    return CopyElements(from_holder, 0, from_kind, to, 0,
                         kCopyToEndAndInitializeToHole, from);
   }
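
Note: the reordered parameters above make callers describe the source of the
copy. The destination kind is implied by the accessor doing the copying (each
KindTraits fixes it statically), while the source kind must travel with the
call. A minimal usage sketch of the convenience overload, assuming an
array/storage pair like the CopyElements call site patched at the top of this
diff:

    ElementsKind from_kind = array->GetElementsKind();
    ElementsAccessor* accessor = array->GetElementsAccessor();
    // Copies to the end of 'storage', initializing the remainder to the hole.
    MaybeObject* maybe_failure = accessor->CopyElements(array, storage, from_kind);
    if (maybe_failure->IsFailure()) return maybe_failure;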
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 338060f..149c65d 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -300,6 +300,8 @@
             "enable unaligned accesses for ARMv7 (ARM only)")
 DEFINE_bool(enable_fpu, true,
             "enable use of MIPS FPU instructions if available (MIPS only)")
+DEFINE_bool(enable_vldr_imm, false,
+            "enable use of constant pools for double immediate (ARM only)")
 
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
diff --git a/src/handles.cc b/src/handles.cc
index 3bc1f4b..16fe0c7 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -883,165 +883,6 @@
 }
 
 
-// This method determines the type of string involved and then gets the UTF8
-// length of the string.  It doesn't flatten the string and has log(n) recursion
-// for a string of length n.  If the failure flag gets set, then we have to
-// flatten the string and retry.  Failures are caused by surrogate pairs in deep
-// cons strings.
-
-// Single surrogate characters that are encountered in the UTF-16 character
-// sequence of the input string get counted as 3 UTF-8 bytes, because that
-// is the way that WriteUtf8 will encode them.  Surrogate pairs are counted and
-// encoded as one 4-byte UTF-8 sequence.
-
-// This function conceptually uses recursion on the two halves of cons strings.
-// However, in order to avoid the recursion going too deep it recurses on the
-// second string of the cons, but iterates on the first substring (by manually
-// eliminating it as a tail recursion).  This means it counts the UTF-8 length
-// from the end to the start, which makes no difference to the total.
-
-// Surrogate pairs are recognized even if they are split across two sides of a
-// cons, which complicates the implementation somewhat.  Therefore, too deep
-// recursion cannot always be avoided.  This case is detected, and the failure
-// flag is set, a signal to the caller that the string should be flattened and
-// the operation retried.
-int Utf8LengthHelper(String* input,
-                     int from,
-                     int to,
-                     bool followed_by_surrogate,
-                     int max_recursion,
-                     bool* failure,
-                     bool* starts_with_surrogate) {
-  if (from == to) return 0;
-  int total = 0;
-  bool dummy;
-  while (true) {
-    if (input->IsOneByteRepresentation()) {
-      *starts_with_surrogate = false;
-      return total + to - from;
-    }
-    switch (StringShape(input).representation_tag()) {
-      case kConsStringTag: {
-        ConsString* str = ConsString::cast(input);
-        String* first = str->first();
-        String* second = str->second();
-        int first_length = first->length();
-        if (first_length - from > to - first_length) {
-          if (first_length < to) {
-            // Right hand side is shorter.  No need to check the recursion depth
-            // since this can only happen log(n) times.
-            bool right_starts_with_surrogate = false;
-            total += Utf8LengthHelper(second,
-                                      0,
-                                      to - first_length,
-                                      followed_by_surrogate,
-                                      max_recursion - 1,
-                                      failure,
-                                      &right_starts_with_surrogate);
-            if (*failure) return 0;
-            followed_by_surrogate = right_starts_with_surrogate;
-            input = first;
-            to = first_length;
-          } else {
-            // We only need the left hand side.
-            input = first;
-          }
-        } else {
-          if (first_length > from) {
-            // Left hand side is shorter.
-            if (first->IsOneByteRepresentation()) {
-              total += first_length - from;
-              *starts_with_surrogate = false;
-              starts_with_surrogate = &dummy;
-              input = second;
-              from = 0;
-              to -= first_length;
-            } else if (second->IsOneByteRepresentation()) {
-              followed_by_surrogate = false;
-              total += to - first_length;
-              input = first;
-              to = first_length;
-            } else if (max_recursion > 0) {
-              bool right_starts_with_surrogate = false;
-              // Recursing on the long one.  This may fail.
-              total += Utf8LengthHelper(second,
-                                        0,
-                                        to - first_length,
-                                        followed_by_surrogate,
-                                        max_recursion - 1,
-                                        failure,
-                                        &right_starts_with_surrogate);
-              if (*failure) return 0;
-              input = first;
-              to = first_length;
-              followed_by_surrogate = right_starts_with_surrogate;
-            } else {
-              *failure = true;
-              return 0;
-            }
-          } else {
-            // We only need the right hand side.
-            input = second;
-            from = 0;
-            to -= first_length;
-          }
-        }
-        continue;
-      }
-      case kExternalStringTag:
-      case kSeqStringTag: {
-        Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
-        const uc16* p = vector.start();
-        int previous = unibrow::Utf16::kNoPreviousCharacter;
-        for (int i = from; i < to; i++) {
-          uc16 c = p[i];
-          total += unibrow::Utf8::Length(c, previous);
-          previous = c;
-        }
-        if (to - from > 0) {
-          if (unibrow::Utf16::IsLeadSurrogate(previous) &&
-              followed_by_surrogate) {
-            total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
-          }
-          if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
-            *starts_with_surrogate = true;
-          }
-        }
-        return total;
-      }
-      case kSlicedStringTag: {
-        SlicedString* str = SlicedString::cast(input);
-        int offset = str->offset();
-        input = str->parent();
-        from += offset;
-        to += offset;
-        continue;
-      }
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return 0;
-  }
-  return 0;
-}
-
-
-int Utf8Length(Handle<String> str) {
-  bool dummy;
-  bool failure;
-  int len;
-  const int kRecursionBudget = 100;
-  do {
-    failure = false;
-    len = Utf8LengthHelper(
-        *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
-    if (failure) FlattenString(str);
-  } while (failure);
-  return len;
-}
-
-
 DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
     : impl_(isolate->handle_scope_implementer()) {
   ASSERT(impl_->isolate() == Isolate::Current());
diff --git a/src/handles.h b/src/handles.h
index 032fbe4..684f4ca 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -214,8 +214,6 @@
 // string.
 Handle<String> FlattenGetString(Handle<String> str);
 
-int Utf8Length(Handle<String> str);
-
 Handle<Object> SetProperty(Isolate* isolate,
                            Handle<Object> object,
                            Handle<Object> key,
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index a05fa20..138bee0 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1019,6 +1019,12 @@
 }
 
 
+void HForceRepresentation::PrintDataTo(StringStream* stream) {
+  stream->Add("%s ", representation().Mnemonic());
+  value()->PrintNameTo(stream);
+}
+
+
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
   stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d8f5dec..23390dc 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1171,10 +1171,11 @@
 };
 
 
-class HReturn: public HTemplateControlInstruction<0, 1> {
+class HReturn: public HTemplateControlInstruction<0, 2> {
  public:
-  explicit HReturn(HValue* value) {
+  HReturn(HValue* value, HValue* context) {
     SetOperandAt(0, value);
+    SetOperandAt(1, context);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -1184,6 +1185,7 @@
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
+  HValue* context() { return OperandAt(1); }
 
   DECLARE_CONCRETE_INSTRUCTION(Return)
 };
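
Note: the extra operand exists because compiled stubs return without a full
frame, so the epilogue can no longer reload esi from the frame slot (the
lithium-codegen-ia32.cc hunk below deletes exactly that reload). The context
therefore becomes an explicit input of the return; a sketch of the
construction, mirroring the hydrogen.cc call sites below:

    // 'value' and 'context' are HValue*s already present in the graph.
    HReturn* ret = new(zone()) HReturn(value, context);
    current_block()->FinishExit(ret);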
@@ -1260,6 +1262,8 @@
     return representation();  // Same as the output representation.
   }
 
+  virtual void PrintDataTo(StringStream* stream);
+
   DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 };
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index bf508da..55beb3a 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3399,7 +3399,8 @@
   if (HasStackOverflow()) return false;
 
   if (current_block() != NULL) {
-    HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+    HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined(),
+                                         context);
     current_block()->FinishExit(instr);
     set_current_block(NULL);
   }
@@ -4215,7 +4216,9 @@
     // Not an inlined return, so an actual one.
     CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
-    current_block()->FinishExit(new(zone()) HReturn(result));
+    current_block()->FinishExit(new(zone()) HReturn(
+        result,
+        environment()->LookupContext()));
   } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
     // Return from an inlined construct call. In a test context the return value
     // will always evaluate to true, in a value context the return value needs
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 114f878..f838592 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -366,7 +366,7 @@
 void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
   if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
     RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
-  } else if (rmode != RelocInfo::NONE) {
+  } else if (!RelocInfo::IsNone(rmode)) {
     RecordRelocInfo(rmode);
   }
   emit(x);
@@ -379,7 +379,7 @@
     emit_code_relative_offset(label);
     return;
   }
-  if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
+  if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
   emit(x.x_);
 }
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e5da17b..750746c 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2643,7 +2643,7 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  ASSERT(rmode != RelocInfo::NONE);
+  ASSERT(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index cadff49..da9f13e 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -257,6 +257,7 @@
       __ AllocateInNewSpace(FixedArray::kHeaderSize,
                             times_pointer_size,
                             edx,
+                            REGISTER_VALUE_IS_INT32,
                             edi,
                             ecx,
                             no_reg,
@@ -1102,8 +1103,9 @@
   // requested elements.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,  // array_size is a smi.
+                        times_pointer_size,
                         array_size,
+                        REGISTER_VALUE_IS_SMI,
                         result,
                         elements_array_end,
                         scratch,
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 92d24ba..92f55b8 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -340,7 +340,11 @@
 
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -4080,8 +4084,9 @@
   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
   // Elements:  [Map][Length][..elements..]
   __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,
-                        ebx,  // In: Number of elements (times 2, being a smi)
+                        times_pointer_size,
+                        ebx,  // In: Number of elements as a smi
+                        REGISTER_VALUE_IS_SMI,
                         eax,  // Out: Start of allocation (tagged).
                         ecx,  // Out: End of allocation.
                         edx,  // Scratch register
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 9477bf1..45a968d 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -434,24 +434,11 @@
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi,
-                      times_4,
-                      FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
-  Label aligned, aligned_done;
-  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
-  __ j(zero, &aligned, Label::kNear);
-  __ mov(FieldOperand(eax, 0),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ add(eax, Immediate(kPointerSize));
-  __ jmp(&aligned_done);
-
-  __ bind(&aligned);
-  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-
-  __ bind(&aligned_done);
+  AllocationFlags flags =
+      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+  __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
+                        edi, REGISTER_VALUE_IS_SMI,
+                        eax, ebx, no_reg, &gc_required, flags);
 
   // eax: destination FixedDoubleArray
   // edi: number of elements
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index bc3eed5..761f516 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -804,9 +804,9 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   ASSERT(info()->IsOptimizing() || info()->IsStub());
-  Deoptimizer::BailoutType bailout_type = frame_is_built_
-      ? Deoptimizer::EAGER
-      : Deoptimizer::LAZY;
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
@@ -1212,7 +1212,7 @@
 
 
 void LCodeGen::DoDivI(LDivI* instr) {
-  if (instr->hydrogen()->HasPowerOf2Divisor()) {
+  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->left());
     int32_t divisor =
         HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -1259,13 +1259,13 @@
 
   // Check for x / 0.
   Register right_reg = ToRegister(right);
-  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(right_reg, ToOperand(right));
     DeoptimizeIf(zero, instr->environment());
   }
 
   // Check for (0 / -x) that will produce negative zero.
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label left_not_zero;
     __ test(left_reg, Operand(left_reg));
     __ j(not_zero, &left_not_zero, Label::kNear);
@@ -1275,7 +1275,7 @@
   }
 
   // Check for (kMinInt / -1).
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
     Label left_not_min_int;
     __ cmp(left_reg, kMinInt);
     __ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1288,9 +1288,19 @@
   __ cdq();
   __ idiv(right_reg);
 
-  // Deoptimize if remainder is not 0.
-  __ test(edx, Operand(edx));
-  DeoptimizeIf(not_zero, instr->environment());
+  if (!instr->is_flooring()) {
+    // Deoptimize if remainder is not 0.
+    __ test(edx, Operand(edx));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    Label done;
+    __ test(edx, edx);
+    __ j(zero, &done, Label::kNear);
+    __ xor_(edx, right_reg);
+    __ sar(edx, 31);
+    __ add(eax, edx);
+    __ bind(&done);
+  }
 }
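
Note: the new else-branch turns idiv's truncated quotient into a floored one
for HMathFloorOfDiv with a non-constant divisor (wired up in the
lithium-ia32.cc hunk below). A plain C++ sketch of the same fixup (not the
emitted code):

    #include <stdint.h>

    int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
      // Division by zero and kMinInt / -1 deoptimize before the idiv above.
      int32_t quotient = dividend / divisor;   // truncates toward zero (eax)
      int32_t remainder = dividend % divisor;  // what idiv leaves in edx
      if (remainder != 0) {
        // Arithmetic shift, like sar: -1 when the signs differ, 0 otherwise.
        quotient += (remainder ^ divisor) >> 31;
      }
      return quotient;
    }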
 
 
@@ -2651,7 +2661,6 @@
     __ bind(&no_padding);
   }
   if (info()->IsStub()) {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ Ret();
   } else {
     __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
@@ -3386,7 +3395,12 @@
 
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+  if (info()->IsOptimizing()) {
+    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in esi.
+    ASSERT(result.is(esi));
+  }
 }
 
 
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 6e07593..da1a466 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -114,7 +114,11 @@
   stream->Add("= ");
   for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
   }
 }
 
@@ -1055,7 +1059,13 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, esi);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
 }
 
 
@@ -1305,12 +1315,31 @@
     return constant_val->CopyToRepresentation(Representation::Integer32(),
                                               divisor->block()->zone());
   }
+  // A value with an integer representation does not need to be transformed.
+  if (divisor->representation().IsInteger32()) {
+    return divisor;
+  // A change from an integer32 can be replaced by the integer32 value.
+  } else if (divisor->IsChange() &&
+             HChange::cast(divisor)->from().IsInteger32()) {
+    return HChange::cast(divisor)->value();
+  }
   return NULL;
 }
 
 
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
   HValue* right = instr->right();
+  if (!right->IsConstant()) {
+    ASSERT(right->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
+    LOperand* temp = FixedTemp(edx);
+    LOperand* dividend = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+    return AssignEnvironment(DefineFixed(flooring_div, eax));
+  }
+
   ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
   LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
   int32_t divisor_si = HConstant::cast(right)->Integer32Value();
@@ -1865,7 +1894,10 @@
 
 
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new(zone()) LReturn(UseFixed(instr->value(), eax));
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), esi)
+      : NULL;
+  return new(zone()) LReturn(UseFixed(instr->value(), eax), context);
 }
 
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index f4056c1..02eb6d3 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -565,6 +565,8 @@
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
+  bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(Div)
 };
@@ -1342,10 +1344,11 @@
 };
 
 
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn: public LTemplateInstruction<0, 2, 0> {
  public:
-  explicit LReturn(LOperand* value) {
+  LReturn(LOperand* value, LOperand* context) {
     inputs_[0] = value;
+    inputs_[1] = context;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Return, "return")
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index e9ce797..e5b580b 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -406,7 +406,7 @@
 
 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
   static const int kMaxImmediateBits = 17;
-  if (x.rmode_ != RelocInfo::NONE) return false;
+  if (!RelocInfo::IsNone(x.rmode_)) return false;
   return !is_intn(x.x_, kMaxImmediateBits);
 }
 
@@ -1241,6 +1241,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1260,6 +1261,19 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
   Register top_reg = result_end.is_valid() ? result_end : result;
 
   // Calculate new top and bail out if new space is exhausted.
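
Note: the alignment block above (repeated in the variable-size overload below)
relies on two invariants: the allocation top is always kPointerSize-aligned,
and the heap limit itself is double-aligned, so the filler write can never
overrun the limit. A rough C++ model of the sequence, using a hypothetical
helper rather than any V8 API:

    #include <stdint.h>

    char* AlignNewSpaceTop(char* top, void* one_pointer_filler_map) {
      const uintptr_t kDoubleAlignmentMask = 8 - 1;  // kDoubleSize - 1
      if ((reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) != 0) {
        // A one-word filler object keeps the heap iterable for the GC.
        *reinterpret_cast<void**>(top) = one_pointer_filler_map;
        top += 4;  // kDoubleSize / 2, i.e. kPointerSize on ia32
      }
      return top;
    }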
@@ -1278,26 +1292,31 @@
   UpdateAllocationTopHelper(top_reg, scratch);
 
   // Tag result if requested.
+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
       sub(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
-    add(result, Immediate(kHeapObjectTag));
+  } else if (tag_result) {
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }
 }
 
 
-void MacroAssembler::AllocateInNewSpace(int header_size,
-                                        ScaleFactor element_size,
-                                        Register element_count,
-                                        Register result,
-                                        Register result_end,
-                                        Register scratch,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(
+    int header_size,
+    ScaleFactor element_size,
+    Register element_count,
+    RegisterValueType element_count_type,
+    Register result,
+    Register result_end,
+    Register scratch,
+    Label* gc_required,
+    AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1316,21 +1335,44 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());
 
   // We assume that element_count*element_size + header_size does not
   // overflow.
+  if (element_count_type == REGISTER_VALUE_IS_SMI) {
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+    ASSERT(element_size >= times_2);
+    ASSERT(kSmiTagSize == 1);
+    element_size = static_cast<ScaleFactor>(element_size - 1);
+  } else {
+    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+  }
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
 
-  // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    lea(result, Operand(result, kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }
 
   // Update allocation top.
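
Note: decrementing the ScaleFactor is smi-untagging folded into the address
computation. An ia32 smi stores its integer shifted left by one (kSmiTagSize
is 1), so scaling the tagged word by half the element size addresses the same
bytes as scaling the untagged count by the full size; times_8 becomes times_4,
and so on. A sketch of the arithmetic, with hypothetical names:

    #include <stdint.h>

    // smi_count == element_count << 1, so shift by one less than the
    // element size's log2 (valid because element_size >= times_2 is
    // asserted above).
    uint32_t SizeFromSmiCount(uint32_t header_size, uint32_t smi_count,
                              int log2_element_size) {
      return header_size + (smi_count << (log2_element_size - 1));
    }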
@@ -1344,6 +1386,8 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1433,6 +1477,7 @@
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,
@@ -1468,6 +1513,7 @@
   AllocateInNewSpace(SeqOneByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e7a5d28..cc7ef29 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
@@ -55,6 +43,12 @@
 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
 
 
+enum RegisterValueType {
+  REGISTER_VALUE_IS_SMI,
+  REGISTER_VALUE_IS_INT32
+};
+
+
 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
 
 
@@ -576,6 +570,7 @@
   void AllocateInNewSpace(int header_size,
                           ScaleFactor element_size,
                           Register element_count,
+                          RegisterValueType element_count_type,
                           Register result,
                           Register result_end,
                           Register scratch,
diff --git a/src/isolate.cc b/src/isolate.cc
index d0b7f11..60e3379 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1634,7 +1634,7 @@
       free_list_(0),
       preallocated_storage_preallocated_(false),
       inner_pointer_to_code_cache_(NULL),
-      write_input_buffer_(NULL),
+      write_iterator_(NULL),
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
@@ -1845,8 +1845,8 @@
   bootstrapper_ = NULL;
   delete inner_pointer_to_code_cache_;
   inner_pointer_to_code_cache_ = NULL;
-  delete write_input_buffer_;
-  write_input_buffer_ = NULL;
+  delete write_iterator_;
+  write_iterator_ = NULL;
 
   delete context_switcher_;
   context_switcher_ = NULL;
@@ -1964,7 +1964,7 @@
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
   inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
-  write_input_buffer_ = new StringInputBuffer();
+  write_iterator_ = new ConsStringIteratorOp();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
   handle_scope_implementer_ = new HandleScopeImplementer(this);
diff --git a/src/isolate.h b/src/isolate.h
index 9f086b2..b3cfb43 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -75,7 +75,7 @@
 class RegExpStack;
 class SaveContext;
 class UnicodeCache;
-class StringInputBuffer;
+class ConsStringIteratorOp;
 class StringTracker;
 class StubCache;
 class ThreadManager;
@@ -881,7 +881,7 @@
     return inner_pointer_to_code_cache_;
   }
 
-  StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+  ConsStringIteratorOp* write_iterator() { return write_iterator_; }
 
   GlobalHandles* global_handles() { return global_handles_; }
 
@@ -903,16 +903,16 @@
     return &jsregexp_canonrange_;
   }
 
-  StringInputBuffer* objects_string_compare_buffer_a() {
-    return &objects_string_compare_buffer_a_;
+  ConsStringIteratorOp* objects_string_compare_iterator_a() {
+    return &objects_string_compare_iterator_a_;
   }
 
-  StringInputBuffer* objects_string_compare_buffer_b() {
-    return &objects_string_compare_buffer_b_;
+  ConsStringIteratorOp* objects_string_compare_iterator_b() {
+    return &objects_string_compare_iterator_b_;
   }
 
-  StaticResource<StringInputBuffer>* objects_string_input_buffer() {
-    return &objects_string_input_buffer_;
+  StaticResource<ConsStringIteratorOp>* objects_string_iterator() {
+    return &objects_string_iterator_;
   }
 
   RuntimeState* runtime_state() { return &runtime_state_; }
@@ -1225,7 +1225,7 @@
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
   InnerPointerToCodeCache* inner_pointer_to_code_cache_;
-  StringInputBuffer* write_input_buffer_;
+  ConsStringIteratorOp* write_iterator_;
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
@@ -1236,9 +1236,9 @@
   StringTracker* string_tracker_;
   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
-  StringInputBuffer objects_string_compare_buffer_a_;
-  StringInputBuffer objects_string_compare_buffer_b_;
-  StaticResource<StringInputBuffer> objects_string_input_buffer_;
+  ConsStringIteratorOp objects_string_compare_iterator_a_;
+  ConsStringIteratorOp objects_string_compare_iterator_b_;
+  StaticResource<ConsStringIteratorOp> objects_string_iterator_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index 8f660ce..1f59cb4 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -99,6 +99,7 @@
 
 LOperand* InputIterator::Current() {
   ASSERT(!Done());
+  ASSERT(instr_->InputAt(current_) != NULL);
   return instr_->InputAt(current_);
 }
 
@@ -110,7 +111,9 @@
 
 
 void InputIterator::SkipUninteresting() {
-  while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+  while (current_ < limit_) {
+    LOperand* current = instr_->InputAt(current_);
+    if (current != NULL && !current->IsConstantOperand()) break;
     ++current_;
   }
 }
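
Note: these NULL tolerances (and the matching asserts in the lithium.h hunk
below) exist because LReturn now carries a second input slot that DoReturn in
lithium-ia32.cc above leaves NULL when not compiling a stub; the iterators
previously assumed every input slot was populated.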
@@ -127,9 +130,11 @@
 
 LOperand* UseIterator::Current() {
   ASSERT(!Done());
-  return input_iterator_.Done()
+  LOperand* result = input_iterator_.Done()
       ? env_iterator_.Current()
       : input_iterator_.Current();
+  ASSERT(result != NULL);
+  return result;
 }
 
 
diff --git a/src/lithium.h b/src/lithium.h
index cc2cde0..ea61ff5 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -581,6 +581,7 @@
 
   LOperand* Current() {
     ASSERT(!Done());
+    ASSERT(env_->values()->at(current_) != NULL);
     return env_->values()->at(current_);
   }
 
@@ -622,6 +623,7 @@
 
   LOperand* Current() {
     ASSERT(!current_iterator_.Done());
+    ASSERT(current_iterator_.Current() != NULL);
     return current_iterator_.Current();
   }
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 11e2217..9e71123 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -36,6 +36,23 @@
 };
 
 
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+  // No special flags.
+  NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated memory already tagged as a heap
+  // object.
+  TAG_OBJECT = 1 << 0,
+  // The content of the result register already contains the allocation top in
+  // new space.
+  RESULT_CONTAINS_TOP = 1 << 1,
+  // The requested size of the space to allocate is specified in words
+  // instead of bytes.
+  SIZE_IN_WORDS = 1 << 2,
+  // Align the allocation to a multiple of kDoubleSize.
+  DOUBLE_ALIGNMENT = 1 << 3
+};
+
+
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;
 
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e750620..de97bc3 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -805,7 +805,7 @@
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
 bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
-  return rmode != RelocInfo::NONE;
+  return !RelocInfo::IsNone(rmode);
 }
 
 void Assembler::GenInstrRegister(Opcode opcode,
@@ -1990,7 +1990,7 @@
            || RelocInfo::IsPosition(rmode));
     // These modes do not need an entry in the constant pool.
   }
-  if (rinfo.rmode() != RelocInfo::NONE) {
+  if (!RelocInfo::IsNone(rinfo.rmode())) {
     // Don't record external references unless the heap will be serialized.
     if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 521b38d..0e58eb5 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -112,7 +112,11 @@
   stream->Add("= ");
   for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
   }
 }
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 7772cfe..89623bd 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2845,21 +2845,31 @@
 
 
 uint16_t StringCharacterStream::GetNext() {
-  ASSERT((buffer8_ == NULL && end_ == NULL) || buffer8_ < end_);
+  ASSERT(buffer8_ != NULL && end_ != NULL);
+  // Advance cursor if needed.
+  // TODO(dcarney): Ensure users of the API call HasMore first and avoid this.
+  if (buffer8_ == end_) HasMore();
+  ASSERT(buffer8_ < end_);
   return is_one_byte_ ? *buffer8_++ : *buffer16_++;
 }
 
 
-StringCharacterStream::StringCharacterStream(
-    String* string, unsigned offset, ConsStringIteratorOp* op)
+StringCharacterStream::StringCharacterStream(String* string,
+                                             ConsStringIteratorOp* op,
+                                             unsigned offset)
   : is_one_byte_(false),
-    buffer8_(NULL),
-    end_(NULL),
     op_(op) {
-  op->Reset();
+  Reset(string, offset);
+}
+
+
+void StringCharacterStream::Reset(String* string, unsigned offset) {
+  op_->Reset();
+  buffer8_ = NULL;
+  end_ = NULL;
   int32_t type = string->map()->instance_type();
   unsigned length = string->length();
-  String::Visit(string, offset, *this, *op, type, length);
+  String::Visit(string, offset, *this, *op_, type, length);
 }
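
Note: taken together, this is the replacement pattern for the deleted
StringInputBuffer, as the codegen.cc and objects.cc hunks in this patch use
it. A sketch (the op only lends its traversal state to the stream, so it must
outlive it):

    ConsStringIteratorOp op;
    StringCharacterStream stream(string, &op);  // offset defaults to 0; the
                                                // codegen.cc hunk passes an
                                                // explicit start offset
    while (stream.HasMore()) {
      uint16_t c = stream.GetNext();
      // ... consume c ...
    }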
 
 
diff --git a/src/objects.cc b/src/objects.cc
index e1d6da6..a97590a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1048,7 +1048,8 @@
     return;
   }
 
-  StringInputBuffer buf(this);
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(this, &op);
 
   bool truncated = false;
   if (len > kMaxShortPrintLength) {
@@ -1057,17 +1058,17 @@
   }
   bool ascii = true;
   for (int i = 0; i < len; i++) {
-    int c = buf.GetNext();
+    uint16_t c = stream.GetNext();
 
     if (c < 32 || c >= 127) {
       ascii = false;
     }
   }
-  buf.Reset(this);
+  stream.Reset(this);
   if (ascii) {
     accumulator->Add("<String[%u]: ", length());
     for (int i = 0; i < len; i++) {
-      accumulator->Put(buf.GetNext());
+      accumulator->Put(static_cast<char>(stream.GetNext()));
     }
     accumulator->Put('>');
   } else {
@@ -1075,7 +1076,7 @@
     // characters and that backslashes are therefore escaped.
     accumulator->Add("<String[%u]\\: ", length());
     for (int i = 0; i < len; i++) {
-      int c = buf.GetNext();
+      uint16_t c = stream.GetNext();
       if (c == '\n') {
         accumulator->Add("\\n");
       } else if (c == '\r') {
@@ -1085,7 +1086,7 @@
       } else if (c < 32 || c > 126) {
         accumulator->Add("\\x%02x", c);
       } else {
-        accumulator->Put(c);
+        accumulator->Put(static_cast<char>(c));
       }
     }
     if (truncated) {
@@ -1537,15 +1538,16 @@
 }
 
 
-static bool IsIdentifier(UnicodeCache* cache,
-                         unibrow::CharacterStream* buffer) {
+static bool IsIdentifier(UnicodeCache* cache, String* string) {
   // Checks whether the string contains an identifier (no escape).
-  if (!buffer->has_more()) return false;
-  if (!cache->IsIdentifierStart(buffer->GetNext())) {
+  if (string->length() == 0) return false;
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(string, &op);
+  if (!cache->IsIdentifierStart(stream.GetNext())) {
     return false;
   }
-  while (buffer->has_more()) {
-    if (!cache->IsIdentifierPart(buffer->GetNext())) {
+  while (stream.HasMore()) {
+    if (!cache->IsIdentifierPart(stream.GetNext())) {
       return false;
     }
   }
@@ -1566,8 +1568,7 @@
   // hidden symbols) and is not a real identifier.
   // Normalize the object if it will have too many fast properties.
   Isolate* isolate = GetHeap()->isolate();
-  StringInputBuffer buffer(name);
-  if ((!IsIdentifier(isolate->unicode_cache(), &buffer)
+  if ((!IsIdentifier(isolate->unicode_cache(), name)
        && name != isolate->heap()->hidden_symbol()) ||
       (map()->unused_property_fields() == 0 &&
        TooManyFastProperties(properties()->length(), store_mode))) {
@@ -6594,14 +6595,14 @@
   if (length < 0) length = kMaxInt - offset;
 
   // Compute the size of the UTF-8 string. Start at the specified offset.
-  Access<StringInputBuffer> buffer(
-      heap->isolate()->objects_string_input_buffer());
-  buffer->Reset(offset, this);
+  Access<ConsStringIteratorOp> op(
+      heap->isolate()->objects_string_iterator());
+  StringCharacterStream stream(this, op.value(), offset);
   int character_position = offset;
   int utf8_bytes = 0;
   int last = unibrow::Utf16::kNoPreviousCharacter;
-  while (buffer->has_more() && character_position++ < offset + length) {
-    uint16_t character = buffer->GetNext();
+  while (stream.HasMore() && character_position++ < offset + length) {
+    uint16_t character = stream.GetNext();
     utf8_bytes += unibrow::Utf8::Length(character, last);
     last = character;
   }
@@ -6613,13 +6614,12 @@
   char* result = NewArray<char>(utf8_bytes + 1);
 
   // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
-  buffer->Rewind();
-  buffer->Seek(offset);
+  stream.Reset(this, offset);
   character_position = offset;
   int utf8_byte_position = 0;
   last = unibrow::Utf16::kNoPreviousCharacter;
-  while (buffer->has_more() && character_position++ < offset + length) {
-    uint16_t character = buffer->GetNext();
+  while (stream.HasMore() && character_position++ < offset + length) {
+    uint16_t character = stream.GetNext();
     if (allow_nulls == DISALLOW_NULLS && character == 0) {
       character = ' ';
     }
@@ -6671,15 +6671,15 @@
   }
   Heap* heap = GetHeap();
 
-  Access<StringInputBuffer> buffer(
-      heap->isolate()->objects_string_input_buffer());
-  buffer->Reset(this);
+  Access<ConsStringIteratorOp> op(
+      heap->isolate()->objects_string_iterator());
+  StringCharacterStream stream(this, op.value());
 
   uc16* result = NewArray<uc16>(length() + 1);
 
   int i = 0;
-  while (buffer->has_more()) {
-    uint16_t character = buffer->GetNext();
+  while (stream.HasMore()) {
+    uint16_t character = stream.GetNext();
     result[i++] = character;
   }
   result[i] = 0;
@@ -6693,252 +6693,6 @@
 }
 
 
-void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
-                                                           unsigned* offset_ptr,
-                                                           unsigned max_chars) {
-  unsigned chars_read = 0;
-  unsigned offset = *offset_ptr;
-  while (chars_read < max_chars) {
-    uint16_t c = *reinterpret_cast<uint16_t*>(
-        reinterpret_cast<char*>(this) -
-            kHeapObjectTag + kHeaderSize + offset * kShortSize);
-    if (c <= kMaxAsciiCharCode) {
-      // Fast case for ASCII characters.   Cursor is an input output argument.
-      if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
-                                                          rbb->util_buffer,
-                                                          rbb->capacity,
-                                                          rbb->cursor)) {
-        break;
-      }
-    } else {
-      if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
-                                                             rbb->util_buffer,
-                                                             rbb->capacity,
-                                                             rbb->cursor)) {
-        break;
-      }
-    }
-    offset++;
-    chars_read++;
-  }
-  *offset_ptr = offset;
-  rbb->remaining += chars_read;
-}
-
-
-const unibrow::byte* SeqOneByteString::SeqOneByteStringReadBlock(
-    unsigned* remaining,
-    unsigned* offset_ptr,
-    unsigned max_chars) {
-  const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
-      kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
-  *remaining = max_chars;
-  *offset_ptr += max_chars;
-  return b;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse.  Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane.  Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit. Since this is not an
-// -IntoBuffer method it can delegate to one of the efficient
-// *AsciiStringReadBlock routines.
-const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
-                                                     unsigned* offset_ptr,
-                                                     unsigned max_chars) {
-  ConsString* current = this;
-  unsigned offset = *offset_ptr;
-  int offset_correction = 0;
-
-  while (true) {
-    String* left = current->first();
-    unsigned left_length = (unsigned)left->length();
-    if (left_length > offset &&
-        (max_chars <= left_length - offset ||
-         (rbb->capacity <= left_length - offset &&
-          (max_chars = left_length - offset, true)))) {  // comma operator!
-      // Left hand side only - iterate unless we have reached the bottom of
-      // the cons tree.  The assignment on the left of the comma operator is
-      // in order to make use of the fact that the -IntoBuffer routines can
-      // produce at most 'capacity' characters.  This enables us to postpone
-      // the point where we switch to the -IntoBuffer routines (below) in order
-      // to maximize the chances of delegating a big chunk of work to the
-      // efficient *AsciiStringReadBlock routines.
-      if (StringShape(left).IsCons()) {
-        current = ConsString::cast(left);
-        continue;
-      } else {
-        const unibrow::byte* answer =
-            String::ReadBlock(left, rbb, &offset, max_chars);
-        *offset_ptr = offset + offset_correction;
-        return answer;
-      }
-    } else if (left_length <= offset) {
-      // Right hand side only - iterate unless we have reached the bottom of
-      // the cons tree.
-      String* right = current->second();
-      offset -= left_length;
-      offset_correction += left_length;
-      if (StringShape(right).IsCons()) {
-        current = ConsString::cast(right);
-        continue;
-      } else {
-        const unibrow::byte* answer =
-            String::ReadBlock(right, rbb, &offset, max_chars);
-        *offset_ptr = offset + offset_correction;
-        return answer;
-      }
-    } else {
-      // The block to be read spans two sides of the ConsString, so we call the
-      // -IntoBuffer version, which will recurse.  The -IntoBuffer methods
-      // are able to assemble data from several part strings because they use
-      // the util_buffer to store their data and never return direct pointers
-      // to their storage.  We don't try to read more than the buffer capacity
-      // here or we can get too much recursion.
-      ASSERT(rbb->remaining == 0);
-      ASSERT(rbb->cursor == 0);
-      current->ConsStringReadBlockIntoBuffer(
-          rbb,
-          &offset,
-          max_chars > rbb->capacity ? rbb->capacity : max_chars);
-      *offset_ptr = offset + offset_correction;
-      return rbb->util_buffer;
-    }
-  }
-}
-
-
-const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
-      unsigned* remaining,
-      unsigned* offset_ptr,
-      unsigned max_chars) {
-  // Cast const char* to unibrow::byte* (signedness difference).
-  const unibrow::byte* b =
-      reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
-  *remaining = max_chars;
-  *offset_ptr += max_chars;
-  return b;
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
-      ReadBlockBuffer* rbb,
-      unsigned* offset_ptr,
-      unsigned max_chars) {
-  unsigned chars_read = 0;
-  unsigned offset = *offset_ptr;
-  const uint16_t* data = GetChars();
-  while (chars_read < max_chars) {
-    uint16_t c = data[offset];
-    if (c <= kMaxAsciiCharCode) {
-      // Fast case for ASCII characters. Cursor is an input output argument.
-      if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
-                                                          rbb->util_buffer,
-                                                          rbb->capacity,
-                                                          rbb->cursor))
-        break;
-    } else {
-      if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
-                                                             rbb->util_buffer,
-                                                             rbb->capacity,
-                                                             rbb->cursor))
-        break;
-    }
-    offset++;
-    chars_read++;
-  }
-  *offset_ptr = offset;
-  rbb->remaining += chars_read;
-}
-
-
-void SeqOneByteString::SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
-                                                 unsigned* offset_ptr,
-                                                 unsigned max_chars) {
-  unsigned capacity = rbb->capacity - rbb->cursor;
-  if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor,
-         reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
-             *offset_ptr * kCharSize,
-         max_chars);
-  rbb->remaining += max_chars;
-  *offset_ptr += max_chars;
-  rbb->cursor += max_chars;
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
-      ReadBlockBuffer* rbb,
-      unsigned* offset_ptr,
-      unsigned max_chars) {
-  unsigned capacity = rbb->capacity - rbb->cursor;
-  if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
-  rbb->remaining += max_chars;
-  *offset_ptr += max_chars;
-  rbb->cursor += max_chars;
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer, or returns a pointer to a buffer
-// where they can be found.  The pointer is not necessarily valid across a GC
-// (see AsciiStringReadBlock).
-const unibrow::byte* String::ReadBlock(String* input,
-                                       ReadBlockBuffer* rbb,
-                                       unsigned* offset_ptr,
-                                       unsigned max_chars) {
-  ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
-  if (max_chars == 0) {
-    rbb->remaining = 0;
-    return NULL;
-  }
-  switch (StringShape(input).representation_tag()) {
-    case kSeqStringTag:
-      if (input->IsOneByteRepresentation()) {
-        SeqOneByteString* str = SeqOneByteString::cast(input);
-        return str->SeqOneByteStringReadBlock(&rbb->remaining,
-                                            offset_ptr,
-                                            max_chars);
-      } else {
-        SeqTwoByteString* str = SeqTwoByteString::cast(input);
-        str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
-                                                 offset_ptr,
-                                                 max_chars);
-        return rbb->util_buffer;
-      }
-    case kConsStringTag:
-      return ConsString::cast(input)->ConsStringReadBlock(rbb,
-                                                          offset_ptr,
-                                                          max_chars);
-    case kExternalStringTag:
-      if (input->IsOneByteRepresentation()) {
-        return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
-            &rbb->remaining,
-            offset_ptr,
-            max_chars);
-      } else {
-        ExternalTwoByteString::cast(input)->
-            ExternalTwoByteStringReadBlockIntoBuffer(rbb,
-                                                     offset_ptr,
-                                                     max_chars);
-        return rbb->util_buffer;
-      }
-    case kSlicedStringTag:
-      return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
-                                                              offset_ptr,
-                                                              max_chars);
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return 0;
-}
-
-
 void Relocatable::PostGarbageCollectionProcessing() {
   Isolate* isolate = Isolate::Current();
   Relocatable* current = isolate->relocatable_top();
@@ -7023,11 +6777,6 @@
 }
 
 
-void StringInputBuffer::Seek(unsigned pos) {
-  Reset(pos, input_);
-}
-
-
 String* ConsStringIteratorOp::Operate(String* string,
                                       unsigned* offset_out,
                                       int32_t* type_out,
@@ -7163,154 +6912,6 @@
 }
 
 
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer.  It can be used with strings
-// that have been glued together to form a ConsString and which must cooperate
-// to fill up a buffer.
-void String::ReadBlockIntoBuffer(String* input,
-                                 ReadBlockBuffer* rbb,
-                                 unsigned* offset_ptr,
-                                 unsigned max_chars) {
-  ASSERT(*offset_ptr <= (unsigned)input->length());
-  if (max_chars == 0) return;
-
-  switch (StringShape(input).representation_tag()) {
-    case kSeqStringTag:
-      if (input->IsOneByteRepresentation()) {
-        SeqOneByteString::cast(input)->SeqOneByteStringReadBlockIntoBuffer(rbb,
-                                                                 offset_ptr,
-                                                                 max_chars);
-        return;
-      } else {
-        SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
-                                                                     offset_ptr,
-                                                                     max_chars);
-        return;
-      }
-    case kConsStringTag:
-      ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
-                                                             offset_ptr,
-                                                             max_chars);
-      return;
-    case kExternalStringTag:
-      if (input->IsOneByteRepresentation()) {
-        ExternalAsciiString::cast(input)->
-            ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
-      } else {
-        ExternalTwoByteString::cast(input)->
-            ExternalTwoByteStringReadBlockIntoBuffer(rbb,
-                                                     offset_ptr,
-                                                     max_chars);
-       }
-       return;
-    case kSlicedStringTag:
-      SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
-                                                                 offset_ptr,
-                                                                 max_chars);
-      return;
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return;
-}
-
-
-const unibrow::byte* String::ReadBlock(String* input,
-                                       unibrow::byte* util_buffer,
-                                       unsigned capacity,
-                                       unsigned* remaining,
-                                       unsigned* offset_ptr) {
-  ASSERT(*offset_ptr <= (unsigned)input->length());
-  unsigned chars = input->length() - *offset_ptr;
-  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
-  const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
-  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
-  *remaining = rbb.remaining;
-  return answer;
-}
-
-
-const unibrow::byte* String::ReadBlock(String** raw_input,
-                                       unibrow::byte* util_buffer,
-                                       unsigned capacity,
-                                       unsigned* remaining,
-                                       unsigned* offset_ptr) {
-  Handle<String> input(raw_input);
-  ASSERT(*offset_ptr <= (unsigned)input->length());
-  unsigned chars = input->length() - *offset_ptr;
-  if (chars > capacity) chars = capacity;
-  ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
-  ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
-  ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
-  *remaining = rbb.remaining;
-  return rbb.util_buffer;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse.  Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane.  Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit.
-void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
-                                               unsigned* offset_ptr,
-                                               unsigned max_chars) {
-  ConsString* current = this;
-  unsigned offset = *offset_ptr;
-  int offset_correction = 0;
-
-  while (true) {
-    String* left = current->first();
-    unsigned left_length = (unsigned)left->length();
-    if (left_length > offset &&
-      max_chars <= left_length - offset) {
-      // Left hand side only - iterate unless we have reached the bottom of
-      // the cons tree.
-      if (StringShape(left).IsCons()) {
-        current = ConsString::cast(left);
-        continue;
-      } else {
-        String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
-        *offset_ptr = offset + offset_correction;
-        return;
-      }
-    } else if (left_length <= offset) {
-      // Right hand side only - iterate unless we have reached the bottom of
-      // the cons tree.
-      offset -= left_length;
-      offset_correction += left_length;
-      String* right = current->second();
-      if (StringShape(right).IsCons()) {
-        current = ConsString::cast(right);
-        continue;
-      } else {
-        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
-        *offset_ptr = offset + offset_correction;
-        return;
-      }
-    } else {
-      // The block to be read spans two sides of the ConsString, so we recurse.
-      // First recurse on the left.
-      max_chars -= left_length - offset;
-      String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
-      // We may have reached the max or there may not have been enough space
-      // in the buffer for the characters in the left hand side.
-      if (offset == left_length) {
-        // Recurse on the right.
-        String* right = String::cast(current->second());
-        offset -= left_length;
-        offset_correction += left_length;
-        String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
-      }
-      *offset_ptr = offset + offset_correction;
-      return;
-    }
-  }
-}
-
-
 uint16_t ConsString::ConsStringGet(int index) {
   ASSERT(index >= 0 && index < this->length());
 
@@ -7347,26 +6948,6 @@
 }
 
 
-const unibrow::byte* SlicedString::SlicedStringReadBlock(
-    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
-  unsigned offset = this->offset();
-  *offset_ptr += offset;
-  const unibrow::byte* answer = String::ReadBlock(String::cast(parent()),
-                                                  buffer, offset_ptr, chars);
-  *offset_ptr -= offset;
-  return answer;
-}
-
-
-void SlicedString::SlicedStringReadBlockIntoBuffer(
-    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
-  unsigned offset = this->offset();
-  *offset_ptr += offset;
-  String::ReadBlockIntoBuffer(String::cast(parent()),
-                              buffer, offset_ptr, chars);
-  *offset_ptr -= offset;
-}
-
 template <typename sinkchar>
 void String::WriteToFlat(String* src,
                          sinkchar* sink,
@@ -7457,46 +7038,28 @@
 }
 
 
-template <typename IteratorA, typename IteratorB>
-static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
-  // General slow case check.  We know that the ia and ib iterators
-  // have the same length.
-  while (ia->has_more()) {
-    uint32_t ca = ia->GetNext();
-    uint32_t cb = ib->GetNext();
-    ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
-    ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
-    if (ca != cb)
-      return false;
-  }
-  return true;
-}
-
-
 // Compares the contents of two strings by reading and comparing
 // int-sized blocks of characters.
 template <typename Char>
-static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
-  int length = a.length();
-  ASSERT_EQ(length, b.length());
-  const Char* pa = a.start();
-  const Char* pb = b.start();
+static inline bool CompareRawStringContents(const Char* const a,
+                                            const Char* const b,
+                                            int length) {
   int i = 0;
 #ifndef V8_HOST_CAN_READ_UNALIGNED
   // If this architecture isn't comfortable reading unaligned ints
   // then we have to check that the strings are aligned before
   // comparing them blockwise.
   const int kAlignmentMask = sizeof(uint32_t) - 1;  // NOLINT
-  uint32_t pa_addr = reinterpret_cast<uint32_t>(pa);
-  uint32_t pb_addr = reinterpret_cast<uint32_t>(pb);
+  uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
+  uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
   if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
 #endif
     const int kStepSize = sizeof(int) / sizeof(Char);  // NOLINT
     int endpoint = length - kStepSize;
     // Compare blocks until we reach near the end of the string.
     for (; i <= endpoint; i += kStepSize) {
-      uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i);
-      uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i);
+      uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
+      uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
       if (wa != wb) {
         return false;
       }
@@ -7514,25 +7077,145 @@
 }
 
 
-template <typename IteratorA>
-static inline bool CompareStringContentsPartial(Isolate* isolate,
-                                                IteratorA* ia,
-                                                String* b) {
-  String::FlatContent content = b->GetFlatContent();
-  if (content.IsFlat()) {
-    if (content.IsAscii()) {
-      VectorIterator<char> ib(content.ToAsciiVector());
-      return CompareStringContents(ia, &ib);
-    } else {
-      VectorIterator<uc16> ib(content.ToUC16Vector());
-      return CompareStringContents(ia, &ib);
+template<typename Chars1, typename Chars2>
+class RawStringComparator : public AllStatic {
+ public:
+  static inline bool compare(const Chars1* a, const Chars2* b, int len) {
+    ASSERT(sizeof(Chars1) != sizeof(Chars2));
+    for (int i = 0; i < len; i++) {
+      if (a[i] != b[i]) {
+        return false;
+      }
     }
-  } else {
-    isolate->objects_string_compare_buffer_b()->Reset(0, b);
-    return CompareStringContents(ia,
-                                 isolate->objects_string_compare_buffer_b());
+    return true;
   }
-}
+};
+
+
+template<>
+class RawStringComparator<uint16_t, uint16_t> {
+ public:
+  static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
+    return CompareRawStringContents(a, b, len);
+  }
+};
+
+
+template<>
+class RawStringComparator<uint8_t, uint8_t> {
+ public:
+  static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
+    return CompareRawStringContents(a, b, len);
+  }
+};
+
+
+class StringComparator {
+  class State {
+   public:
+    explicit inline State(ConsStringIteratorOp* op)
+      : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {}
+
+    inline void Init(String* string, unsigned len) {
+      op_->Reset();
+      int32_t type = string->map()->instance_type();
+      String::Visit(string, 0, *this, *op_, type, len);
+    }
+
+    inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+      is_one_byte_ = true;
+      buffer8_ = chars;
+      length_ = length;
+    }
+
+    inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+      is_one_byte_ = false;
+      buffer16_ = chars;
+      length_ = length;
+    }
+
+    void Advance(unsigned consumed) {
+      ASSERT(consumed <= length_);
+      // Still in buffer.
+      if (length_ != consumed) {
+        if (is_one_byte_) {
+          buffer8_ += consumed;
+        } else {
+          buffer16_ += consumed;
+        }
+        length_ -= consumed;
+        return;
+      }
+      // Advance state.
+      ASSERT(op_->HasMore());
+      int32_t type = 0;
+      unsigned length = 0;
+      String* next = op_->ContinueOperation(&type, &length);
+      ASSERT(next != NULL);
+      ConsStringNullOp null_op;
+      String::Visit(next, 0, *this, null_op, type, length);
+    }
+
+    ConsStringIteratorOp* const op_;
+    bool is_one_byte_;
+    unsigned length_;
+    union {
+      const uint8_t* buffer8_;
+      const uint16_t* buffer16_;
+    };
+    DISALLOW_IMPLICIT_CONSTRUCTORS(State);
+  };
+
+ public:
+  inline StringComparator(ConsStringIteratorOp* op_1,
+                          ConsStringIteratorOp* op_2)
+    : state_1_(op_1),
+      state_2_(op_2) {
+  }
+
+  template<typename Chars1, typename Chars2>
+  static inline bool Equals(State* state_1, State* state_2, unsigned to_check) {
+    const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
+    const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
+    return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
+  }
+
+  bool Equals(unsigned length, String* string_1, String* string_2) {
+    ASSERT(length != 0);
+    state_1_.Init(string_1, length);
+    state_2_.Init(string_2, length);
+    while (true) {
+      unsigned to_check = Min(state_1_.length_, state_2_.length_);
+      ASSERT(to_check > 0 && to_check <= length);
+      bool is_equal;
+      if (state_1_.is_one_byte_) {
+        if (state_2_.is_one_byte_) {
+          is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
+        } else {
+          is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
+        }
+      } else {
+        if (state_2_.is_one_byte_) {
+          is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
+        } else {
+          is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
+        }
+      }
+      // A mismatch anywhere means the strings differ.
+      if (!is_equal) return false;
+      length -= to_check;
+      // Exit condition. Strings are equal.
+      if (length == 0) return true;
+      state_1_.Advance(to_check);
+      state_2_.Advance(to_check);
+    }
+  }
+
+ private:
+  State state_1_;
+  State state_2_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringComparator);
+};
 
 
 bool String::SlowEquals(String* other) {
@@ -7568,58 +7251,19 @@
   String* lhs = this->TryFlattenGetString();
   String* rhs = other->TryFlattenGetString();
 
+  // TODO(dcarney): Compare all types of flat strings with a Visitor.
   if (StringShape(lhs).IsSequentialAscii() &&
       StringShape(rhs).IsSequentialAscii()) {
     const char* str1 = SeqOneByteString::cast(lhs)->GetChars();
     const char* str2 = SeqOneByteString::cast(rhs)->GetChars();
-    return CompareRawStringContents(Vector<const char>(str1, len),
-                                    Vector<const char>(str2, len));
+    return CompareRawStringContents(str1, str2, len);
   }
 
   Isolate* isolate = GetIsolate();
-  String::FlatContent lhs_content = lhs->GetFlatContent();
-  String::FlatContent rhs_content = rhs->GetFlatContent();
-  if (lhs_content.IsFlat()) {
-    if (lhs_content.IsAscii()) {
-      Vector<const char> vec1 = lhs_content.ToAsciiVector();
-      if (rhs_content.IsFlat()) {
-        if (rhs_content.IsAscii()) {
-          Vector<const char> vec2 = rhs_content.ToAsciiVector();
-          return CompareRawStringContents(vec1, vec2);
-        } else {
-          VectorIterator<char> buf1(vec1);
-          VectorIterator<uc16> ib(rhs_content.ToUC16Vector());
-          return CompareStringContents(&buf1, &ib);
-        }
-      } else {
-        VectorIterator<char> buf1(vec1);
-        isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
-        return CompareStringContents(&buf1,
-            isolate->objects_string_compare_buffer_b());
-      }
-    } else {
-      Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
-      if (rhs_content.IsFlat()) {
-        if (rhs_content.IsAscii()) {
-          VectorIterator<uc16> buf1(vec1);
-          VectorIterator<char> ib(rhs_content.ToAsciiVector());
-          return CompareStringContents(&buf1, &ib);
-        } else {
-          Vector<const uc16> vec2(rhs_content.ToUC16Vector());
-          return CompareRawStringContents(vec1, vec2);
-        }
-      } else {
-        VectorIterator<uc16> buf1(vec1);
-        isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
-        return CompareStringContents(&buf1,
-            isolate->objects_string_compare_buffer_b());
-      }
-    }
-  } else {
-    isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
-    return CompareStringContentsPartial(isolate,
-        isolate->objects_string_compare_buffer_a(), rhs);
-  }
+  StringComparator comparator(isolate->objects_string_compare_iterator_a(),
+                              isolate->objects_string_compare_iterator_b());
+
+  return comparator.Equals(static_cast<unsigned>(len), lhs, rhs);
 }
 
 
@@ -7764,11 +7408,12 @@
 }
 
 
-bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
-                               uint32_t* index,
-                               int length) {
+bool String::ComputeArrayIndex(uint32_t* index) {
+  int length = this->length();
   if (length == 0 || length > kMaxArrayIndexSize) return false;
-  uc32 ch = buffer->GetNext();
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(this, &op);
+  uint16_t ch = stream.GetNext();
 
   // If the string begins with a '0' character, it must only consist
   // of it to be a legal array index.
@@ -7781,8 +7426,8 @@
   int d = ch - '0';
   if (d < 0 || d > 9) return false;
   uint32_t result = d;
-  while (buffer->has_more()) {
-    d = buffer->GetNext() - '0';
+  while (stream.HasMore()) {
+    d = stream.GetNext() - '0';
     if (d < 0 || d > 9) return false;
     // Check that the new result is below the 32 bit limit.
     if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
@@ -7803,8 +7448,7 @@
     *index = (kArrayIndexHashMask & field) >> kHashShift;
     return true;
   } else {
-    StringInputBuffer buffer(this);
-    return ComputeArrayIndex(&buffer, index, length());
+    return ComputeArrayIndex(index);
   }
 }
 
@@ -9546,9 +9190,9 @@
     }
   }
   FixedArrayBase* old_elements = elements();
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
   MaybeObject* maybe_obj =
-      accessor->CopyElements(this, new_elements, new_elements_kind);
+      accessor->CopyElements(this, new_elements, elements_kind);
   if (maybe_obj->IsFailure()) return maybe_obj;
 
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
@@ -9606,9 +9250,9 @@
   }
 
   FixedArrayBase* old_elements = elements();
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
   { MaybeObject* maybe_obj =
-        accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+        accessor->CopyElements(this, elems, elements_kind);
     if (maybe_obj->IsFailure()) return maybe_obj;
   }
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
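
The StringComparator that replaces the old CompareStringContents machinery in
this file compares two possibly nested strings chunk by chunk: each pass
compares only the overlap of the two current flat segments, then advances
whichever side was exhausted, dispatching on one-byte vs. two-byte chunks via
RawStringComparator. A standalone sketch of that loop over plain byte chunks
(illustrative names, not V8 code):

    #include <algorithm>
    #include <cstring>
    #include <string>
    #include <vector>

    // Stand-in for the flat pieces a ConsStringIteratorOp would yield.
    typedef std::vector<std::string> Segments;

    static bool SegmentedEquals(const Segments& a, const Segments& b) {
      size_t ia = 0, oa = 0;  // current segment and offset within it, side a
      size_t ib = 0, ob = 0;  // likewise for side b
      while (true) {
        // Skip exhausted (or empty) segments on either side.
        while (ia < a.size() && oa == a[ia].size()) { ++ia; oa = 0; }
        while (ib < b.size() && ob == b[ib].size()) { ++ib; ob = 0; }
        bool a_done = (ia == a.size());
        bool b_done = (ib == b.size());
        if (a_done || b_done) return a_done && b_done;
        // Compare only the overlap of the two current chunks.
        size_t to_check = std::min(a[ia].size() - oa, b[ib].size() - ob);
        if (memcmp(a[ia].data() + oa, b[ib].data() + ob, to_check) != 0) {
          return false;
        }
        oa += to_check;
        ob += to_check;
      }
    }
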
diff --git a/src/objects.h b/src/objects.h
index 88194c4..f1fa27a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -7200,9 +7200,7 @@
   // Returns a hash value used for the property table
   inline uint32_t Hash();
 
-  static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
-                                uint32_t* index,
-                                int length);
+  bool ComputeArrayIndex(uint32_t* index);
 
   // Externalization.
   bool MakeExternal(v8::String::ExternalStringResource* resource);
@@ -7315,18 +7313,6 @@
   const uc16* GetTwoByteData();
   const uc16* GetTwoByteData(unsigned start);
 
-  // Support for StringInputBuffer
-  static const unibrow::byte* ReadBlock(String* input,
-                                        unibrow::byte* util_buffer,
-                                        unsigned capacity,
-                                        unsigned* remaining,
-                                        unsigned* offset);
-  static const unibrow::byte* ReadBlock(String** input,
-                                        unibrow::byte* util_buffer,
-                                        unsigned capacity,
-                                        unsigned* remaining,
-                                        unsigned* offset);
-
   // Helper function for flattening strings.
   template <typename sinkchar>
   static void WriteToFlat(String* source,
@@ -7385,33 +7371,6 @@
                            int32_t type,
                            unsigned length);
 
- protected:
-  class ReadBlockBuffer {
-   public:
-    ReadBlockBuffer(unibrow::byte* util_buffer_,
-                    unsigned cursor_,
-                    unsigned capacity_,
-                    unsigned remaining_) :
-      util_buffer(util_buffer_),
-      cursor(cursor_),
-      capacity(capacity_),
-      remaining(remaining_) {
-    }
-    unibrow::byte* util_buffer;
-    unsigned       cursor;
-    unsigned       capacity;
-    unsigned       remaining;
-  };
-
-  static inline const unibrow::byte* ReadBlock(String* input,
-                                               ReadBlockBuffer* buffer,
-                                               unsigned* offset,
-                                               unsigned max_chars);
-  static void ReadBlockIntoBuffer(String* input,
-                                  ReadBlockBuffer* buffer,
-                                  unsigned* offset_ptr,
-                                  unsigned max_chars);
-
  private:
   // Try to flatten the top level ConsString that is hiding behind this
   // string.  This is a no-op unless the string is a ConsString.  Flatten
@@ -7487,14 +7446,6 @@
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize);
 
-  // Support for StringInputBuffer.
-  inline void SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                                unsigned* offset,
-                                                unsigned chars);
-  inline const unibrow::byte* SeqOneByteStringReadBlock(unsigned* remaining,
-                                                      unsigned* offset,
-                                                      unsigned chars);
-
   DECLARE_VERIFIER(SeqOneByteString)
 
  private:
@@ -7539,11 +7490,6 @@
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
 
-  // Support for StringInputBuffer.
-  inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                                  unsigned* offset_ptr,
-                                                  unsigned chars);
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
 };
@@ -7586,14 +7532,6 @@
   static const int kSecondOffset = kFirstOffset + kPointerSize;
   static const int kSize = kSecondOffset + kPointerSize;
 
-  // Support for StringInputBuffer.
-  inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
-                                                  unsigned* offset_ptr,
-                                                  unsigned chars);
-  inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                            unsigned* offset_ptr,
-                                            unsigned chars);
-
   // Minimum length for a cons string.
   static const int kMinLength = 13;
 
@@ -7638,13 +7576,6 @@
   static const int kOffsetOffset = kParentOffset + kPointerSize;
   static const int kSize = kOffsetOffset + kPointerSize;
 
-  // Support for StringInputBuffer
-  inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
-                                                    unsigned* offset_ptr,
-                                                    unsigned chars);
-  inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                              unsigned* offset_ptr,
-                                              unsigned chars);
   // Minimum length for a sliced string.
   static const int kMinLength = 13;
 
@@ -7721,14 +7652,6 @@
   template<typename StaticVisitor>
   inline void ExternalAsciiStringIterateBody();
 
-  // Support for StringInputBuffer.
-  const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
-                                                    unsigned* offset,
-                                                    unsigned chars);
-  inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                                     unsigned* offset,
-                                                     unsigned chars);
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
 };
@@ -7769,12 +7692,6 @@
   template<typename StaticVisitor>
   inline void ExternalTwoByteStringIterateBody();
 
-
-  // Support for StringInputBuffer.
-  void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
-                                                unsigned* offset_ptr,
-                                                unsigned chars);
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
 };
@@ -7821,24 +7738,6 @@
 };
 
 
-// Note that StringInputBuffers are not valid across a GC!  To fix this
-// it would have to store a String Handle instead of a String* and
-// AsciiStringReadBlock would have to be modified to use memcpy.
-//
-// StringInputBuffer is able to traverse any string regardless of how
-// deeply nested a sequence of ConsStrings it is made of.  However,
-// performance will be better if deep strings are flattened before they
-// are traversed.  Since flattening requires memory allocation this is
-// not always desirable, however (esp. in debugging situations).
-class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
- public:
-  virtual void Seek(unsigned pos);
-  inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
-  explicit inline StringInputBuffer(String* backing):
-      unibrow::InputBuffer<String, String*, 1024>(backing) {}
-};
-
-
 // A ConsStringOp that returns null.
 // Useful when the operation to apply on a ConsString
 // requires an expensive data structure.
@@ -7898,11 +7797,11 @@
 class StringCharacterStream {
  public:
   inline StringCharacterStream(String* string,
-                               unsigned offset,
-                               ConsStringIteratorOp* op);
+                               ConsStringIteratorOp* op,
+                               unsigned offset = 0);
   inline uint16_t GetNext();
   inline bool HasMore();
-  inline void Reset(String* string, unsigned offset, ConsStringIteratorOp* op);
+  inline void Reset(String* string, unsigned offset = 0);
   inline void VisitOneByteString(const uint8_t* chars, unsigned length);
   inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
 
diff --git a/src/runtime.cc b/src/runtime.cc
index 4920957..f6051ac 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -3489,17 +3489,17 @@
   str1->TryFlatten();
   str2->TryFlatten();
 
-  StringInputBuffer& buf1 =
-      *isolate->runtime_state()->string_locale_compare_buf1();
-  StringInputBuffer& buf2 =
-      *isolate->runtime_state()->string_locale_compare_buf2();
-
-  buf1.Reset(str1);
-  buf2.Reset(str2);
+  ConsStringIteratorOp* op1 =
+      isolate->runtime_state()->string_locale_compare_it1();
+  ConsStringIteratorOp* op2 =
+      isolate->runtime_state()->string_locale_compare_it2();
+  // TODO(dcarney): Can do array compares here more efficiently.
+  StringCharacterStream stream1(str1, op1);
+  StringCharacterStream stream2(str2, op2);
 
   for (int i = 0; i < end; i++) {
-    uint16_t char1 = buf1.GetNext();
-    uint16_t char2 = buf2.GetNext();
+    uint16_t char1 = stream1.GetNext();
+    uint16_t char2 = stream2.GetNext();
     if (char1 != char2) return Smi::FromInt(char1 - char2);
   }
 
@@ -5143,11 +5143,11 @@
   int escaped_length = 0;
   int length = source->length();
   {
-    Access<StringInputBuffer> buffer(
-        isolate->runtime_state()->string_input_buffer());
-    buffer->Reset(source);
-    while (buffer->has_more()) {
-      uint16_t character = buffer->GetNext();
+    Access<ConsStringIteratorOp> op(
+        isolate->runtime_state()->string_iterator());
+    StringCharacterStream stream(source, op.value());
+    while (stream.HasMore()) {
+      uint16_t character = stream.GetNext();
       if (character >= 256) {
         escaped_length += 6;
       } else if (IsNotEscaped(character)) {
@@ -5175,11 +5175,11 @@
   String* destination = String::cast(o);
   int dest_position = 0;
 
-  Access<StringInputBuffer> buffer(
-      isolate->runtime_state()->string_input_buffer());
-  buffer->Rewind();
-  while (buffer->has_more()) {
-    uint16_t chr = buffer->GetNext();
+  Access<ConsStringIteratorOp> op(
+      isolate->runtime_state()->string_iterator());
+  StringCharacterStream stream(source, op.value());
+  while (stream.HasMore()) {
+    uint16_t chr = stream.GetNext();
     if (chr >= 256) {
       destination->Set(dest_position, '%');
       destination->Set(dest_position+1, 'u');
@@ -5717,15 +5717,15 @@
 
   // Convert all characters to upper case, assuming that they will fit
   // in the buffer
-  Access<StringInputBuffer> buffer(
-      isolate->runtime_state()->string_input_buffer());
-  buffer->Reset(s);
+  Access<ConsStringIteratorOp> op(
+      isolate->runtime_state()->string_iterator());
+  StringCharacterStream stream(s, op.value());
   unibrow::uchar chars[Converter::kMaxWidth];
   // We can assume that the string is not empty
-  uc32 current = buffer->GetNext();
+  uc32 current = stream.GetNext();
   for (int i = 0; i < length;) {
-    bool has_next = buffer->has_more();
-    uc32 next = has_next ? buffer->GetNext() : 0;
+    bool has_next = stream.HasMore();
+    uc32 next = has_next ? stream.GetNext() : 0;
     int char_length = mapping->get(current, next, chars);
     if (char_length == 0) {
       // The case conversion of this character is the character itself.
@@ -5755,8 +5755,8 @@
         if (next_length == 0) next_length = 1;
       }
       int current_length = i + char_length + next_length;
-      while (buffer->has_more()) {
-        current = buffer->GetNext();
+      while (stream.HasMore()) {
+        current = stream.GetNext();
         // NOTE: we use 0 as the next character here because, while
         // the next character may affect what a character converts to,
         // it does not in any case affect the length of what it convert
@@ -6960,23 +6960,21 @@
 }
 
 
-static Object* StringInputBufferCompare(RuntimeState* state,
+static Object* StringCharacterStreamCompare(RuntimeState* state,
-                                        String* x,
-                                        String* y) {
+                                            String* x,
+                                            String* y) {
-  StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
-  StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
-  bufx.Reset(x);
-  bufy.Reset(y);
-  while (bufx.has_more() && bufy.has_more()) {
-    int d = bufx.GetNext() - bufy.GetNext();
+  StringCharacterStream stream_x(x, state->string_iterator_compare_x());
+  StringCharacterStream stream_y(y, state->string_iterator_compare_y());
+  while (stream_x.HasMore() && stream_y.HasMore()) {
+    int d = stream_x.GetNext() - stream_y.GetNext();
     if (d < 0) return Smi::FromInt(LESS);
     else if (d > 0) return Smi::FromInt(GREATER);
   }
 
   // x is (non-trivial) prefix of y:
-  if (bufy.has_more()) return Smi::FromInt(LESS);
+  if (stream_y.HasMore()) return Smi::FromInt(LESS);
   // y is prefix of x:
-  return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+  return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL);
 }
 
 
@@ -7020,7 +7018,7 @@
     result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
   }
   ASSERT(result ==
-      StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
+      StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y));
   return result;
 }
 
@@ -7056,7 +7054,7 @@
   }
 
   return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
-      : StringInputBufferCompare(isolate->runtime_state(), x, y);
+      : StringCharacterStreamCompare(isolate->runtime_state(), x, y);
 }
 
 
@@ -9885,9 +9883,10 @@
   ASSERT(args.length() == 1);
 
   CONVERT_ARG_CHECKED(String, string, 0);
-  StringInputBuffer buffer(string);
-  while (buffer.has_more()) {
-    uint16_t character = buffer.GetNext();
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(string, &op);
+  while (stream.HasMore()) {
+    uint16_t character = stream.GetNext();
     PrintF("%c", character);
   }
   return string;
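
StringCharacterStreamCompare above is a straight lexicographic three-way
compare over two character streams, with the two prefix cases resolved after
the shared loop. The same control flow in standalone form (illustrative
names, not V8 code):

    #include <cstdint>
    #include <string>

    enum Result { LESS = -1, EQUAL = 0, GREATER = 1 };

    static Result CompareUtf16(const std::u16string& x,
                               const std::u16string& y) {
      size_t i = 0, j = 0;
      while (i < x.size() && j < y.size()) {
        int d = x[i++] - y[j++];
        if (d < 0) return LESS;
        if (d > 0) return GREATER;
      }
      if (j < y.size()) return LESS;            // x is a proper prefix of y
      return (i < x.size()) ? GREATER : EQUAL;  // y is a prefix of x, or equal
    }
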
diff --git a/src/runtime.h b/src/runtime.h
index 7a21bb9..47102a0 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -575,8 +575,8 @@
 
 class RuntimeState {
  public:
-  StaticResource<StringInputBuffer>* string_input_buffer() {
-    return &string_input_buffer_;
+  StaticResource<ConsStringIteratorOp>* string_iterator() {
+    return &string_iterator_;
   }
   unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
     return &to_upper_mapping_;
@@ -584,29 +584,29 @@
   unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
     return &to_lower_mapping_;
   }
-  StringInputBuffer* string_input_buffer_compare_bufx() {
-    return &string_input_buffer_compare_bufx_;
+  ConsStringIteratorOp* string_iterator_compare_x() {
+    return &string_iterator_compare_x_;
   }
-  StringInputBuffer* string_input_buffer_compare_bufy() {
-    return &string_input_buffer_compare_bufy_;
+  ConsStringIteratorOp* string_iterator_compare_y() {
+    return &string_iterator_compare_y_;
   }
-  StringInputBuffer* string_locale_compare_buf1() {
-    return &string_locale_compare_buf1_;
+  ConsStringIteratorOp* string_locale_compare_it1() {
+    return &string_locale_compare_it1_;
   }
-  StringInputBuffer* string_locale_compare_buf2() {
-    return &string_locale_compare_buf2_;
+  ConsStringIteratorOp* string_locale_compare_it2() {
+    return &string_locale_compare_it2_;
   }
 
  private:
   RuntimeState() {}
-  // Non-reentrant string buffer for efficient general use in the runtime.
+  // Non-reentrant iterator for efficient general use in the runtime.
-  StaticResource<StringInputBuffer> string_input_buffer_;
+  StaticResource<ConsStringIteratorOp> string_iterator_;
   unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
   unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
-  StringInputBuffer string_input_buffer_compare_bufx_;
-  StringInputBuffer string_input_buffer_compare_bufy_;
-  StringInputBuffer string_locale_compare_buf1_;
-  StringInputBuffer string_locale_compare_buf2_;
+  ConsStringIteratorOp string_iterator_compare_x_;
+  ConsStringIteratorOp string_iterator_compare_y_;
+  ConsStringIteratorOp string_locale_compare_it1_;
+  ConsStringIteratorOp string_locale_compare_it2_;
 
   friend class Isolate;
   friend class Runtime;
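
The renamed RuntimeState members are still handed out through the
StaticResource/Access pair seen at the runtime.cc call sites
(Access<ConsStringIteratorOp> op(...) followed by op.value()). A minimal
sketch of that checkout idiom, assuming it is a scoped guard over a single
pre-allocated, non-reentrant object (V8's real classes additionally assert
exclusive use in debug builds):

    template <typename T>
    class StaticResource {
     public:
      StaticResource() : in_use_(false) {}
      T* Obtain() { in_use_ = true; return &instance_; }
      void Release() { in_use_ = false; }
     private:
      T instance_;
      bool in_use_;
    };

    template <typename T>
    class Access {
     public:
      explicit Access(StaticResource<T>* resource)
          : resource_(resource), value_(resource->Obtain()) {}
      ~Access() { resource_->Release(); }
      T* value() { return value_; }
      T* operator->() { return value_; }
     private:
      StaticResource<T>* resource_;
      T* value_;
    };
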
diff --git a/src/scanner.h b/src/scanner.h
index 6036833..0a560e3 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -430,10 +430,6 @@
   // be empty).
   bool ScanRegExpFlags();
 
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
  private:
   // The current and look-ahead token.
   struct TokenDesc {
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 30519b5..fc07d94 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -311,14 +311,14 @@
 
 
 bool StringStream::Put(String* str, int start, int end) {
-  StringInputBuffer name_buffer(str);
-  name_buffer.Seek(start);
-  for (int i = start; i < end && name_buffer.has_more(); i++) {
-    int c = name_buffer.GetNext();
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(str, &op, start);
+  for (int i = start; i < end && stream.HasMore(); i++) {
+    uint16_t c = stream.GetNext();
     if (c >= 127 || c < 32) {
       c = '?';
     }
-    if (!Put(c)) {
+    if (!Put(static_cast<char>(c))) {
       return false;  // Output was truncated.
     }
   }
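
The rewritten StringStream::Put loop clamps anything outside printable ASCII
to '?' before the narrowing cast; the check in isolation:

    static inline char ToPrintableAscii(uint16_t c) {
      return (c >= 127 || c < 32) ? '?' : static_cast<char>(c);
    }
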
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index 8ceefe8..c3a00ed 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -137,109 +137,6 @@
   }
 }
 
-uchar CharacterStream::GetNext() {
-  uchar result = DecodeCharacter(buffer_, &cursor_);
-  if (remaining_ == 1) {
-    cursor_ = 0;
-    FillBuffer();
-  } else {
-    remaining_--;
-  }
-  ASSERT(BoundsCheck(cursor_));
-  return result;
-}
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define IF_LITTLE(expr) expr
-#define IF_BIG(expr)    ((void) 0)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-#define IF_LITTLE(expr) ((void) 0)
-#define IF_BIG(expr)    expr
-#else
-#warning Unknown byte ordering
-#endif
-
-bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
-    unsigned capacity, unsigned& offset) {
-  if (offset >= capacity) return false;
-  buffer[offset] = c;
-  offset += 1;
-  return true;
-}
-
-bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
-    unsigned capacity, unsigned& offset) {
-  unsigned aligned = (offset + 0x3) & ~0x3;
-  if ((aligned + sizeof(uchar)) > capacity)
-    return false;
-  if (offset == aligned) {
-    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
-    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
-  } else {
-    buffer[offset] = 0x80;
-    IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
-    IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
-  }
-  offset = aligned + sizeof(uchar);
-  return true;
-}
-
-bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
-    unsigned& offset) {
-  if (c <= Utf8::kMaxOneByteChar) {
-    return EncodeAsciiCharacter(c, buffer, capacity, offset);
-  } else {
-    return EncodeNonAsciiCharacter(c, buffer, capacity, offset);
-  }
-}
-
-uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
-  byte b = buffer[*offset];
-  if (b <= Utf8::kMaxOneByteChar) {
-    (*offset)++;
-    return b;
-  } else {
-    unsigned aligned = (*offset + 0x3) & ~0x3;
-    *offset = aligned + sizeof(uchar);
-    IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
-    IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
-                    ~(1 << 31));
-  }
-}
-
-#undef IF_LITTLE
-#undef IF_BIG
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::FillBuffer() {
-  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Rewind() {
-  Reset(input_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
-  input_ = input;
-  remaining_ = 0;
-  cursor_ = 0;
-  offset_ = position;
-  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(I input) {
-  Reset(0, input);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Seek(unsigned position) {
-  offset_ = position;
-  buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
 Utf8DecoderBase::Utf8DecoderBase()
   : unbuffered_start_(NULL),
     utf16_length_(0),
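
The EncodeNonAsciiCharacter/DecodeCharacter pair removed above hinged on one
idiom: rounding an offset up to the next 4-byte boundary with
(offset + 0x3) & ~0x3, so that a full uchar fits in a single aligned word
access. The computation in isolation:

    #include <cassert>

    static inline unsigned AlignUp4(unsigned offset) {
      return (offset + 0x3) & ~0x3u;
    }

    int main() {
      assert(AlignUp4(0) == 0);
      assert(AlignUp4(1) == 4);
      assert(AlignUp4(4) == 4);
      assert(AlignUp4(7) == 8);
      return 0;
    }
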
diff --git a/src/unicode.cc b/src/unicode.cc
index 3788dd6..04065b0 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -277,33 +277,6 @@
 }
 
 
-unsigned CharacterStream::Length() {
-  unsigned result = 0;
-  while (has_more()) {
-    result++;
-    GetNext();
-  }
-  Rewind();
-  return result;
-}
-
-unsigned CharacterStream::Utf16Length() {
-  unsigned result = 0;
-  while (has_more()) {
-    uchar c = GetNext();
-    result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
-  }
-  Rewind();
-  return result;
-}
-
-void CharacterStream::Seek(unsigned position) {
-  Rewind();
-  for (unsigned i = 0; i < position; i++) {
-    GetNext();
-  }
-}
-
 void Utf8DecoderBase::Reset(uint16_t* buffer,
                             unsigned buffer_length,
                             const uint8_t* stream,
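
The removed Utf16Length counted two code units for any code point above
Utf16::kMaxNonSurrogateCharCode, i.e. anything outside the BMP that needs a
surrogate pair. The same computation over a plain sequence of code points
(0xFFFF assumed as that constant's value):

    #include <cstdint>
    #include <vector>

    static unsigned Utf16Length(const std::vector<uint32_t>& code_points) {
      unsigned result = 0;
      for (size_t i = 0; i < code_points.size(); i++) {
        result += (code_points[i] > 0xFFFF) ? 2 : 1;  // surrogate pair = 2
      }
      return result;
    }
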
diff --git a/src/unicode.h b/src/unicode.h
index a9b27aa..626d988 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -100,21 +100,6 @@
   static const uchar kMaxCodePoint;
 };
 
-// --- U t f   8   a n d   16 ---
-
-template <typename Data>
-class Buffer {
- public:
-  inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
-  inline Buffer() : data_(0), length_(0) { }
-  Data data() { return data_; }
-  unsigned length() { return length_; }
- private:
-  Data data_;
-  unsigned length_;
-};
-
-
 class Utf16 {
  public:
   static inline bool IsLeadSurrogate(int code) {
@@ -173,72 +158,6 @@
                               unsigned* cursor);
 };
 
-// --- C h a r a c t e r   S t r e a m ---
-
-class CharacterStream {
- public:
-  inline uchar GetNext();
-  inline bool has_more() { return remaining_ != 0; }
-  // Note that default implementation is not efficient.
-  virtual void Seek(unsigned);
-  unsigned Length();
-  unsigned Utf16Length();
-  virtual ~CharacterStream() { }
-  static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
-      unsigned& offset);
-  static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
-      unsigned capacity, unsigned& offset);
-  static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
-      unsigned capacity, unsigned& offset);
-  static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
-  virtual void Rewind() = 0;
-
- protected:
-  virtual void FillBuffer() = 0;
-  virtual bool BoundsCheck(unsigned offset) = 0;
-  // The number of characters left in the current buffer
-  unsigned remaining_;
-  // The current offset within the buffer
-  unsigned cursor_;
-  // The buffer containing the decoded characters.
-  const byte* buffer_;
-};
-
-// --- I n p u t   B u f f e r ---
-
-/**
- * Provides efficient access to encoded characters in strings.  It
- * does so by reading characters one block at a time, rather than one
- * character at a time, which gives string implementations an
- * opportunity to optimize the decoding.
- */
-template <class Reader, class Input = Reader*, unsigned kSize = 256>
-class InputBuffer : public CharacterStream {
- public:
-  virtual void Rewind();
-  inline void Reset(Input input);
-  void Seek(unsigned position);
-  inline void Reset(unsigned position, Input input);
- protected:
-  InputBuffer() { }
-  explicit InputBuffer(Input input) { Reset(input); }
-  virtual void FillBuffer();
-  virtual bool BoundsCheck(unsigned offset) {
-    return (buffer_ != util_buffer_) || (offset < kSize);
-  }
-
-  // A custom offset that can be used by the string implementation to
-  // mark progress within the encoded string.
-  unsigned offset_;
-  // The input string
-  Input input_;
-  // To avoid heap allocation, we keep an internal buffer to which
-  // the encoded string can write its characters.  The string
-  // implementation is free to decide whether it wants to use this
-  // buffer or not.
-  byte util_buffer_[kSize];
-};
-
 
 class Utf8DecoderBase {
  public:
diff --git a/src/v8conversions.cc b/src/v8conversions.cc
index c6755d5..26a4868 100644
--- a/src/v8conversions.cc
+++ b/src/v8conversions.cc
@@ -41,40 +41,40 @@
 
 namespace {
 
-// C++-style iterator adaptor for StringInputBuffer
+// C++-style iterator adaptor for StringCharacterStream
 // (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
+class StringCharacterStreamIterator {
  public:
   class EndMarker {};
 
-  explicit StringInputBufferIterator(StringInputBuffer* buffer);
+  explicit StringCharacterStreamIterator(StringCharacterStream* stream);
 
-  int operator*() const;
+  uint16_t operator*() const;
   void operator++();
   bool operator==(EndMarker const&) const { return end_; }
   bool operator!=(EndMarker const& m) const { return !end_; }
 
  private:
-  StringInputBuffer* const buffer_;
-  int current_;
+  StringCharacterStream* const stream_;
+  uint16_t current_;
   bool end_;
 };
 
 
-StringInputBufferIterator::StringInputBufferIterator(
-    StringInputBuffer* buffer) : buffer_(buffer) {
+StringCharacterStreamIterator::StringCharacterStreamIterator(
+    StringCharacterStream* stream) : stream_(stream) {
   ++(*this);
 }
 
-int StringInputBufferIterator::operator*() const {
+uint16_t StringCharacterStreamIterator::operator*() const {
   return current_;
 }
 
 
-void StringInputBufferIterator::operator++() {
-  end_ = !buffer_->has_more();
+void StringCharacterStreamIterator::operator++() {
+  end_ = !stream_->HasMore();
   if (!end_) {
-    current_ = buffer_->GetNext();
+    current_ = stream_->GetNext();
   }
 }
 }  // End anonymous namespace.
@@ -83,6 +83,7 @@
 double StringToDouble(UnicodeCache* unicode_cache,
                       String* str, int flags, double empty_string_val) {
   StringShape shape(str);
+  // TODO(dcarney): Use a Visitor here.
   if (shape.IsSequentialAscii()) {
     const char* begin = SeqOneByteString::cast(str)->GetChars();
     const char* end = begin + str->length();
@@ -94,10 +95,11 @@
     return InternalStringToDouble(unicode_cache, begin, end, flags,
                                   empty_string_val);
   } else {
-    StringInputBuffer buffer(str);
+    ConsStringIteratorOp op;
+    StringCharacterStream stream(str, &op);
     return InternalStringToDouble(unicode_cache,
-                                  StringInputBufferIterator(&buffer),
-                                  StringInputBufferIterator::EndMarker(),
+                                  StringCharacterStreamIterator(&stream),
+                                  StringCharacterStreamIterator::EndMarker(),
                                   flags,
                                   empty_string_val);
   }
@@ -108,6 +110,7 @@
                    String* str,
                    int radix) {
   StringShape shape(str);
+  // TODO(dcarney): Use a Visitor here.
   if (shape.IsSequentialAscii()) {
     const char* begin = SeqOneByteString::cast(str)->GetChars();
     const char* end = begin + str->length();
@@ -117,10 +120,11 @@
     const uc16* end = begin + str->length();
     return InternalStringToInt(unicode_cache, begin, end, radix);
   } else {
-    StringInputBuffer buffer(str);
+    ConsStringIteratorOp op;
+    StringCharacterStream stream(str, &op);
     return InternalStringToInt(unicode_cache,
-                               StringInputBufferIterator(&buffer),
-                               StringInputBufferIterator::EndMarker(),
+                               StringCharacterStreamIterator(&stream),
+                               StringCharacterStreamIterator::EndMarker(),
                                radix);
   }
 }
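
The renamed adaptor keeps its shape: the constructor and operator++ pre-read
one character, and the end test compares against a sentinel of a distinct
EndMarker type rather than against another iterator. The pattern in standalone
form, over any stream exposing HasMore()/GetNext() (illustrative, not V8
code):

    #include <cstdint>

    template <typename Stream>  // Stream: bool HasMore(); uint16_t GetNext()
    class StreamIterator {
     public:
      class EndMarker {};

      explicit StreamIterator(Stream* stream)
          : stream_(stream), current_(0), end_(false) {
        ++(*this);  // pre-read the first character
      }
      uint16_t operator*() const { return current_; }
      void operator++() {
        end_ = !stream_->HasMore();
        if (!end_) current_ = stream_->GetNext();
      }
      bool operator==(const EndMarker&) const { return end_; }
      bool operator!=(const EndMarker&) const { return !end_; }

     private:
      Stream* const stream_;
      uint16_t current_;
      bool end_;
    };
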
diff --git a/src/version.cc b/src/version.cc
index e675f9f..f203b64 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     16
-#define BUILD_NUMBER      1
+#define BUILD_NUMBER      2
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f864174..67acbf0 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -53,7 +53,7 @@
 
 void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
   Memory::uint64_at(pc_) = x;
-  if (rmode != RelocInfo::NONE) {
+  if (!RelocInfo::IsNone(rmode)) {
     RecordRelocInfo(rmode, x);
   }
   pc_ += sizeof(uint64_t);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index cb0cd86..160a0df 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -110,7 +110,7 @@
     __ or_(rdi, rcx);
 
     // Get the sahf supported flag, from CPUID(0x80000001)
-    __ movq(rax, 0x80000001, RelocInfo::NONE);
+    __ movq(rax, 0x80000001, RelocInfo::NONE64);
     __ cpuid();
   }
   supported_ = kDefaultCpuFeatures;
@@ -173,7 +173,7 @@
 #endif
 
   // Patch the code.
-  patcher.masm()->movq(r10, target, RelocInfo::NONE);
+  patcher.masm()->movq(r10, target, RelocInfo::NONE64);
   patcher.masm()->call(r10);
 
   // Check that the size of the code generated is as expected.
@@ -1498,7 +1498,7 @@
 
 void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
   // Non-relocatable values might not need a 64-bit representation.
-  if (rmode == RelocInfo::NONE) {
+  if (RelocInfo::IsNone(rmode)) {
     // Sadly, there is no zero or sign extending move for 8-bit immediates.
     if (is_int32(value)) {
       movq(dst, Immediate(static_cast<int32_t>(value)));
@@ -1558,11 +1558,11 @@
 void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
   // If there is no relocation info, emit the value of the handle efficiently
  // (possibly using less than 8 bytes for the value).
-  if (mode == RelocInfo::NONE) {
+  if (RelocInfo::IsNone(mode)) {
     // There is no possible reason to store a heap pointer without relocation
     // info, so it must be a smi.
     ASSERT(value->IsSmi());
-    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
+    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
   } else {
     EnsureSpace ensure_space(this);
     ASSERT(value->IsHeapObject());
@@ -2995,7 +2995,7 @@
 // Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  ASSERT(rmode != RelocInfo::NONE);
+  ASSERT(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index f950368..650cf5d 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -333,7 +333,11 @@
 
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
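
The static_cast above is forced by C++ enum rules: OR-ing two
AllocationFlags enumerators yields an int, which will not convert back
to the enum implicitly. A minimal sketch of the pattern:

    AllocationFlags flags = TAG_OBJECT;
    // The bitwise OR has type int; cast it back to the enum explicitly.
    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
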
@@ -2148,7 +2152,7 @@
       Label continue_sqrt, continue_rsqrt, not_plus_half;
       // Test for 0.5.
       // Load double_scratch with 0.5.
-      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
       __ movq(double_scratch, scratch);
       // Already ruled out NaNs for exponent.
       __ ucomisd(double_scratch, double_exponent);
@@ -2158,7 +2162,7 @@
       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
       // According to IEEE-754, double-precision -Infinity has the highest
       // 12 bits set and the lowest 52 bits cleared.
-      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
       __ movq(double_scratch, scratch);
       __ ucomisd(double_scratch, double_base);
       // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -2190,7 +2194,7 @@
       // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
       // According to IEEE-754, double-precision -Infinity has the highest
       // 12 bits set and the lowest 52 bits cleared.
-      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
       __ movq(double_scratch, scratch);
       __ ucomisd(double_scratch, double_base);
       // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3930,8 +3934,7 @@
     __ movq(rdi, rax);
 #endif
     __ movq(kScratchRegister,
-            FUNCTION_ADDR(Runtime::PerformGC),
-            RelocInfo::RUNTIME_ENTRY);
+            ExternalReference::perform_gc_function(masm->isolate()));
     __ call(kScratchRegister);
   }
 
@@ -4009,7 +4012,7 @@
   __ j(zero, &retry, Label::kNear);
 
   // Special handling of out of memory exceptions.
-  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE64);
   __ cmpq(rax, kScratchRegister);
   __ j(equal, throw_out_of_memory_exception);
 
@@ -4089,7 +4092,7 @@
 
   // Do full GC and retry runtime call one final time.
   Failure* failure = Failure::InternalError();
-  __ movq(rax, failure, RelocInfo::NONE);
+  __ movq(rax, failure, RelocInfo::NONE64);
   GenerateCore(masm,
                &throw_normal_exception,
                &throw_termination_exception,
@@ -4108,7 +4111,7 @@
   // Set pending exception and rax to out of memory exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
-  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE64);
   __ Store(pending_exception, rax);
   // Fall through to the next label.
 
@@ -4136,7 +4139,7 @@
     // Cannot use smi-register for loading yet.
     __ movq(kScratchRegister,
             reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
-            RelocInfo::NONE);
+            RelocInfo::NONE64);
     __ push(kScratchRegister);  // context slot
     __ push(kScratchRegister);  // function slot
     // Save callee-saved registers (X64/Win64 calling conventions).
@@ -4191,7 +4194,7 @@
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
   __ Store(pending_exception, rax);
-  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+  __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
   __ jmp(&exit);
 
   // Invoke: Link this frame into the handler chain.  There's only one
@@ -6462,7 +6465,7 @@
 #endif
 
   // Call the entry hook function.
-  __ movq(rax, &entry_hook_, RelocInfo::NONE);
+  __ movq(rax, &entry_hook_, RelocInfo::NONE64);
   __ movq(rax, Operand(rax, 0));
 
   AllowExternalCallThatCantCauseGC scope(masm);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 7954604..63f3ac4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -212,7 +212,7 @@
   __ j(zero, &valid_result);
   __ fstp(0);  // Drop result in st(0).
   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
-  __ movq(rcx, kNaNValue, RelocInfo::NONE);
+  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
   __ movq(Operand(rsp, kPointerSize), rcx);
   __ movsd(xmm0, Operand(rsp, kPointerSize));
   __ jmp(&return_result);
@@ -327,7 +327,7 @@
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
   // r15: the-hole NaN
   __ jmp(&entry);
 
@@ -425,7 +425,7 @@
   __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index d32310a..c719bf8 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -85,7 +85,7 @@
     // There is room enough to write a long call instruction because we pad
     // LLazyBailout instructions with nops if necessary.
     CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
-    patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
+    patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE64);
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index e0aeb8e..97ec4b1 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -319,7 +319,7 @@
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
   __ movq(kScratchRegister,
           reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
-          RelocInfo::NONE);
+          RelocInfo::NONE64);
   __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
           kScratchRegister);
 }
@@ -3021,10 +3021,10 @@
     __ PrepareCallCFunction(2);
 #ifdef _WIN64
   __ movq(rcx, object);
-  __ movq(rdx, index, RelocInfo::NONE);
+  __ movq(rdx, index, RelocInfo::NONE64);
 #else
   __ movq(rdi, object);
-  __ movq(rsi, index, RelocInfo::NONE);
+  __ movq(rsi, index, RelocInfo::NONE64);
 #endif
     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 700ce3c..d64d4ff 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -164,7 +164,7 @@
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ Set(rax, slots);
-      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
       Label loop;
       __ bind(&loop);
       __ push(kScratchRegister);
@@ -1133,7 +1133,7 @@
       __ neg(reg1);
       DeoptimizeIf(zero, instr->environment());
     }
-    __ movq(reg2, multiplier, RelocInfo::NONE);
+    __ movq(reg2, multiplier, RelocInfo::NONE64);
     // Result just fits in r64, because it's int32 * uint32.
     __ imul(reg2, reg1);
 
@@ -1144,7 +1144,7 @@
 
 
 void LCodeGen::DoDivI(LDivI* instr) {
-  if (instr->hydrogen()->HasPowerOf2Divisor()) {
+  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->left());
     int32_t divisor =
         HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -1191,13 +1191,13 @@
 
   // Check for x / 0.
   Register right_reg = ToRegister(right);
-  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(right_reg, right_reg);
     DeoptimizeIf(zero, instr->environment());
   }
 
   // Check for (0 / -x) that will produce negative zero.
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label left_not_zero;
     __ testl(left_reg, left_reg);
     __ j(not_zero, &left_not_zero, Label::kNear);
@@ -1207,7 +1207,7 @@
   }
 
   // Check for (kMinInt / -1).
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
     Label left_not_min_int;
     __ cmpl(left_reg, Immediate(kMinInt));
     __ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1220,9 +1220,19 @@
   __ cdq();
   __ idivl(right_reg);
 
-  // Deoptimize if remainder is not 0.
-  __ testl(rdx, rdx);
-  DeoptimizeIf(not_zero, instr->environment());
+  if (!instr->is_flooring()) {
+    // Deoptimize if remainder is not 0.
+    __ testl(rdx, rdx);
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    Label done;
+    __ testl(rdx, rdx);
+    __ j(zero, &done, Label::kNear);
+    __ xorl(rdx, right_reg);
+    __ sarl(rdx, Immediate(31));
+    __ addl(rax, rdx);
+    __ bind(&done);
+  }
 }
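
The flooring branch added above fixes up idivl's truncating quotient
without an extra branch. idivl leaves the quotient in rax and the
remainder in rdx (which is why DoMathFloorOfDiv below pins those
registers); when the remainder is nonzero, xorl rdx, right_reg sets the
sign bit of rdx exactly when dividend and divisor have opposite signs,
sarl rdx, 31 smears that bit into 0 or -1, and addl folds the
correction into the quotient. A C++ model of the generated code (a
sketch, not V8 source; arithmetic right shift assumed, as on all
targets V8 supports):

    #include <cstdint>

    int32_t FlooringDiv(int32_t a, int32_t b) {
      int32_t q = a / b;  // idivl truncates toward zero (quotient in rax).
      int32_t r = a % b;  // Remainder (in rdx); its sign matches a's.
      if (r != 0) {
        // (r ^ b) is negative iff dividend and divisor differ in sign;
        // the shift turns the sign bit into 0 or -1.
        q += (r ^ b) >> 31;
      }
      return q;
    }
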
 
 
@@ -1569,10 +1579,10 @@
     __ PrepareCallCFunction(2);
 #ifdef _WIN64
   __ movq(rcx, object);
-  __ movq(rdx, index, RelocInfo::NONE);
+  __ movq(rdx, index, RelocInfo::NONE64);
 #else
   __ movq(rdi, object);
-  __ movq(rsi, index, RelocInfo::NONE);
+  __ movq(rsi, index, RelocInfo::NONE64);
 #endif
     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1712,6 +1722,7 @@
       break;
     case Token::DIV:
       __ divsd(left, right);
+      __ movaps(left, left);
       break;
     case Token::MOD:
       __ PrepareCallCFunction(2);
@@ -2452,7 +2463,6 @@
     __ pop(rbp);
   }
   if (info()->IsStub()) {
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
     __ Ret(0, r10);
   } else {
     __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
@@ -3428,7 +3438,7 @@
 
   Label done;
   // xmm_scratch = 0.5
-  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE64);
   __ movq(xmm_scratch, kScratchRegister);
   Label below_half;
   __ ucomisd(xmm_scratch, input_reg);
@@ -3457,7 +3467,9 @@
     // Bailout if below -0.5, otherwise round to (positive) zero, even
     // if negative.
     // xmm_scratch = -0.5
-    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+    __ movq(kScratchRegister,
+            V8_INT64_C(0xBFE0000000000000),
+            RelocInfo::NONE64);
     __ movq(xmm_scratch, kScratchRegister);
     __ ucomisd(input_reg, xmm_scratch);
     DeoptimizeIf(below, instr->environment());
@@ -3486,7 +3498,7 @@
   Label done, sqrt;
   // Check base for -Infinity.  According to IEEE-754, double-precision
   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
-  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -4582,7 +4594,9 @@
     // Performs a truncating conversion of a floating point number as used by
     // the JS bitwise operations.
     __ cvttsd2siq(result_reg, input_reg);
-    __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
+    __ movq(kScratchRegister,
+            V8_INT64_C(0x8000000000000000),
+            RelocInfo::NONE64);
     __ cmpq(result_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
@@ -4983,7 +4997,7 @@
       __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
       __ movq(FieldOperand(result, total_offset), rcx);
     } else {
-      __ movq(rcx, value, RelocInfo::NONE);
+      __ movq(rcx, value, RelocInfo::NONE64);
       __ movq(FieldOperand(result, total_offset), rcx);
     }
   }
@@ -5005,7 +5019,7 @@
         int64_t value = double_array->get_representation(i);
         int total_offset =
             elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
-        __ movq(rcx, value, RelocInfo::NONE);
+        __ movq(rcx, value, RelocInfo::NONE64);
         __ movq(FieldOperand(result, total_offset), rcx);
       }
     } else if (elements->IsFixedArray()) {
@@ -5023,7 +5037,7 @@
           __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
           __ movq(FieldOperand(result, total_offset), rcx);
         } else {
-          __ movq(rcx, value, RelocInfo::NONE);
+          __ movq(rcx, value, RelocInfo::NONE64);
           __ movq(FieldOperand(result, total_offset), rcx);
         }
       }
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 574970c..85eeee1 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -114,7 +114,11 @@
   stream->Add("= ");
   for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
   }
 }
 
@@ -1006,7 +1010,14 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  // If there is a non-return use, the context must be allocated in a register.
+  for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      return DefineAsRegister(new(zone()) LContext);
+    }
+  }
+
+  return NULL;
 }
 
 
@@ -1228,12 +1239,31 @@
     return constant_val->CopyToRepresentation(Representation::Integer32(),
                                               divisor->block()->zone());
   }
+  // A value with an integer representation does not need to be transformed.
+  if (divisor->representation().IsInteger32()) {
+    return divisor;
+  } else if (divisor->IsChange() &&
+             HChange::cast(divisor)->from().IsInteger32()) {
+    // A change from an integer32 can be replaced by the integer32 value.
+    return HChange::cast(divisor)->value();
+  }
   return NULL;
 }
 
 
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
   HValue* right = instr->right();
+  if (!right->IsConstant()) {
+    ASSERT(right->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into rdx.
+    LOperand* temp = FixedTemp(rdx);
+    LOperand* dividend = UseFixed(instr->left(), rax);
+    LOperand* divisor = UseRegister(instr->right());
+    LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+    return AssignEnvironment(DefineFixed(flooring_div, rax));
+  }
+
   ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
   LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
   int32_t divisor_si = HConstant::cast(right)->Integer32Value();
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index f5f0250..9def5c5 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -573,6 +573,8 @@
   LOperand* right() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }
 
+  bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(Div)
 };
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8513a68..0270a26 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -162,7 +162,7 @@
   int64_t address = reinterpret_cast<int64_t>(source.address());
   if (is_int32(address) && !Serializer::enabled()) {
     if (emit_debug_code()) {
-      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
     }
     push(Immediate(static_cast<int32_t>(address)));
     return;
@@ -287,7 +287,7 @@
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
@@ -342,8 +342,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
   }
 }
 
@@ -376,8 +376,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
   }
 }
 
@@ -442,8 +442,8 @@
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
   }
 }
 
@@ -524,11 +524,11 @@
   }
 #endif
   push(rax);
-  movq(kScratchRegister, p0, RelocInfo::NONE);
+  movq(kScratchRegister, p0, RelocInfo::NONE64);
   push(kScratchRegister);
   movq(kScratchRegister,
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
-       RelocInfo::NONE);
+       RelocInfo::NONE64);
   push(kScratchRegister);
 
   if (!has_frame_) {
@@ -731,7 +731,7 @@
 
   // Call the api function!
   movq(rax, reinterpret_cast<int64_t>(function_address),
-       RelocInfo::RUNTIME_ENTRY);
+       RelocInfo::EXTERNAL_REFERENCE);
   call(rax);
 
   if (FLAG_log_timer_events) {
@@ -937,7 +937,7 @@
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(dst, x, RelocInfo::NONE);
+    movq(dst, x, RelocInfo::NONE64);
   }
 }
 
@@ -1002,7 +1002,7 @@
   if (emit_debug_code()) {
     movq(dst,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
-         RelocInfo::NONE);
+         RelocInfo::NONE64);
     cmpq(dst, kSmiConstantRegister);
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
@@ -1049,7 +1049,7 @@
       UNREACHABLE();
       return;
     default:
-      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
       return;
   }
   if (negative) {
@@ -2927,7 +2927,7 @@
   cmpl(src, Immediate(0));
   movq(kScratchRegister,
        reinterpret_cast<int64_t>(&kUint32Bias),
-       RelocInfo::NONE);
+       RelocInfo::NONE64);
   movsd(scratch, Operand(kScratchRegister, 0));
   cvtlsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
@@ -3011,7 +3011,7 @@
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
     ASSERT(!int32_register.is(kScratchRegister));
-    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
     cmpq(kScratchRegister, int32_register);
     Check(above_equal, "32 bit value in register is not zero-extended");
   }
@@ -3758,6 +3758,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3777,6 +3778,13 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());
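
The added check only fires under --debug-code: it verifies that the
allocation top about to be handed out is 8-byte aligned when
DOUBLE_ALIGNMENT is requested (on x64, kPointerSize equals kDoubleSize,
so the top is always aligned and no filler needs to be inserted). A
standalone model of the assertion, assuming kDoubleAlignmentMask is
kDoubleSize - 1, i.e. 7:

    #include <cstdint>

    // Model of the testq/Check pair above; 7 stands in for
    // kDoubleAlignmentMask (an assumption).
    inline bool IsDoubleAligned(uintptr_t address) {
      return (address & 7) == 0;  // Low three bits must be clear.
    }
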
@@ -3795,15 +3803,17 @@
   // Update allocation top.
   UpdateAllocationTopHelper(top_reg, scratch);
 
+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       subq(result, Immediate(object_size - kHeapObjectTag));
     } else {
       subq(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
+  } else if (tag_result) {
     // Tag the result if requested.
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }
 
@@ -3816,6 +3826,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3834,6 +3845,13 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address(isolate());
@@ -3852,7 +3870,8 @@
 
   // Tag the result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }
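
Swapping addq(result, Immediate(kHeapObjectTag)) for incq(result) is a
code-size tweak that the new ASSERT keeps honest: it is only equivalent
while the tag is exactly 1. A sketch of the invariant (TagHeapObject is
a hypothetical helper, not V8 API):

    #include <cstdint>

    // With kHeapObjectTag == 1 (asserted above), tagging an aligned
    // address is a plain increment, which incq encodes more compactly
    // than addq with an immediate.
    inline uintptr_t TagHeapObject(uintptr_t aligned_address) {
      return aligned_address + 1;
    }
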
 
@@ -3863,6 +3882,8 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1d56435..136f0f6 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
@@ -385,7 +373,7 @@
   void InitializeSmiConstantRegister() {
     movq(kSmiConstantRegister,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
-         RelocInfo::NONE);
+         RelocInfo::NONE64);
   }
 
   // Conversions between tagged smi values and non-tagged integer values.
@@ -1485,17 +1473,16 @@
 #define CODE_COVERAGE_STRINGIFY(x) #x
 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) {                                               \
-    byte* x64_coverage_function =                                         \
-        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
-    masm->pushfd();                                                       \
-    masm->pushad();                                                       \
-    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
-    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);          \
-    masm->pop(rax);                                                       \
-    masm->popad();                                                        \
-    masm->popfd();                                                        \
-  }                                                                       \
+#define ACCESS_MASM(masm) {                                                  \
+    Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
+    masm->pushfq();                                                          \
+    masm->Pushad();                                                          \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));            \
+    masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE);        \
+    masm->pop(rax);                                                          \
+    masm->Popad();                                                           \
+    masm->popfq();                                                           \
+  }                                                                          \
   masm->
 #else
 #define ACCESS_MASM(masm) masm->
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 6cb87e8..827ec28 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -280,7 +280,7 @@
             (static_cast<uint64_t>(str[i + 5]) << 40) |
             (static_cast<uint64_t>(str[i + 6]) << 48) |
             (static_cast<uint64_t>(str[i + 7]) << 56);
-        __ movq(rax, combined_chars, RelocInfo::NONE);
+        __ movq(rax, combined_chars, RelocInfo::NONE64);
         __ cmpq(rax, Operand(rbx, byte_offset + i));
         i += 8;
       } else if (i + 4 <= n) {
@@ -300,7 +300,7 @@
       ASSERT(mode_ == UC16);
       if (i + 4 <= n) {
         uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
-        __ movq(rax, combined_chars, RelocInfo::NONE);
+        __ movq(rax, combined_chars, RelocInfo::NONE64);
         __ cmpq(rax,
                 Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
         i += 4;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 0329966..994b5bc 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2088,7 +2088,7 @@
   const int sign_mask_shift =
       (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
   __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
-          RelocInfo::NONE);
+          RelocInfo::NONE64);
   __ testq(rbx, rdi);
   __ j(not_zero, &negative_sign);
   __ ret(2 * kPointerSize);
@@ -3894,7 +3894,7 @@
     __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
                                    &restore_key_transition_elements_kind);
 
-    __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+    __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
     for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
       __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
     }
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 59eeed9..044ebe4 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -404,7 +404,7 @@
   ASSERT(Smi::IsValid(result));
   __ movl(rax, Immediate(id));
   __ Move(r8, Smi::FromInt(static_cast<int>(result)));
-  __ movq(rcx, x, RelocInfo::NONE);
+  __ movq(rcx, x, RelocInfo::NONE64);
   __ movq(r11, rcx);
   __ Integer64PlusConstantToSmi(rdx, rcx, y);
   __ cmpq(rdx, r8);
@@ -2227,7 +2227,7 @@
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
-  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
+  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
   __ movl(rax, Immediate(1));
 
   Operand sp0 = Operand(rsp, 0);
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 087db2f..682b327 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -473,6 +473,10 @@
 }
 
 
+// The memory use computed this way is not entirely accurate and depends on
+// the way malloc allocates memory.  That's why the memory use may seem to
+// increase even though the sum of the allocated object sizes decreases.  It
+// also means that the memory use depends on the kernel and stdlib.
 static intptr_t MemoryInUse() {
   intptr_t memory_use = 0;
 
@@ -538,17 +542,18 @@
   if (initial_memory >= 0) {
     InitializeVM();
     intptr_t delta = MemoryInUse() - initial_memory;
-    if (sizeof(initial_memory) == 8) {
+    printf("delta: %" V8_PTR_PREFIX "d kB\n", delta / 1024);
+    if (sizeof(initial_memory) == 8) {  // 64-bit.
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 3600 * 1024);  // 3396.
+        CHECK_LE(delta, 3700 * 1024);
       } else {
-        CHECK_LE(delta, 4000 * 1024);  // 3948.
+        CHECK_LE(delta, 4200 * 1024);
       }
-    } else {
+    } else {                            // 32-bit.
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(delta, 2600 * 1024);  // 2400.
+        CHECK_LE(delta, 2600 * 1024);
       } else {
-        CHECK_LE(delta, 3000 * 1024);  // 2920.
+        CHECK_LE(delta, 3000 * 1024);
       }
     }
   }
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 652a60a..b7ce9ac 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1,7 +1,7 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 
 // Check that we can traverse very deep stacks of ConsStrings using
-// StringInputBuffer.  Check that Get(int) works on very deep stacks
+// StringCharacterStream.  Check that Get(int) works on very deep stacks
 // of ConsStrings.  These operations may not be very fast, but they
 // should be possible without getting errors due to too deep recursion.
 
@@ -369,7 +369,7 @@
 void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
   // Verify basic data.
   CHECK(root->IsConsString());
-  CHECK((unsigned)root->length() == data->stats_.chars_);
+  CHECK(static_cast<unsigned>(root->length()) == data->stats_.chars_);
   // Recursive verify.
   ConsStringStats stats;
   AccumulateStats(ConsString::cast(*root), &stats);
@@ -514,23 +514,16 @@
 }
 
 
-static StringInputBuffer buffer;
 static ConsStringIteratorOp cons_string_iterator_op_1;
 static ConsStringIteratorOp cons_string_iterator_op_2;
 
 static void Traverse(Handle<String> s1, Handle<String> s2) {
   int i = 0;
-  buffer.Reset(*s1);
-  StringCharacterStream character_stream_1(*s1, 0, &cons_string_iterator_op_1);
-  StringCharacterStream character_stream_2(*s2, 0, &cons_string_iterator_op_2);
-  StringInputBuffer buffer2(*s2);
-  while (buffer.has_more()) {
-    CHECK(buffer2.has_more());
-    CHECK(character_stream_1.HasMore());
+  StringCharacterStream character_stream_1(*s1, &cons_string_iterator_op_1);
+  StringCharacterStream character_stream_2(*s2, &cons_string_iterator_op_2);
+  while (character_stream_1.HasMore()) {
     CHECK(character_stream_2.HasMore());
-    uint16_t c = buffer.GetNext();
-    CHECK_EQ(c, buffer2.GetNext());
-    CHECK_EQ(c, character_stream_1.GetNext());
+    uint16_t c = character_stream_1.GetNext();
     CHECK_EQ(c, character_stream_2.GetNext());
     i++;
   }
@@ -543,17 +536,11 @@
 
 static void TraverseFirst(Handle<String> s1, Handle<String> s2, int chars) {
   int i = 0;
-  buffer.Reset(*s1);
-  StringInputBuffer buffer2(*s2);
-  StringCharacterStream character_stream_1(*s1, 0, &cons_string_iterator_op_1);
-  StringCharacterStream character_stream_2(*s2, 0, &cons_string_iterator_op_2);
-  while (buffer.has_more() && i < chars) {
-    CHECK(buffer2.has_more());
-    CHECK(character_stream_1.HasMore());
+  StringCharacterStream character_stream_1(*s1, &cons_string_iterator_op_1);
+  StringCharacterStream character_stream_2(*s2, &cons_string_iterator_op_2);
+  while (character_stream_1.HasMore() && i < chars) {
     CHECK(character_stream_2.HasMore());
-    uint16_t c = buffer.GetNext();
-    CHECK_EQ(c, buffer2.GetNext());
-    CHECK_EQ(c, character_stream_1.GetNext());
+    uint16_t c = character_stream_1.GetNext();
     CHECK_EQ(c, character_stream_2.GetNext());
     i++;
   }
@@ -621,9 +608,9 @@
     // Want to test the offset == length case.
     if (offset > length) offset = length;
     StringCharacterStream flat_stream(
-        flat_string, (unsigned) offset, &cons_string_iterator_op_1);
+        flat_string, &cons_string_iterator_op_1, static_cast<unsigned>(offset));
     StringCharacterStream cons_stream(
-        cons_string, (unsigned) offset, &cons_string_iterator_op_2);
+        cons_string, &cons_string_iterator_op_2, static_cast<unsigned>(offset));
     for (int i = offset; i < length; i++) {
       uint16_t c = flat_string->Get(i);
       CHECK(flat_stream.HasMore());
diff --git a/test/mjsunit/math-floor-of-div-nosudiv.js b/test/mjsunit/math-floor-of-div-nosudiv.js
index 7155e85..5baed2d 100644
--- a/test/mjsunit/math-floor-of-div-nosudiv.js
+++ b/test/mjsunit/math-floor-of-div-nosudiv.js
@@ -184,10 +184,38 @@
 %OptimizeFunctionOnNextCall(test_div);
 test_div();
 
+// Test for ia32/x64 flooring correctness.
+var values2 = [1, 3, 10, 99, 100, 101, 0x7fffffff];
+function test_div2() {
+  for (var i = 0; i < values2.length; i++) {
+    for (var j = 0; j < values2.length; j++) {
+      assertEquals(Math.floor(div((values2[i] | 0), (values2[j] | 0))),
+                   Math.floor((values2[i] | 0) / (values2[j] | 0)));
+      assertEquals(Math.floor(div(-(values2[i] | 0), (values2[j] | 0))),
+                   Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
+      assertEquals(Math.floor(div((values2[i] | 0), -(values2[j] | 0))),
+                   Math.floor((values2[i] | 0) / -(values2[j] | 0)));
+      assertEquals(Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
+                   Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
+    }
+  }
+}
+
+test_div2();
+%OptimizeFunctionOnNextCall(test_div2);
+test_div2();
+
+
 // Test for negative zero, overflow and division by 0.
 // Separate the tests to prevent deoptimizations from making the other optimized
 // test unreachable.
 
+// We box the value in an array to avoid constant propagation.
+var neg_one_in_array = [-1];
+var zero_in_array = [0];
+var min_int_in_array = [-2147483648];
+
+// Test for dividing by constant.
 function IsNegativeZero(x) {
   assertTrue(x == 0);  // Is 0 or -0.
   var y = 1 / x;
@@ -196,15 +224,12 @@
 }
 
 function test_div_deopt_minus_zero() {
-  var zero_in_array = [0];
   for (var i = 0; i < 2; ++i) {
     assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
   }
 }
 
 function test_div_deopt_overflow() {
-  // We box the value in an array to avoid constant propagation.
-  var min_int_in_array = [-2147483648];
   for (var i = 0; i < 2; ++i) {
     // We use '| 0' to force the representation to int32.
     assertEquals(-min_int_in_array[0],
@@ -228,3 +253,36 @@
 test_div_deopt_minus_zero();
 test_div_deopt_overflow();
 test_div_deopt_div_by_zero();
+
+// Test for dividing by variable.
+function test_div_deopt_minus_zero_v() {
+  for (var i = 0; i < 2; ++i) {
+    assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) /
+               neg_one_in_array[0])));
+  }
+}
+
+function test_div_deopt_overflow_v() {
+  for (var i = 0; i < 2; ++i) {
+    // We use '| 0' to force the representation to int32.
+    assertEquals(-min_int_in_array[0],
+                 Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
+  }
+}
+
+function test_div_deopt_div_by_zero_v() {
+  for (var i = 0; i < 2; ++i) {
+    assertEquals(div(i, 0),
+                 Math.floor(i / zero_in_array[0]));
+  }
+}
+
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero_v);
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
diff --git a/test/mjsunit/math-floor-of-div.js b/test/mjsunit/math-floor-of-div.js
index 35bc3e4..c7ef289 100644
--- a/test/mjsunit/math-floor-of-div.js
+++ b/test/mjsunit/math-floor-of-div.js
@@ -184,10 +184,38 @@
 %OptimizeFunctionOnNextCall(test_div);
 test_div();
 
+// Test for ia32/x64 flooring correctness.
+var values2 = [1, 3, 10, 99, 100, 101, 0x7fffffff];
+function test_div2() {
+  for (var i = 0; i < values2.length; i++) {
+    for (var j = 0; j < values2.length; j++) {
+      assertEquals(Math.floor(div((values2[i] | 0), (values2[j] | 0))),
+                   Math.floor((values2[i] | 0) / (values2[j] | 0)));
+      assertEquals(Math.floor(div(-(values2[i] | 0), (values2[j] | 0))),
+                   Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
+      assertEquals(Math.floor(div((values2[i] | 0), -(values2[j] | 0))),
+                   Math.floor((values2[i] | 0) / -(values2[j] | 0)));
+      assertEquals(Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
+                   Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
+    }
+  }
+}
+
+test_div2();
+%OptimizeFunctionOnNextCall(test_div2);
+test_div2();
+
+
 // Test for negative zero, overflow and division by 0.
 // Separate the tests to prevent deoptimizations from making the other optimized
 // test unreachable.
 
+// We box the value in an array to avoid constant propagation.
+var neg_one_in_array = [-1];
+var zero_in_array = [0];
+var min_int_in_array = [-2147483648];
+
+// Test for dividing by constant.
 function IsNegativeZero(x) {
   assertTrue(x == 0);  // Is 0 or -0.
   var y = 1 / x;
@@ -196,15 +224,12 @@
 }
 
 function test_div_deopt_minus_zero() {
-  var zero_in_array = [0];
   for (var i = 0; i < 2; ++i) {
     assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
   }
 }
 
 function test_div_deopt_overflow() {
-  // We box the value in an array to avoid constant propagation.
-  var min_int_in_array = [-2147483648];
   for (var i = 0; i < 2; ++i) {
     // We use '| 0' to force the representation to int32.
     assertEquals(-min_int_in_array[0],
@@ -228,3 +253,36 @@
 test_div_deopt_minus_zero();
 test_div_deopt_overflow();
 test_div_deopt_div_by_zero();
+
+// Test for dividing by variable.
+function test_div_deopt_minus_zero_v() {
+  for (var i = 0; i < 2; ++i) {
+    assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) /
+               neg_one_in_array[0])));
+  }
+}
+
+function test_div_deopt_overflow_v() {
+  for (var i = 0; i < 2; ++i) {
+    // We use '| 0' to force the representation to int32.
+    assertEquals(-min_int_in_array[0],
+                 Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
+  }
+}
+
+function test_div_deopt_div_by_zero_v() {
+  for (var i = 0; i < 2; ++i) {
+    assertEquals(div(i, 0),
+                 Math.floor(i / zero_in_array[0]));
+  }
+}
+
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero_v);
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
index 7dbefde..0acb658 100644
--- a/tools/gen-postmortem-metadata.py
+++ b/tools/gen-postmortem-metadata.py
@@ -76,16 +76,15 @@
     { 'name': 'SmiTag',                 'value': 'kSmiTag' },
     { 'name': 'SmiTagMask',             'value': 'kSmiTagMask' },
     { 'name': 'SmiValueShift',          'value': 'kSmiTagSize' },
+    { 'name': 'SmiShiftSize',           'value': 'kSmiShiftSize' },
     { 'name': 'PointerSizeLog2',        'value': 'kPointerSizeLog2' },
 
-    { 'name': 'prop_idx_transitions',
-        'value': 'DescriptorArray::kTransitionsIndex' },
     { 'name': 'prop_idx_first',
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
         'value': 'FIELD' },
     { 'name': 'prop_type_first_phantom',
-        'value': 'MAP_TRANSITION' },
+        'value': 'TRANSITION' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
 
@@ -107,7 +106,6 @@
     'JSObject, elements, Object, kElementsOffset',
     'FixedArray, data, uintptr_t, kHeaderSize',
     'Map, instance_attributes, int, kInstanceAttributesOffset',
-    'Map, instance_descriptors, int, kInstanceDescriptorsOrBitField3Offset',
     'Map, inobject_properties, int, kInObjectPropertiesOffset',
     'Map, instance_size, int, kInstanceSizeOffset',
     'HeapNumber, value, double, kValueOffset',