Upgrade V8 to 3.29

Update V8 to 3.29.88.17 and update the makefiles so the library builds
on all relevant platforms.
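
Downstream code that compiled against the old internal headers will need
roughly the following adjustments (an illustrative sketch drawn from the
diff below, not an exhaustive list):

    // old                                // new (3.29)
    #include "v8.h"                       #include "src/v8.h"
    #include "mark-compact.h"             #include "src/heap/mark-compact.h"
    ASSERT(cond);                         DCHECK(cond);
    ASSERT_EQ(a, b);                      DCHECK_EQ(a, b);
    // Builtins now return Object* and take (int args_length,
    // Object** args_object, Isolate* isolate); Handle<>/MaybeHandle<>
    // replace the old raw MaybeObject* plumbing.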

Bug: 17370214

Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/builtins.cc b/src/builtins.cc
index 01e88f5..d0c19e5 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1,41 +1,22 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
-#include "v8.h"
+#include "src/v8.h"
 
-#include "api.h"
-#include "arguments.h"
-#include "bootstrapper.h"
-#include "builtins.h"
-#include "gdb-jit.h"
-#include "ic-inl.h"
-#include "heap-profiler.h"
-#include "mark-compact.h"
-#include "vm-state-inl.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/base/once.h"
+#include "src/bootstrapper.h"
+#include "src/builtins.h"
+#include "src/cpu-profiler.h"
+#include "src/gdb-jit.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap-profiler.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/prototype.h"
+#include "src/vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -50,12 +31,12 @@
       : Arguments(length, arguments) { }
 
   Object*& operator[] (int index) {
-    ASSERT(index < length());
+    DCHECK(index < length());
     return Arguments::operator[](index);
   }
 
   template <class S> Handle<S> at(int index) {
-    ASSERT(index < length());
+    DCHECK(index < length());
     return Arguments::at<S>(index);
   }
 
@@ -78,7 +59,7 @@
 #ifdef DEBUG
   void Verify() {
     // Check we have at least the receiver.
-    ASSERT(Arguments::length() >= 1);
+    DCHECK(Arguments::length() >= 1);
   }
 #endif
 };
@@ -95,7 +76,7 @@
 template <>
 void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
   // Check we have at least the receiver and the called function.
-  ASSERT(Arguments::length() >= 2);
+  DCHECK(Arguments::length() >= 2);
   // Make sure cast to JSFunction succeeds.
   called_function();
 }
@@ -124,37 +105,43 @@
 
 #ifdef DEBUG
 
-#define BUILTIN(name)                                      \
-  MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
-      name##ArgumentsType args, Isolate* isolate);         \
-  MUST_USE_RESULT static MaybeObject* Builtin_##name(      \
-      name##ArgumentsType args, Isolate* isolate) {        \
-    ASSERT(isolate == Isolate::Current());                 \
-    args.Verify();                                         \
-    return Builtin_Impl_##name(args, isolate);             \
-  }                                                        \
-  MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+#define BUILTIN(name)                                            \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
+      name##ArgumentsType args, Isolate* isolate);               \
+  MUST_USE_RESULT static Object* Builtin_##name(                 \
+      int args_length, Object** args_object, Isolate* isolate) { \
+    name##ArgumentsType args(args_length, args_object);          \
+    args.Verify();                                               \
+    return Builtin_Impl_##name(args, isolate);                   \
+  }                                                              \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
       name##ArgumentsType args, Isolate* isolate)
 
 #else  // For release mode.
 
-#define BUILTIN(name)                                      \
-  static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name)                                            \
+  static Object* Builtin_impl##name(                             \
+      name##ArgumentsType args, Isolate* isolate);               \
+  static Object* Builtin_##name(                                 \
+      int args_length, Object** args_object, Isolate* isolate) { \
+    name##ArgumentsType args(args_length, args_object);          \
+    return Builtin_impl##name(args, isolate);                    \
+  }                                                              \
+  static Object* Builtin_impl##name(                             \
+      name##ArgumentsType args, Isolate* isolate)
 #endif
 
 
-static inline bool CalledAsConstructor(Isolate* isolate) {
 #ifdef DEBUG
+static inline bool CalledAsConstructor(Isolate* isolate) {
   // Calculate the result using a full stack frame iterator and check
   // that the state of the stack is as we assume it to be in the
   // code below.
-  StackFrameIterator it;
-  ASSERT(it.frame()->is_exit());
+  StackFrameIterator it(isolate);
+  DCHECK(it.frame()->is_exit());
   it.Advance();
   StackFrame* frame = it.frame();
   bool reference_result = frame->is_construct();
-#endif
   Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
   // Because we know fp points to an exit frame we can use the relevant
   // part of ExitFrame::ComputeCallerState directly.
@@ -168,9 +155,11 @@
   const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
   Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
   bool result = (marker == kConstructMarker);
-  ASSERT_EQ(result, reference_result);
+  DCHECK_EQ(result, reference_result);
   return result;
 }
+#endif
+
 
 // ----------------------------------------------------------------------------
 
@@ -185,485 +174,380 @@
 }
 
 
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
-                                           Isolate* isolate,
-                                           JSFunction* constructor) {
-  Heap* heap = isolate->heap();
-  isolate->counters()->array_function_runtime()->Increment();
-
-  JSArray* array;
-  if (CalledAsConstructor(isolate)) {
-    array = JSArray::cast((*args)[0]);
-    // Initialize elements and length in case later allocations fail so that the
-    // array object is initialized in a valid state.
-    array->set_length(Smi::FromInt(0));
-    array->set_elements(heap->empty_fixed_array());
-    if (!FLAG_smi_only_arrays) {
-      Context* global_context = isolate->context()->global_context();
-      if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
-          !global_context->object_js_array_map()->IsUndefined()) {
-        array->set_map(Map::cast(global_context->object_js_array_map()));
-      }
-    }
-  } else {
-    // Allocate the JS Array
-    MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
-    if (!maybe_obj->To(&array)) return maybe_obj;
-  }
-
-  // Optimize the case where there is one argument and the argument is a
-  // small smi.
-  if (args->length() == 2) {
-    Object* obj = (*args)[1];
-    if (obj->IsSmi()) {
-      int len = Smi::cast(obj)->value();
-      if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
-        Object* fixed_array;
-        { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
-          if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
-        }
-        // We do not use SetContent to skip the unnecessary elements type check.
-        array->set_elements(FixedArray::cast(fixed_array));
-        array->set_length(Smi::cast(obj));
-        return array;
-      }
-    }
-    // Take the argument as the length.
-    { MaybeObject* maybe_obj = array->Initialize(0);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
-    return array->SetElementsLength((*args)[1]);
-  }
-
-  // Optimize the case where there are no parameters passed.
-  if (args->length() == 1) {
-    return array->Initialize(JSArray::kPreallocatedArrayElements);
-  }
-
-  // Set length and elements on the array.
-  int number_of_elements = args->length() - 1;
-  MaybeObject* maybe_object =
-      array->EnsureCanContainElements(args, 1, number_of_elements,
-                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
-  if (maybe_object->IsFailure()) return maybe_object;
-
-  // Allocate an appropriately typed elements array.
-  MaybeObject* maybe_elms;
-  ElementsKind elements_kind = array->GetElementsKind();
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
-        number_of_elements);
-  } else {
-    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
-  }
-  FixedArrayBase* elms;
-  if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
-
-  // Fill in the content
-  switch (array->GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS: {
-      FixedArray* smi_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
-      }
-      break;
-    }
-    case FAST_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-      FixedArray* object_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        object_elms->set(index, (*args)[index+1], mode);
-      }
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        double_elms->set(index, (*args)[index+1]->Number());
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  array->set_elements(elms);
-  array->set_length(Smi::FromInt(number_of_elements));
-  return array;
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
-  return ArrayCodeGenericCommon(
-      &args,
-      isolate,
-      isolate->context()->global_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
-  return ArrayCodeGenericCommon(
-      &args,
-      isolate,
-      isolate->context()->global_context()->array_function());
-}
-
-
-static void MoveElements(Heap* heap,
-                         AssertNoAllocation* no_gc,
-                         FixedArray* dst,
-                         int dst_index,
-                         FixedArray* src,
-                         int src_index,
-                         int len) {
+static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
+                               FixedDoubleArray* src, int src_index, int len) {
   if (len == 0) return;
-  ASSERT(dst->map() != HEAP->fixed_cow_array_map());
-  memmove(dst->data_start() + dst_index,
-          src->data_start() + src_index,
-          len * kPointerSize);
-  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
-  if (mode == UPDATE_WRITE_BARRIER) {
-    heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
-  }
-  heap->incremental_marking()->RecordWrites(dst);
-}
-
-
-static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
-  ASSERT(dst->map() != heap->fixed_cow_array_map());
-  MemsetPointer(dst->data_start() + from, heap->the_hole_value(), to - from);
-}
-
-
-static FixedArray* LeftTrimFixedArray(Heap* heap,
-                                      FixedArray* elms,
-                                      int to_trim) {
-  ASSERT(elms->map() != HEAP->fixed_cow_array_map());
-  // For now this trick is only applied to fixed arrays in new and paged space.
-  // In large object space the object's start must coincide with chunk
-  // and thus the trick is just not applicable.
-  ASSERT(!HEAP->lo_space()->Contains(elms));
-
-  STATIC_ASSERT(FixedArray::kMapOffset == 0);
-  STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
-  STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
-
-  Object** former_start = HeapObject::RawField(elms, 0);
-
-  const int len = elms->length();
-
-  if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
-      !heap->new_space()->Contains(elms)) {
-    // If we are doing a big trim in old space then we zap the space that was
-    // formerly part of the array so that the GC (aided by the card-based
-    // remembered set) won't find pointers to new-space there.
-    Object** zap = reinterpret_cast<Object**>(elms->address());
-    zap++;  // Header of filler must be at least one word so skip that.
-    for (int i = 1; i < to_trim; i++) {
-      *zap++ = Smi::FromInt(0);
-    }
-  }
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
-
-  former_start[to_trim] = heap->fixed_array_map();
-  former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
-
-  // Maintain marking consistency for HeapObjectIterator and
-  // IncrementalMarking.
-  int size_delta = to_trim * kPointerSize;
-  if (heap->marking()->TransferMark(elms->address(),
-                                    elms->address() + size_delta)) {
-    MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
-  }
-
-  HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
-                                     elms->address() + size_delta));
-  return FixedArray::cast(HeapObject::FromAddress(
-      elms->address() + to_trim * kPointerSize));
+  MemMove(dst->data_start() + dst_index, src->data_start() + src_index,
+          len * kDoubleSize);
 }
 
 
 static bool ArrayPrototypeHasNoElements(Heap* heap,
-                                        Context* global_context,
+                                        Context* native_context,
                                         JSObject* array_proto) {
+  DisallowHeapAllocation no_gc;
   // This method depends on non writability of Object and Array prototype
   // fields.
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
   // Object.prototype
-  Object* proto = array_proto->GetPrototype();
-  if (proto == heap->null_value()) return false;
-  array_proto = JSObject::cast(proto);
-  if (array_proto != global_context->initial_object_prototype()) return false;
+  PrototypeIterator iter(heap->isolate(), array_proto);
+  if (iter.IsAtEnd()) {
+    return false;
+  }
+  array_proto = JSObject::cast(iter.GetCurrent());
+  if (array_proto != native_context->initial_object_prototype()) return false;
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
-  return array_proto->GetPrototype()->IsNull();
+  iter.Advance();
+  return iter.IsAtEnd();
 }
 
 
+// Returns empty handle if not applicable.
 MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
-    Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
-  if (!receiver->IsJSArray()) return NULL;
-  JSArray* array = JSArray::cast(receiver);
-  HeapObject* elms = array->elements();
+static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
+    Isolate* isolate,
+    Handle<Object> receiver,
+    Arguments* args,
+    int first_added_arg) {
+  if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  // If there may be elements accessors in the prototype chain, the fast path
+  // cannot be used if there are arguments to add to the array.
+  if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) {
+    return MaybeHandle<FixedArrayBase>();
+  }
+  if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
+  if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
+  Handle<FixedArrayBase> elms(array->elements(), isolate);
+  Heap* heap = isolate->heap();
   Map* map = elms->map();
   if (map == heap->fixed_array_map()) {
-    if (args == NULL || array->HasFastElements()) return elms;
-    if (array->HasFastDoubleElements()) {
-      ASSERT(elms == heap->empty_fixed_array());
-      MaybeObject* maybe_transition =
-          array->TransitionElementsKind(FAST_ELEMENTS);
-      if (maybe_transition->IsFailure()) return maybe_transition;
-      return elms;
-    }
+    if (args == NULL || array->HasFastObjectElements()) return elms;
   } else if (map == heap->fixed_cow_array_map()) {
-    MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
-    if (args == NULL || array->HasFastElements() ||
-        maybe_writable_result->IsFailure()) {
-      return maybe_writable_result;
-    }
+    elms = JSObject::EnsureWritableFastElements(array);
+    if (args == NULL || array->HasFastObjectElements()) return elms;
+  } else if (map == heap->fixed_double_array_map()) {
+    if (args == NULL) return elms;
   } else {
-    return NULL;
+    return MaybeHandle<FixedArrayBase>();
   }
 
   // Need to ensure that the arguments passed in args can be contained in
   // the array.
   int args_length = args->length();
-  if (first_added_arg >= args_length) return array->elements();
+  if (first_added_arg >= args_length) return handle(array->elements(), isolate);
 
-  MaybeObject* maybe_array = array->EnsureCanContainElements(
-      args,
-      first_added_arg,
-      args_length - first_added_arg,
-      DONT_ALLOW_DOUBLE_ELEMENTS);
-  if (maybe_array->IsFailure()) return maybe_array;
-  return array->elements();
+  ElementsKind origin_kind = array->map()->elements_kind();
+  DCHECK(!IsFastObjectElementsKind(origin_kind));
+  ElementsKind target_kind = origin_kind;
+  {
+    DisallowHeapAllocation no_gc;
+    int arg_count = args->length() - first_added_arg;
+    Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
+    for (int i = 0; i < arg_count; i++) {
+      Object* arg = arguments[i];
+      if (arg->IsHeapObject()) {
+        if (arg->IsHeapNumber()) {
+          target_kind = FAST_DOUBLE_ELEMENTS;
+        } else {
+          target_kind = FAST_ELEMENTS;
+          break;
+        }
+      }
+    }
+  }
+  if (target_kind != origin_kind) {
+    JSObject::TransitionElementsKind(array, target_kind);
+    return handle(array->elements(), isolate);
+  }
+  return elms;
 }
 
 
 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                      JSArray* receiver) {
   if (!FLAG_clever_optimizations) return false;
-  Context* global_context = heap->isolate()->context()->global_context();
+  DisallowHeapAllocation no_gc;
+  Context* native_context = heap->isolate()->context()->native_context();
   JSObject* array_proto =
-      JSObject::cast(global_context->array_function()->prototype());
-  return receiver->GetPrototype() == array_proto &&
-         ArrayPrototypeHasNoElements(heap, global_context, array_proto);
+      JSObject::cast(native_context->array_function()->prototype());
+  PrototypeIterator iter(heap->isolate(), receiver);
+  return iter.GetCurrent() == array_proto &&
+         ArrayPrototypeHasNoElements(heap, native_context, array_proto);
 }
 
 
-MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
+MUST_USE_RESULT static Object* CallJsBuiltin(
     Isolate* isolate,
     const char* name,
     BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
   HandleScope handleScope(isolate);
 
-  Handle<Object> js_builtin =
-      GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
-                  name);
+  Handle<Object> js_builtin = Object::GetProperty(
+      isolate,
+      handle(isolate->native_context()->builtins(), isolate),
+      name).ToHandleChecked();
   Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
   int argc = args.length() - 1;
   ScopedVector<Handle<Object> > argv(argc);
   for (int i = 0; i < argc; ++i) {
     argv[i] = args.at<Object>(i + 1);
   }
-  bool pending_exception;
-  Handle<Object> result = Execution::Call(function,
-                                          args.receiver(),
-                                          argc,
-                                          argv.start(),
-                                          &pending_exception);
-  if (pending_exception) return Failure::Exception();
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Execution::Call(isolate,
+                      function,
+                      args.receiver(),
+                      argc,
+                      argv.start()));
   return *result;
 }
 
 
 BUILTIN(ArrayPush) {
-  Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
-    if (maybe_elms_obj == NULL) {
-      return CallJsBuiltin(isolate, "ArrayPush", args);
-    }
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  MaybeHandle<FixedArrayBase> maybe_elms_obj =
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
+  Handle<FixedArrayBase> elms_obj;
+  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+    return CallJsBuiltin(isolate, "ArrayPush", args);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
 
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
-  if (to_add == 0) {
-    return Smi::FromInt(len);
+  if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
+    return CallJsBuiltin(isolate, "ArrayPush", args);
   }
-  // Currently fixed arrays cannot grow too big, so
-  // we should never hit this case.
-  ASSERT(to_add <= (Smi::kMaxValue - len));
+  DCHECK(!array->map()->is_observed());
 
-  int new_length = len + to_add;
+  ElementsKind kind = array->GetElementsKind();
 
-  if (new_length > elms->length()) {
-    // New backing storage is needed.
-    int capacity = new_length + (new_length >> 1) + 16;
-    Object* obj;
-    { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  if (IsFastSmiOrObjectElementsKind(kind)) {
+    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+    if (to_add == 0) {
+      return Smi::FromInt(len);
     }
-    FixedArray* new_elms = FixedArray::cast(obj);
+    // Currently fixed arrays cannot grow too big, so
+    // we should never hit this case.
+    DCHECK(to_add <= (Smi::kMaxValue - len));
 
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               new_elms, FAST_ELEMENTS, 0, len);
-    FillWithHoles(heap, new_elms, new_length, capacity);
+    int new_length = len + to_add;
 
-    elms = new_elms;
+    if (new_length > elms->length()) {
+      // New backing storage is needed.
+      int capacity = new_length + (new_length >> 1) + 16;
+      Handle<FixedArray> new_elms =
+          isolate->factory()->NewUninitializedFixedArray(capacity);
+
+      ElementsAccessor* accessor = array->GetElementsAccessor();
+      accessor->CopyElements(
+          elms_obj, 0, kind, new_elms, 0,
+          ElementsAccessor::kCopyToEndAndInitializeToHole);
+
+      elms = new_elms;
+    }
+
+    // Add the provided values.
+    DisallowHeapAllocation no_gc;
+    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+    for (int index = 0; index < to_add; index++) {
+      elms->set(index + len, args[index + 1], mode);
+    }
+
+    if (*elms != array->elements()) {
+      array->set_elements(*elms);
+    }
+
+    // Set the length.
+    array->set_length(Smi::FromInt(new_length));
+    return Smi::FromInt(new_length);
+  } else {
+    int elms_len = elms_obj->length();
+    if (to_add == 0) {
+      return Smi::FromInt(len);
+    }
+    // Currently fixed arrays cannot grow too big, so
+    // we should never hit this case.
+    DCHECK(to_add <= (Smi::kMaxValue - len));
+
+    int new_length = len + to_add;
+
+    Handle<FixedDoubleArray> new_elms;
+
+    if (new_length > elms_len) {
+      // New backing storage is needed.
+      int capacity = new_length + (new_length >> 1) + 16;
+      // Create new backing store; since capacity > 0, we can
+      // safely cast to FixedDoubleArray.
+      new_elms = Handle<FixedDoubleArray>::cast(
+          isolate->factory()->NewFixedDoubleArray(capacity));
+
+      ElementsAccessor* accessor = array->GetElementsAccessor();
+      accessor->CopyElements(
+          elms_obj, 0, kind, new_elms, 0,
+          ElementsAccessor::kCopyToEndAndInitializeToHole);
+
+    } else {
+      // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
+      // empty_fixed_array.
+      new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
+    }
+
+    // Add the provided values.
+    DisallowHeapAllocation no_gc;
+    int index;
+    for (index = 0; index < to_add; index++) {
+      Object* arg = args[index + 1];
+      new_elms->set(index + len, arg->Number());
+    }
+
+    if (*new_elms != array->elements()) {
+      array->set_elements(*new_elms);
+    }
+
+    // Set the length.
+    array->set_length(Smi::FromInt(new_length));
+    return Smi::FromInt(new_length);
   }
-
-  // Add the provided values.
-  AssertNoAllocation no_gc;
-  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-  for (int index = 0; index < to_add; index++) {
-    elms->set(index + len, args[index + 1], mode);
-  }
-
-  if (elms != array->elements()) {
-    array->set_elements(elms);
-  }
-
-  // Set the length.
-  array->set_length(Smi::FromInt(new_length));
-  return Smi::FromInt(new_length);
 }
 
 
 BUILTIN(ArrayPop) {
-  Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
-    if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  MaybeHandle<FixedArrayBase> maybe_elms_obj =
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+  Handle<FixedArrayBase> elms_obj;
+  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
+    return CallJsBuiltin(isolate, "ArrayPop", args);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
+
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
-  if (len == 0) return heap->undefined_value();
+  if (len == 0) return isolate->heap()->undefined_value();
 
-  // Get top element
-  MaybeObject* top = elms->get(len - 1);
-
-  // Set the length.
-  array->set_length(Smi::FromInt(len - 1));
-
-  if (!top->IsTheHole()) {
-    // Delete the top element.
-    elms->set_the_hole(len - 1);
-    return top;
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  int new_length = len - 1;
+  Handle<Object> element =
+      accessor->Get(array, array, new_length, elms_obj).ToHandleChecked();
+  if (element->IsTheHole()) {
+    return CallJsBuiltin(isolate, "ArrayPop", args);
   }
-
-  top = array->GetPrototype()->GetElement(len - 1);
-
-  return top;
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate,
+      accessor->SetLength(array, handle(Smi::FromInt(new_length), isolate)));
+  return *element;
 }
 
 
 BUILTIN(ArrayShift) {
+  HandleScope scope(isolate);
   Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
-    if (maybe_elms_obj == NULL)
-        return CallJsBuiltin(isolate, "ArrayShift", args);
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
-  }
-  if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+  Handle<Object> receiver = args.receiver();
+  MaybeHandle<FixedArrayBase> maybe_elms_obj =
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+  Handle<FixedArrayBase> elms_obj;
+  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+      !IsJSArrayFastElementMovingAllowed(heap,
+                                         *Handle<JSArray>::cast(receiver))) {
     return CallJsBuiltin(isolate, "ArrayShift", args);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
 
   // Get first element
-  Object* first = elms->get(0);
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  Handle<Object> first =
+    accessor->Get(array, array, 0, elms_obj).ToHandleChecked();
   if (first->IsTheHole()) {
-    first = heap->undefined_value();
+    return CallJsBuiltin(isolate, "ArrayShift", args);
   }
 
-  if (!heap->lo_space()->Contains(elms)) {
-    array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+  if (heap->CanMoveObjectStart(*elms_obj)) {
+    array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
   } else {
     // Shift the elements.
-    AssertNoAllocation no_gc;
-    MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
-    elms->set(len - 1, heap->the_hole_value());
+    if (elms_obj->IsFixedArray()) {
+      Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+      DisallowHeapAllocation no_gc;
+      heap->MoveElements(*elms, 0, 1, len - 1);
+      elms->set(len - 1, heap->the_hole_value());
+    } else {
+      Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
+      MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
+      elms->set_the_hole(len - 1);
+    }
   }
 
   // Set the length.
   array->set_length(Smi::FromInt(len - 1));
 
-  return first;
+  return *first;
 }
 
 
 BUILTIN(ArrayUnshift) {
+  HandleScope scope(isolate);
   Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
-    if (maybe_elms_obj == NULL)
-        return CallJsBuiltin(isolate, "ArrayUnshift", args);
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
-  }
-  if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+  Handle<Object> receiver = args.receiver();
+  MaybeHandle<FixedArrayBase> maybe_elms_obj =
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+  Handle<FixedArrayBase> elms_obj;
+  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+      !IsJSArrayFastElementMovingAllowed(heap,
+                                         *Handle<JSArray>::cast(receiver))) {
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
-
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  DCHECK(!array->map()->is_observed());
+  if (!array->HasFastSmiOrObjectElements()) {
+    return CallJsBuiltin(isolate, "ArrayUnshift", args);
+  }
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
   int new_length = len + to_add;
   // Currently fixed arrays cannot grow too big, so
   // we should never hit this case.
-  ASSERT(to_add <= (Smi::kMaxValue - len));
+  DCHECK(to_add <= (Smi::kMaxValue - len));
 
-  MaybeObject* maybe_object =
-      array->EnsureCanContainElements(&args, 1, to_add,
-                                      DONT_ALLOW_DOUBLE_ELEMENTS);
-  if (maybe_object->IsFailure()) return maybe_object;
+  if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
+    return CallJsBuiltin(isolate, "ArrayUnshift", args);
+  }
+
+  Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+
+  JSObject::EnsureCanContainElements(array, &args, 1, to_add,
+                                     DONT_ALLOW_DOUBLE_ELEMENTS);
 
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
-    Object* obj;
-    { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    }
-    FixedArray* new_elms = FixedArray::cast(obj);
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               new_elms, FAST_ELEMENTS, to_add, len);
-    FillWithHoles(heap, new_elms, new_length, capacity);
+    Handle<FixedArray> new_elms =
+        isolate->factory()->NewUninitializedFixedArray(capacity);
+
+    ElementsKind kind = array->GetElementsKind();
+    ElementsAccessor* accessor = array->GetElementsAccessor();
+    accessor->CopyElements(
+        elms, 0, kind, new_elms, to_add,
+        ElementsAccessor::kCopyToEndAndInitializeToHole);
+
     elms = new_elms;
-    array->set_elements(elms);
+    array->set_elements(*elms);
   } else {
-    AssertNoAllocation no_gc;
-    MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
+    DisallowHeapAllocation no_gc;
+    heap->MoveElements(*elms, to_add, 0, len);
   }
 
   // Add the provided values.
-  AssertNoAllocation no_gc;
+  DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < to_add; i++) {
     elms->set(i, args[i + 1], mode);
@@ -676,70 +560,98 @@
 
 
 BUILTIN(ArraySlice) {
+  HandleScope scope(isolate);
   Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  FixedArray* elms;
+  Handle<Object> receiver = args.receiver();
   int len = -1;
-  if (receiver->IsJSArray()) {
-    JSArray* array = JSArray::cast(receiver);
-    if (!array->HasFastTypeElements() ||
-        !IsJSArrayFastElementMovingAllowed(heap, array)) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
+  int relative_start = 0;
+  int relative_end = 0;
+  {
+    DisallowHeapAllocation no_gc;
+    if (receiver->IsJSArray()) {
+      JSArray* array = JSArray::cast(*receiver);
+      if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySlice", args);
+      }
 
-    elms = FixedArray::cast(array->elements());
-    len = Smi::cast(array->length())->value();
-  } else {
-    // Array.slice(arguments, ...) is quite a common idiom (notably more
-    // than 50% of invocations in Web apps).  Treat it in C++ as well.
-    Map* arguments_map =
-        isolate->context()->global_context()->arguments_boilerplate()->map();
+      if (!array->HasFastElements()) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySlice", args);
+      }
 
-    bool is_arguments_object_with_fast_elements =
-        receiver->IsJSObject()
-        && JSObject::cast(receiver)->map() == arguments_map
-        && JSObject::cast(receiver)->HasFastTypeElements();
-    if (!is_arguments_object_with_fast_elements) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
-    elms = FixedArray::cast(JSObject::cast(receiver)->elements());
-    Object* len_obj = JSObject::cast(receiver)
-        ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
-    if (!len_obj->IsSmi()) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
-    len = Smi::cast(len_obj)->value();
-    if (len > elms->length()) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
-    for (int i = 0; i < len; i++) {
-      if (elms->get(i) == heap->the_hole_value()) {
+      len = Smi::cast(array->length())->value();
+    } else {
+      // Array.slice(arguments, ...) is quite a common idiom (notably more
+      // than 50% of invocations in Web apps).  Treat it in C++ as well.
+      Map* arguments_map =
+          isolate->context()->native_context()->sloppy_arguments_map();
+
+      bool is_arguments_object_with_fast_elements =
+          receiver->IsJSObject() &&
+          JSObject::cast(*receiver)->map() == arguments_map;
+      if (!is_arguments_object_with_fast_elements) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySlice", args);
+      }
+      JSObject* object = JSObject::cast(*receiver);
+
+      if (!object->HasFastElements()) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySlice", args);
+      }
+
+      Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+      if (!len_obj->IsSmi()) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySlice", args);
+      }
+      len = Smi::cast(len_obj)->value();
+      if (len > object->elements()->length()) {
+        AllowHeapAllocation allow_allocation;
         return CallJsBuiltin(isolate, "ArraySlice", args);
       }
     }
-  }
-  ASSERT(len >= 0);
-  int n_arguments = args.length() - 1;
 
-  // Note carefully choosen defaults---if argument is missing,
-  // it's undefined which gets converted to 0 for relative_start
-  // and to len for relative_end.
-  int relative_start = 0;
-  int relative_end = len;
-  if (n_arguments > 0) {
-    Object* arg1 = args[1];
-    if (arg1->IsSmi()) {
-      relative_start = Smi::cast(arg1)->value();
-    } else if (!arg1->IsUndefined()) {
-      return CallJsBuiltin(isolate, "ArraySlice", args);
-    }
-    if (n_arguments > 1) {
-      Object* arg2 = args[2];
-      if (arg2->IsSmi()) {
-        relative_end = Smi::cast(arg2)->value();
-      } else if (!arg2->IsUndefined()) {
+    DCHECK(len >= 0);
+    int n_arguments = args.length() - 1;
+
+    // Note carefully chosen defaults---if argument is missing,
+    // it's undefined which gets converted to 0 for relative_start
+    // and to len for relative_end.
+    relative_start = 0;
+    relative_end = len;
+    if (n_arguments > 0) {
+      Object* arg1 = args[1];
+      if (arg1->IsSmi()) {
+        relative_start = Smi::cast(arg1)->value();
+      } else if (arg1->IsHeapNumber()) {
+        double start = HeapNumber::cast(arg1)->value();
+        if (start < kMinInt || start > kMaxInt) {
+          AllowHeapAllocation allow_allocation;
+          return CallJsBuiltin(isolate, "ArraySlice", args);
+        }
+        relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
+      } else if (!arg1->IsUndefined()) {
+        AllowHeapAllocation allow_allocation;
         return CallJsBuiltin(isolate, "ArraySlice", args);
       }
+      if (n_arguments > 1) {
+        Object* arg2 = args[2];
+        if (arg2->IsSmi()) {
+          relative_end = Smi::cast(arg2)->value();
+        } else if (arg2->IsHeapNumber()) {
+          double end = HeapNumber::cast(arg2)->value();
+          if (end < kMinInt || end > kMaxInt) {
+            AllowHeapAllocation allow_allocation;
+            return CallJsBuiltin(isolate, "ArraySlice", args);
+          }
+          relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
+        } else if (!arg2->IsUndefined()) {
+          AllowHeapAllocation allow_allocation;
+          return CallJsBuiltin(isolate, "ArraySlice", args);
+        }
+      }
     }
   }
 
@@ -751,42 +663,58 @@
   int final = (relative_end < 0) ? Max(len + relative_end, 0)
                                  : Min(relative_end, len);
 
-  ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
-
   // Calculate the length of result array.
   int result_len = Max(final - k, 0);
 
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      result_len,
-                                      result_len);
-  JSArray* result_array;
-  if (!maybe_array->To(&result_array)) return maybe_array;
+  Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+  Handle<FixedArrayBase> elms(object->elements(), isolate);
 
-  CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
-                             FixedArray::cast(result_array->elements()),
-                             FAST_ELEMENTS, 0, result_len);
+  ElementsKind kind = object->GetElementsKind();
+  if (IsHoleyElementsKind(kind)) {
+    DisallowHeapAllocation no_gc;
+    bool packed = true;
+    ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+    for (int i = k; i < final; i++) {
+      if (!accessor->HasElement(object, object, i, elms)) {
+        packed = false;
+        break;
+      }
+    }
+    if (packed) {
+      kind = GetPackedElementsKind(kind);
+    } else if (!receiver->IsJSArray()) {
+      AllowHeapAllocation allow_allocation;
+      return CallJsBuiltin(isolate, "ArraySlice", args);
+    }
+  }
 
-  return result_array;
+  Handle<JSArray> result_array =
+      isolate->factory()->NewJSArray(kind, result_len, result_len);
+
+  DisallowHeapAllocation no_gc;
+  if (result_len == 0) return *result_array;
+
+  ElementsAccessor* accessor = object->GetElementsAccessor();
+  accessor->CopyElements(
+      elms, k, kind, handle(result_array->elements(), isolate), 0, result_len);
+  return *result_array;
 }
 
 
 BUILTIN(ArraySplice) {
+  HandleScope scope(isolate);
   Heap* heap = isolate->heap();
-  Object* receiver = *args.receiver();
-  Object* elms_obj;
-  { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
-    if (maybe_elms_obj == NULL)
-        return CallJsBuiltin(isolate, "ArraySplice", args);
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
-  }
-  if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+  Handle<Object> receiver = args.receiver();
+  MaybeHandle<FixedArrayBase> maybe_elms_obj =
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
+  Handle<FixedArrayBase> elms_obj;
+  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+      !IsJSArrayFastElementMovingAllowed(heap,
+                                         *Handle<JSArray>::cast(receiver))) {
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
 
@@ -794,10 +722,19 @@
 
   int relative_start = 0;
   if (n_arguments > 0) {
+    DisallowHeapAllocation no_gc;
     Object* arg1 = args[1];
     if (arg1->IsSmi()) {
       relative_start = Smi::cast(arg1)->value();
+    } else if (arg1->IsHeapNumber()) {
+      double start = HeapNumber::cast(arg1)->value();
+      if (start < kMinInt || start > kMaxInt) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArraySplice", args);
+      }
+      relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
     } else if (!arg1->IsUndefined()) {
+      AllowHeapAllocation allow_allocation;
       return CallJsBuiltin(isolate, "ArraySplice", args);
     }
   }
@@ -811,208 +748,309 @@
   // compatibility.
   int actual_delete_count;
   if (n_arguments == 1) {
-    ASSERT(len - actual_start >= 0);
+    DCHECK(len - actual_start >= 0);
     actual_delete_count = len - actual_start;
   } else {
     int value = 0;  // ToInteger(undefined) == 0
     if (n_arguments > 1) {
+      DisallowHeapAllocation no_gc;
       Object* arg2 = args[2];
       if (arg2->IsSmi()) {
         value = Smi::cast(arg2)->value();
       } else {
+        AllowHeapAllocation allow_allocation;
         return CallJsBuiltin(isolate, "ArraySplice", args);
       }
     }
     actual_delete_count = Min(Max(value, 0), len - actual_start);
   }
 
-  JSArray* result_array = NULL;
-  ElementsKind elements_kind =
-      JSObject::cast(receiver)->GetElementsKind();
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      actual_delete_count,
-                                      actual_delete_count);
-  if (!maybe_array->To(&result_array)) return maybe_array;
-
-  {
-    // Fill newly created array.
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
-                               FixedArray::cast(result_array->elements()),
-                               FAST_ELEMENTS, 0, actual_delete_count);
-  }
+  ElementsKind elements_kind = array->GetElementsKind();
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
   int new_length = len - actual_delete_count + item_count;
 
+  // For double mode we do not support changing the length.
+  if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
+    return CallJsBuiltin(isolate, "ArraySplice", args);
+  }
+
+  if (new_length == 0) {
+    Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
+        elms_obj, elements_kind, actual_delete_count);
+    array->set_elements(heap->empty_fixed_array());
+    array->set_length(Smi::FromInt(0));
+    return *result;
+  }
+
+  Handle<JSArray> result_array =
+      isolate->factory()->NewJSArray(elements_kind,
+                                     actual_delete_count,
+                                     actual_delete_count);
+
+  if (actual_delete_count > 0) {
+    DisallowHeapAllocation no_gc;
+    ElementsAccessor* accessor = array->GetElementsAccessor();
+    accessor->CopyElements(
+        elms_obj, actual_start, elements_kind,
+        handle(result_array->elements(), isolate), 0, actual_delete_count);
+  }
+
   bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
-    const bool trim_array = !heap->lo_space()->Contains(elms) &&
+    const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
       ((actual_start + item_count) <
           (len - actual_delete_count - actual_start));
     if (trim_array) {
       const int delta = actual_delete_count - item_count;
 
-      {
-        AssertNoAllocation no_gc;
-        MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
+      if (elms_obj->IsFixedDoubleArray()) {
+        Handle<FixedDoubleArray> elms =
+            Handle<FixedDoubleArray>::cast(elms_obj);
+        MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
+      } else {
+        Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+        DisallowHeapAllocation no_gc;
+        heap->MoveElements(*elms, delta, 0, actual_start);
       }
 
-      elms = LeftTrimFixedArray(heap, elms, delta);
-
+      if (heap->CanMoveObjectStart(*elms_obj)) {
+        // On the fast path we move the start of the object in memory.
+        elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta));
+      } else {
+        // This is the slow path. We are going to move the elements to the left
+        // by copying them. For trimmed values we store the hole.
+        if (elms_obj->IsFixedDoubleArray()) {
+          Handle<FixedDoubleArray> elms =
+              Handle<FixedDoubleArray>::cast(elms_obj);
+          MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
+          elms->FillWithHoles(len - delta, len);
+        } else {
+          Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+          DisallowHeapAllocation no_gc;
+          heap->MoveElements(*elms, 0, delta, len - delta);
+          elms->FillWithHoles(len - delta, len);
+        }
+      }
       elms_changed = true;
     } else {
-      AssertNoAllocation no_gc;
-      MoveElements(heap, &no_gc,
-                   elms, actual_start + item_count,
-                   elms, actual_start + actual_delete_count,
-                   (len - actual_delete_count - actual_start));
-      FillWithHoles(heap, elms, new_length, len);
+      if (elms_obj->IsFixedDoubleArray()) {
+        Handle<FixedDoubleArray> elms =
+            Handle<FixedDoubleArray>::cast(elms_obj);
+        MoveDoubleElements(*elms, actual_start + item_count,
+                           *elms, actual_start + actual_delete_count,
+                           (len - actual_delete_count - actual_start));
+        elms->FillWithHoles(new_length, len);
+      } else {
+        Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+        DisallowHeapAllocation no_gc;
+        heap->MoveElements(*elms, actual_start + item_count,
+                           actual_start + actual_delete_count,
+                           (len - actual_delete_count - actual_start));
+        elms->FillWithHoles(new_length, len);
+      }
     }
   } else if (item_count > actual_delete_count) {
+    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
-    ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
+    DCHECK((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
 
     // Check if array need to grow.
     if (new_length > elms->length()) {
       // New backing storage is needed.
       int capacity = new_length + (new_length >> 1) + 16;
-      Object* obj;
-      { MaybeObject* maybe_obj =
-            heap->AllocateUninitializedFixedArray(capacity);
-        if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-      }
-      FixedArray* new_elms = FixedArray::cast(obj);
+      Handle<FixedArray> new_elms =
+          isolate->factory()->NewUninitializedFixedArray(capacity);
 
-      {
+      DisallowHeapAllocation no_gc;
+
+      ElementsKind kind = array->GetElementsKind();
+      ElementsAccessor* accessor = array->GetElementsAccessor();
+      if (actual_start > 0) {
         // Copy the part before actual_start as is.
-        CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                                   new_elms, FAST_ELEMENTS, 0, actual_start);
-        const int to_copy = len - actual_delete_count - actual_start;
-        CopyObjectToObjectElements(elms, FAST_ELEMENTS,
-                                   actual_start + actual_delete_count,
-                                   new_elms, FAST_ELEMENTS,
-                                   actual_start + item_count, to_copy);
+        accessor->CopyElements(
+            elms, 0, kind, new_elms, 0, actual_start);
       }
+      accessor->CopyElements(
+          elms, actual_start + actual_delete_count, kind,
+          new_elms, actual_start + item_count,
+          ElementsAccessor::kCopyToEndAndInitializeToHole);
 
-      FillWithHoles(heap, new_elms, new_length, capacity);
-
-      elms = new_elms;
+      elms_obj = new_elms;
       elms_changed = true;
     } else {
-      AssertNoAllocation no_gc;
-      MoveElements(heap, &no_gc,
-                   elms, actual_start + item_count,
-                   elms, actual_start + actual_delete_count,
-                   (len - actual_delete_count - actual_start));
+      DisallowHeapAllocation no_gc;
+      heap->MoveElements(*elms, actual_start + item_count,
+                         actual_start + actual_delete_count,
+                         (len - actual_delete_count - actual_start));
     }
   }
 
-  AssertNoAllocation no_gc;
-  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-  for (int k = actual_start; k < actual_start + item_count; k++) {
-    elms->set(k, args[3 + k - actual_start], mode);
+  if (IsFastDoubleElementsKind(elements_kind)) {
+    Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
+    for (int k = actual_start; k < actual_start + item_count; k++) {
+      Object* arg = args[3 + k - actual_start];
+      if (arg->IsSmi()) {
+        elms->set(k, Smi::cast(arg)->value());
+      } else {
+        elms->set(k, HeapNumber::cast(arg)->value());
+      }
+    }
+  } else {
+    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+    DisallowHeapAllocation no_gc;
+    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+    for (int k = actual_start; k < actual_start + item_count; k++) {
+      elms->set(k, args[3 + k - actual_start], mode);
+    }
   }
 
   if (elms_changed) {
-    array->set_elements(elms);
+    array->set_elements(*elms_obj);
   }
-
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
 
-  return result_array;
+  return *result_array;
 }
 
 
 BUILTIN(ArrayConcat) {
-  Heap* heap = isolate->heap();
-  Context* global_context = isolate->context()->global_context();
-  JSObject* array_proto =
-      JSObject::cast(global_context->array_function()->prototype());
-  if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
-    return CallJsBuiltin(isolate, "ArrayConcat", args);
-  }
+  HandleScope scope(isolate);
 
-  // Iterate through all the arguments performing checks
-  // and calculating total length.
   int n_arguments = args.length();
   int result_len = 0;
-  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
-  for (int i = 0; i < n_arguments; i++) {
-    Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
-        || JSArray::cast(arg)->GetPrototype() != array_proto) {
-      return CallJsBuiltin(isolate, "ArrayConcat", args);
+  ElementsKind elements_kind = GetInitialFastElementsKind();
+  bool has_double = false;
+  {
+    DisallowHeapAllocation no_gc;
+    Heap* heap = isolate->heap();
+    Context* native_context = isolate->context()->native_context();
+    JSObject* array_proto =
+        JSObject::cast(native_context->array_function()->prototype());
+    if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
+      AllowHeapAllocation allow_allocation;
+      return CallJsBuiltin(isolate, "ArrayConcatJS", args);
     }
 
-    int len = Smi::cast(JSArray::cast(arg)->length())->value();
+    // Iterate through all the arguments performing checks
+    // and calculating total length.
+    bool is_holey = false;
+    for (int i = 0; i < n_arguments; i++) {
+      Object* arg = args[i];
+      PrototypeIterator iter(isolate, arg);
+      if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() ||
+          iter.GetCurrent() != array_proto) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArrayConcatJS", args);
+      }
+      int len = Smi::cast(JSArray::cast(arg)->length())->value();
 
-    // We shouldn't overflow when adding another len.
-    const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
-    STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
-    USE(kHalfOfMaxInt);
-    result_len += len;
-    ASSERT(result_len >= 0);
+      // We shouldn't overflow when adding another len.
+      const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+      STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+      USE(kHalfOfMaxInt);
+      result_len += len;
+      DCHECK(result_len >= 0);
 
-    if (result_len > FixedArray::kMaxLength) {
-      return CallJsBuiltin(isolate, "ArrayConcat", args);
+      if (result_len > FixedDoubleArray::kMaxLength) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsBuiltin(isolate, "ArrayConcatJS", args);
+      }
+
+      ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+      has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+      is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+      if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
+        elements_kind = arg_kind;
+      }
     }
-
-    if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
-      elements_kind = FAST_ELEMENTS;
-    }
+    if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
   }
 
-  // Allocate result.
-  JSArray* result_array;
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      result_len,
-                                      result_len);
-  if (!maybe_array->To(&result_array)) return maybe_array;
-  if (result_len == 0) return result_array;
+  // If a double array is concatenated into a fast elements array, the fast
+  // elements array needs to be initialized to contain proper holes, since
+  // boxing doubles may cause incremental marking.
+  ArrayStorageAllocationMode mode =
+      has_double && IsFastObjectElementsKind(elements_kind)
+      ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
+  Handle<JSArray> result_array =
+      isolate->factory()->NewJSArray(elements_kind,
+                                     result_len,
+                                     result_len,
+                                     mode);
+  if (result_len == 0) return *result_array;
 
-  // Copy data.
-  int start_pos = 0;
-  FixedArray* result_elms(FixedArray::cast(result_array->elements()));
+  int j = 0;
+  Handle<FixedArrayBase> storage(result_array->elements(), isolate);
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
   for (int i = 0; i < n_arguments; i++) {
+    // TODO(ishell): It is crucial to keep |array| as a raw pointer to avoid
+    // performance degradation. Revisit this later.
     JSArray* array = JSArray::cast(args[i]);
     int len = Smi::cast(array->length())->value();
-    FixedArray* elms = FixedArray::cast(array->elements());
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               result_elms, FAST_ELEMENTS,
-                               start_pos, len);
-    start_pos += len;
+    ElementsKind from_kind = array->GetElementsKind();
+    if (len > 0) {
+      accessor->CopyElements(array, 0, from_kind, storage, j, len);
+      j += len;
+    }
   }
-  ASSERT(start_pos == result_len);
 
-  return result_array;
+  DCHECK(j == result_len);
+
+  return *result_array;
 }
 
 
 // -----------------------------------------------------------------------------
-// Strict mode poison pills
+// Generator and strict mode poison pills
 
 
 BUILTIN(StrictModePoisonPill) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_poison_pill", HandleVector<Object>(NULL, 0)));
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError("strict_poison_pill", HandleVector<Object>(NULL, 0)));
 }
 
+
+BUILTIN(GeneratorPoisonPill) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError("generator_poison_pill", HandleVector<Object>(NULL, 0)));
+}
+
+
 // -----------------------------------------------------------------------------
 //
 
 
+// Searches the hidden prototype chain of the given object for the first
+// object that is an instance of the given type.  If no such object can
+// be found then Heap::null_value() is returned.
+static inline Object* FindHidden(Heap* heap,
+                                 Object* object,
+                                 FunctionTemplateInfo* type) {
+  for (PrototypeIterator iter(heap->isolate(), object,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+    if (type->IsTemplateFor(iter.GetCurrent())) {
+      return iter.GetCurrent();
+    }
+  }
+  return heap->null_value();
+}
+
+
 // Returns the holder JSObject if the function can legally be called
 // with this receiver.  Returns Heap::null_value() if the call is
 // illegal.  Any arguments that don't fit the expected type are
-// overwritten with undefined.  Arguments that do fit the expected
-// type is overwritten with the object in the prototype chain that
-// actually has that type.
+// overwritten with undefined.  Note that holder and the arguments are
+// implicitly rewritten with the first object in the hidden prototype
+// chain that actually has the expected type.
 static inline Object* TypeCheck(Heap* heap,
                                 int argc,
                                 Object** argv,
@@ -1025,15 +1063,10 @@
   SignatureInfo* sig = SignatureInfo::cast(sig_obj);
   // If necessary, check the receiver
   Object* recv_type = sig->receiver();
-
   Object* holder = recv;
   if (!recv_type->IsUndefined()) {
-    for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
-      if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
-        break;
-      }
-    }
-    if (holder == heap->null_value()) return holder;
+    holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
+    if (holder == heap->null_value()) return heap->null_value();
   }
   Object* args_obj = sig->args();
   // If there is no argument signature, we're done
@@ -1046,86 +1079,78 @@
     if (argtype->IsUndefined()) continue;
     Object** arg = &argv[-1 - i];
     Object* current = *arg;
-    for (; current != heap->null_value(); current = current->GetPrototype()) {
-      if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
-        *arg = current;
-        break;
-      }
-    }
-    if (current == heap->null_value()) *arg = heap->undefined_value();
+    current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
+    if (current == heap->null_value()) current = heap->undefined_value();
+    *arg = current;
   }
   return holder;
 }
 
 
 template <bool is_construct>
-MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
+MUST_USE_RESULT static Object* HandleApiCallHelper(
     BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
-  ASSERT(is_construct == CalledAsConstructor(isolate));
+  DCHECK(is_construct == CalledAsConstructor(isolate));
   Heap* heap = isolate->heap();
 
   HandleScope scope(isolate);
   Handle<JSFunction> function = args.called_function();
-  ASSERT(function->shared()->IsApiFunction());
+  DCHECK(function->shared()->IsApiFunction());
 
-  FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
+  Handle<FunctionTemplateInfo> fun_data(
+      function->shared()->get_api_func_data(), isolate);
   if (is_construct) {
-    Handle<FunctionTemplateInfo> desc(fun_data, isolate);
-    bool pending_exception = false;
-    isolate->factory()->ConfigureInstance(
-        desc, Handle<JSObject>::cast(args.receiver()), &pending_exception);
-    ASSERT(isolate->has_pending_exception() == pending_exception);
-    if (pending_exception) return Failure::Exception();
-    fun_data = *desc;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, fun_data,
+        isolate->factory()->ConfigureInstance(
+            fun_data, Handle<JSObject>::cast(args.receiver())));
   }
 
-  Object* raw_holder = TypeCheck(heap, args.length(), &args[0], fun_data);
+  SharedFunctionInfo* shared = function->shared();
+  if (shared->strict_mode() == SLOPPY && !shared->native()) {
+    Object* recv = args[0];
+    DCHECK(!recv->IsNull());
+    if (recv->IsUndefined()) args[0] = function->global_proxy();
+  }
+
+  Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
 
   if (raw_holder->IsNull()) {
     // This function cannot be called with the given receiver.  Abort!
-    Handle<Object> obj =
-        isolate->factory()->NewTypeError(
-            "illegal_invocation", HandleVector(&function, 1));
-    return isolate->Throw(*obj);
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError("illegal_invocation", HandleVector(&function, 1)));
   }
 
   Object* raw_call_data = fun_data->call_code();
   if (!raw_call_data->IsUndefined()) {
     CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
     Object* callback_obj = call_data->callback();
-    v8::InvocationCallback callback =
-        v8::ToCData<v8::InvocationCallback>(callback_obj);
+    v8::FunctionCallback callback =
+        v8::ToCData<v8::FunctionCallback>(callback_obj);
     Object* data_obj = call_data->data();
     Object* result;
 
     LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
-    ASSERT(raw_holder->IsJSObject());
+    DCHECK(raw_holder->IsJSObject());
 
-    CustomArguments custom(isolate);
-    v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-        data_obj, *function, raw_holder);
+    FunctionCallbackArguments custom(isolate,
+                                     data_obj,
+                                     *function,
+                                     raw_holder,
+                                     &args[0] - 1,
+                                     args.length() - 1,
+                                     is_construct);
 
-    v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
-        custom.end(),
-        &args[0] - 1,
-        args.length() - 1,
-        is_construct);
-
-    v8::Handle<v8::Value> value;
-    {
-      // Leaving JavaScript.
-      VMState state(isolate, EXTERNAL);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(callback_obj));
-      value = callback(new_args);
-    }
+    v8::Handle<v8::Value> value = custom.Call(callback);
     if (value.IsEmpty()) {
       result = heap->undefined_value();
     } else {
       result = *reinterpret_cast<Object**>(*value);
+      result->VerifyApiCallResultType();
     }
 
-    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
     if (!is_construct || result->IsJSObject()) return result;
   }
 
@@ -1143,78 +1168,16 @@
 }
 
 
-#ifdef DEBUG
-
-static void VerifyTypeCheck(Handle<JSObject> object,
-                            Handle<JSFunction> function) {
-  ASSERT(function->shared()->IsApiFunction());
-  FunctionTemplateInfo* info = function->shared()->get_api_func_data();
-  if (info->signature()->IsUndefined()) return;
-  SignatureInfo* signature = SignatureInfo::cast(info->signature());
-  Object* receiver_type = signature->receiver();
-  if (receiver_type->IsUndefined()) return;
-  FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
-  ASSERT(object->IsInstanceOf(type));
-}
-
-#endif
-
-
-BUILTIN(FastHandleApiCall) {
-  ASSERT(!CalledAsConstructor(isolate));
-  Heap* heap = isolate->heap();
-  const bool is_construct = false;
-
-  // We expect four more arguments: callback, function, call data, and holder.
-  const int args_length = args.length() - 4;
-  ASSERT(args_length >= 0);
-
-  Object* callback_obj = args[args_length];
-
-  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
-      &args[args_length + 1],
-      &args[0] - 1,
-      args_length - 1,
-      is_construct);
-
-#ifdef DEBUG
-  VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
-                  Utils::OpenHandle(*new_args.Callee()));
-#endif
-  HandleScope scope(isolate);
-  Object* result;
-  v8::Handle<v8::Value> value;
-  {
-    // Leaving JavaScript.
-    VMState state(isolate, EXTERNAL);
-    ExternalCallbackScope call_scope(isolate,
-                                     v8::ToCData<Address>(callback_obj));
-    v8::InvocationCallback callback =
-        v8::ToCData<v8::InvocationCallback>(callback_obj);
-
-    value = callback(new_args);
-  }
-  if (value.IsEmpty()) {
-    result = heap->undefined_value();
-  } else {
-    result = *reinterpret_cast<Object**>(*value);
-  }
-
-  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-  return result;
-}
-
-
 // Helper function to handle calls to non-function objects created through the
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
-MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
+MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
     Isolate* isolate,
     bool is_construct_call,
     BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
   // Non-functions are never called as constructors. Even if this is an object
   // called as a constructor the delegate call is not a construct call.
-  ASSERT(!CalledAsConstructor(isolate));
+  DCHECK(!CalledAsConstructor(isolate));
   Heap* heap = isolate->heap();
 
   Handle<Object> receiver = args.receiver();
@@ -1224,16 +1187,16 @@
 
   // Get the invocation callback from the function descriptor that was
   // used to create the called object.
-  ASSERT(obj->map()->has_instance_call_handler());
+  DCHECK(obj->map()->has_instance_call_handler());
   JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
-  ASSERT(constructor->shared()->IsApiFunction());
+  DCHECK(constructor->shared()->IsApiFunction());
   Object* handler =
       constructor->shared()->get_api_func_data()->instance_call_handler();
-  ASSERT(!handler->IsUndefined());
+  DCHECK(!handler->IsUndefined());
   CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
   Object* callback_obj = call_data->callback();
-  v8::InvocationCallback callback =
-      v8::ToCData<v8::InvocationCallback>(callback_obj);
+  v8::FunctionCallback callback =
+      v8::ToCData<v8::FunctionCallback>(callback_obj);
 
   // Get the data for the call and perform the callback.
   Object* result;
@@ -1241,30 +1204,23 @@
     HandleScope scope(isolate);
     LOG(isolate, ApiObjectAccess("call non-function", obj));
 
-    CustomArguments custom(isolate);
-    v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-        call_data->data(), constructor, obj);
-    v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
-        custom.end(),
-        &args[0] - 1,
-        args.length() - 1,
-        is_construct_call);
-    v8::Handle<v8::Value> value;
-    {
-      // Leaving JavaScript.
-      VMState state(isolate, EXTERNAL);
-      ExternalCallbackScope call_scope(isolate,
-                                       v8::ToCData<Address>(callback_obj));
-      value = callback(new_args);
-    }
+    FunctionCallbackArguments custom(isolate,
+                                     call_data->data(),
+                                     constructor,
+                                     obj,
+                                     &args[0] - 1,
+                                     args.length() - 1,
+                                     is_construct_call);
+    v8::Handle<v8::Value> value = custom.Call(callback);
     if (value.IsEmpty()) {
       result = heap->undefined_value();
     } else {
       result = *reinterpret_cast<Object**>(*value);
+      result->VerifyApiCallResultType();
     }
   }
   // Check for exceptions and return result.
-  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
   return result;
 }
 
@@ -1283,48 +1239,23 @@
 }
 
 
-static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
-  LoadIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
-  LoadIC::GenerateStringLength(masm, false);
-}
-
-
-static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
-  LoadIC::GenerateStringLength(masm, true);
-}
-
-
-static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
-  LoadIC::GenerateFunctionPrototype(masm);
-}
-
-
-static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
-  LoadIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_LoadIC_PreMonomorphic(MacroAssembler* masm) {
-  LoadIC::GeneratePreMonomorphic(masm);
-}
-
-
 static void Generate_LoadIC_Miss(MacroAssembler* masm) {
   LoadIC::GenerateMiss(masm);
 }
 
 
-static void Generate_LoadIC_Megamorphic(MacroAssembler* masm) {
-  LoadIC::GenerateMegamorphic(masm);
+static void Generate_LoadIC_Normal(MacroAssembler* masm) {
+  LoadIC::GenerateNormal(masm);
 }
 
 
-static void Generate_LoadIC_Normal(MacroAssembler* masm) {
-  LoadIC::GenerateNormal(masm);
+static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+  NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
+}
+
+
+static void Generate_LoadIC_Slow(MacroAssembler* masm) {
+  LoadIC::GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -1339,12 +1270,7 @@
 
 
 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm, false);
-}
-
-
-static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm, true);
+  KeyedLoadIC::GenerateMiss(masm);
 }
 
 
@@ -1362,23 +1288,6 @@
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }
 
-static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateIndexedInterceptor(masm);
-}
-
-static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateNonStrictArguments(masm);
-}
-
-static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
-  StoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateInitialize(masm);
-}
-
 
 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
   StoreIC::GenerateMiss(masm);
@@ -1390,63 +1299,33 @@
 }
 
 
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
-  StoreIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm, kStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
-}
-
-
-static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm, false);
-}
-
-
-static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm, true);
+static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+  NamedStoreHandlerCompiler::GenerateSlow(masm);
 }
 
 
 static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateSlow(masm);
+  ElementHandlerCompiler::GenerateStoreSlow(masm);
+}
+
+
+static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+  NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
+}
+
+
+static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateGeneric(masm, STRICT);
+}
+
+
+static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMiss(masm);
 }
 
 
@@ -1459,80 +1338,86 @@
   KeyedStoreIC::GenerateInitialize(masm);
 }
 
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateNonStrictArguments(masm);
+
+static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
+  KeyedStoreIC::GeneratePreMonomorphic(masm);
 }
 
-static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
+
+static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+  KeyedStoreIC::GeneratePreMonomorphic(masm);
 }
 
-static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
+
+static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateSloppyArguments(masm);
 }
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
+
+static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) {
+  DebugCodegen::GenerateCallICStubDebugBreak(masm);
+}
+
+
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateLoadICDebugBreak(masm);
+  DebugCodegen::GenerateLoadICDebugBreak(masm);
 }
 
 
 static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateStoreICDebugBreak(masm);
+  DebugCodegen::GenerateStoreICDebugBreak(masm);
 }
 
 
 static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateKeyedLoadICDebugBreak(masm);
+  DebugCodegen::GenerateKeyedLoadICDebugBreak(masm);
 }
 
 
 static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateKeyedStoreICDebugBreak(masm);
+  DebugCodegen::GenerateKeyedStoreICDebugBreak(masm);
+}
+
+
+static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
+  DebugCodegen::GenerateCompareNilICDebugBreak(masm);
 }
 
 
 static void Generate_Return_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateReturnDebugBreak(masm);
+  DebugCodegen::GenerateReturnDebugBreak(masm);
 }
 
 
 static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateCallFunctionStubDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_Recording_DebugBreak(
-    MacroAssembler* masm) {
-  Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
+  DebugCodegen::GenerateCallFunctionStubDebugBreak(masm);
 }
 
 
 static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateCallConstructStubDebugBreak(masm);
+  DebugCodegen::GenerateCallConstructStubDebugBreak(masm);
 }
 
 
 static void Generate_CallConstructStub_Recording_DebugBreak(
     MacroAssembler* masm) {
-  Debug::GenerateCallConstructStubRecordDebugBreak(masm);
+  DebugCodegen::GenerateCallConstructStubRecordDebugBreak(masm);
 }
 
 
 static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateSlotDebugBreak(masm);
+  DebugCodegen::GenerateSlotDebugBreak(masm);
 }
 
 
 static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
-  Debug::GeneratePlainReturnLiveEdit(masm);
+  DebugCodegen::GeneratePlainReturnLiveEdit(masm);
 }
 
 
 static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
-  Debug::GenerateFrameDropperLiveEdit(masm);
+  DebugCodegen::GenerateFrameDropperLiveEdit(masm);
 }
-#endif
 
 
 Builtins::Builtins() : initialized_(false) {
@@ -1577,11 +1462,11 @@
 class BuiltinFunctionTable {
  public:
   BuiltinDesc* functions() {
-    CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
+    base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
     return functions_;
   }
 
-  OnceType once_;
+  base::OnceType once_;
   BuiltinDesc functions_[Builtins::builtin_count + 1];
 
   friend class Builtins;
@@ -1623,18 +1508,27 @@
     functions->extra_args = NO_EXTRA_ARGUMENTS;                             \
     ++functions;
 
+#define DEF_FUNCTION_PTR_H(aname, kind)                                     \
+    functions->generator = FUNCTION_ADDR(Generate_##aname);                 \
+    functions->c_code = NULL;                                               \
+    functions->s_name = #aname;                                             \
+    functions->name = k##aname;                                             \
+    functions->flags = Code::ComputeHandlerFlags(Code::kind);               \
+    functions->extra_args = NO_EXTRA_ARGUMENTS;                             \
+    ++functions;
+
   BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
   BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+  BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
   BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
 
 #undef DEF_FUNCTION_PTR_C
 #undef DEF_FUNCTION_PTR_A
 }
 
-void Builtins::SetUp(bool create_heap_objects) {
-  ASSERT(!initialized_);
-  Isolate* isolate = Isolate::Current();
-  Heap* heap = isolate->heap();
+
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
+  DCHECK(!initialized_);
 
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
@@ -1644,7 +1538,13 @@
   // For now we generate builtin adaptor code into a stack-allocated
   // buffer, before copying it into individual code objects. Be careful
   // with alignment; some platforms don't like unaligned code.
-  union { int force_alignment; byte buffer[4*KB]; } u;
+#ifdef DEBUG
+  // We can generate a lot of debug code on Arm64.
+  const size_t buffer_size = 32*KB;
+#else
+  const size_t buffer_size = 8*KB;
+#endif
+  union { int force_alignment; byte buffer[buffer_size]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
@@ -1657,38 +1557,26 @@
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
-      ASSERT(!masm.has_frame());
+      DCHECK(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
       masm.GetCode(&desc);
       Code::Flags flags =  functions[i].flags;
-      Object* code = NULL;
-      {
-        // During startup it's OK to always allocate and defer GC to later.
-        // This simplifies things because we don't need to retry.
-        AlwaysAllocateScope __scope__;
-        { MaybeObject* maybe_code =
-              heap->CreateCode(desc, flags, masm.CodeObject());
-          if (!maybe_code->ToObject(&code)) {
-            v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
-          }
-        }
-      }
+      Handle<Code> code =
+          isolate->factory()->NewCode(desc, flags, masm.CodeObject());
       // Log the event and add the code to the builtins array.
       PROFILE(isolate,
-              CodeCreateEvent(Logger::BUILTIN_TAG,
-                              Code::cast(code),
-                              functions[i].s_name));
-      GDBJIT(AddCode(GDBJITInterface::BUILTIN,
-                     functions[i].s_name,
-                     Code::cast(code)));
-      builtins_[i] = code;
+              CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
+      builtins_[i] = *code;
+      if (code->kind() == Code::BUILTIN) code->set_builtin_index(i);
 #ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_builtin_code) {
-        PrintF("Builtin: %s\n", functions[i].s_name);
-        Code::cast(code)->Disassemble(functions[i].s_name);
-        PrintF("\n");
+        CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+        OFStream os(trace_scope.file());
+        os << "Builtin: " << functions[i].s_name << "\n";
+        code->Disassemble(functions[i].s_name, os);
+        os << "\n";
       }
 #endif
     } else {
@@ -1727,6 +1615,16 @@
 }
 
 
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+  masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+  masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
 #define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)               \
 Handle<Code> Builtins::name() {                               \
   Code** code_address =                                       \
@@ -1739,8 +1637,15 @@
       reinterpret_cast<Code**>(builtin_address(k##name));   \
   return Handle<Code>(code_address);                        \
 }
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind)               \
+Handle<Code> Builtins::name() {                             \
+  Code** code_address =                                     \
+      reinterpret_cast<Code**>(builtin_address(k##name));   \
+  return Handle<Code>(code_address);                        \
+}
 BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
 BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
 BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
 #undef DEFINE_BUILTIN_ACCESSOR_C
 #undef DEFINE_BUILTIN_ACCESSOR_A