Merge V8 5.4.500.40

Test: Manual - built & ran d8
Change-Id: I4edfa2853d3e565b729723645395688ece3193f4
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
new file mode 100644
index 0000000..1b643d4
--- /dev/null
+++ b/src/builtins/arm/builtins-arm.cc
@@ -0,0 +1,2932 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- r0                 : number of arguments excluding receiver
+  //  -- r1                 : target
+  //  -- r3                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects r0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ add(r0, r0, Operand(num_extra_args + 1));
+
+  // Insert extra arguments.
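+  // The argument count is smi-tagged while it sits on the stack so that the
+  // GC only ever sees properly tagged values; it is untagged again right away
+  // because JumpToExternalReference expects the raw count.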
+  __ SmiTag(r0);
+  __ Push(r0, r1, r3);
+  __ SmiUntag(r0);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the current native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the current native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(r2);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(r2);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  __ mov(r3, r1);
+  // Run the native code for the Array function called as a normal function.
+  // Tail call a stub.
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r0                     : number of arguments
+  //  -- r1                     : function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
+  Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
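+  // cc_done holds when the accumulator already wins the comparison and the
+  // loop can continue; cc_swap holds when the parameter wins and must replace
+  // the accumulator; vs catches unordered (NaN) operands.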
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r5 and the double value in d1.
+  __ LoadRoot(r5, root_index);
+  __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+
+  Label done_loop, loop;
+  __ mov(r4, r0);
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ sub(r4, r4, Operand(1), SetCC);
+    __ b(lt, &done_loop);
+
+    // Load the next parameter tagged value into r2.
+    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+
+    // Load the double value of the parameter into d2, maybe converting the
+    // parameter to a number first using the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r2, &convert_smi);
+    __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      DCHECK(!FLAG_enable_embedded_constant_pool);
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r0);
+      __ SmiTag(r4);
+      __ EnterBuiltinFrame(cp, r1, r0);
+      __ Push(r4, r5);
+      __ mov(r0, r2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mov(r2, r0);
+      __ Pop(r4, r5);
+      __ LeaveBuiltinFrame(cp, r1, r0);
+      __ SmiUntag(r4);
+      __ SmiUntag(r0);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r5);
+        __ JumpIfSmi(r5, &done_restore);
+        __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ vldr(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r2);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (d1) and the next parameter value on the right hand side (d2).
+    Label compare_nan, compare_swap;
+    __ VFPCompareAndSetFlags(d1, d2);
+    __ b(cc_done, &loop);
+    __ b(cc_swap, &compare_swap);
+    __ b(vs, &compare_nan);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
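+    // For min the parameter (d2) is inspected so that -0 wins over +0; for
+    // max the accumulator (d1) is inspected so that +0 wins over -0.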
+    __ VmovHigh(ip, reg);
+    __ cmp(ip, Operand(0x80000000));
+    __ b(ne, &loop);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ vmov(d1, d2);
+    __ mov(r5, r2);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(r5, Heap::kNanValueRootIndex);
+    __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ add(r0, r0, Operand(1));
+  __ Drop(r0);
+  __ mov(r0, r5);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0                     : number of arguments
+  //  -- r1                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into r0.
+  Label no_arguments;
+  {
+    __ mov(r2, r0);  // Store argc in r2.
+    __ sub(r0, r0, Operand(1), SetCC);
+    __ b(lo, &no_arguments);
+    __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(r2);
+    __ EnterBuiltinFrame(cp, r1, r2);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, r1, r2);
+    __ SmiUntag(r2);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ Drop(r2);
+    __ Ret(1);
+  }
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ Move(r0, Smi::FromInt(0));
+  __ Ret(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0                     : number of arguments
+  //  -- r1                     : constructor function
+  //  -- r3                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r2.
+  {
+    Label no_arguments, done;
+    __ mov(r6, r0);  // Store argc in r6.
+    __ sub(r0, r0, Operand(1), SetCC);
+    __ b(lo, &no_arguments);
+    __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ Move(r2, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure r2 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(r2, &done_convert);
+    __ CompareObjectType(r2, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(eq, &done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r6);
+      __ EnterBuiltinFrame(cp, r1, r6);
+      __ Push(r3);
+      __ Move(r0, r2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(r2, r0);
+      __ Pop(r3);
+      __ LeaveBuiltinFrame(cp, r1, r6);
+      __ SmiUntag(r6);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmp(r1, r3);
+  __ b(ne, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(r6);
+    __ EnterBuiltinFrame(cp, r1, r6);
+    __ Push(r2);  // first argument
+    __ CallStub(&stub);
+    __ Pop(r2);
+    __ LeaveBuiltinFrame(cp, r1, r6);
+    __ SmiUntag(r6);
+  }
+  __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r6);
+    __ Ret(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0                     : number of arguments
+  //  -- r1                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into r0.
+  Label no_arguments;
+  {
+    __ mov(r2, r0);  // Store argc in r2.
+    __ sub(r0, r0, Operand(1), SetCC);
+    __ b(lo, &no_arguments);
+    __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  }
+
+  // 2a. At least one argument, return r0 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(r0, &to_string);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CompareObjectType(r0, r3, r3, FIRST_NONSTRING_TYPE);
+    __ b(hi, &to_string);
+    __ b(eq, &symbol_descriptive_string);
+    __ b(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+    __ Ret(1);
+  }
+
+  // 3a. Convert r0 to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(r2);
+    __ EnterBuiltinFrame(cp, r1, r2);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, r1, r2);
+    __ SmiUntag(r2);
+  }
+  __ b(&drop_frame_and_ret);
+
+  // 3b. Convert symbol in r0 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Drop(r2);
+    __ Drop(1);
+    __ Push(r0);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r2);
+    __ Ret(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0                     : number of arguments
+  //  -- r1                     : constructor function
+  //  -- r3                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r2.
+  {
+    Label no_arguments, done;
+    __ mov(r6, r0);  // Store argc in r6.
+    __ sub(r0, r0, Operand(1), SetCC);
+    __ b(lo, &no_arguments);
+    __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(r2, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure r2 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(r2, &convert);
+    __ CompareObjectType(r2, r4, r4, FIRST_NONSTRING_TYPE);
+    __ b(lo, &done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(r6);
+      __ EnterBuiltinFrame(cp, r1, r6);
+      __ Push(r3);
+      __ Move(r0, r2);
+      __ CallStub(&stub);
+      __ Move(r2, r0);
+      __ Pop(r3);
+      __ LeaveBuiltinFrame(cp, r1, r6);
+      __ SmiUntag(r6);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmp(r1, r3);
+  __ b(ne, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(r6);
+    __ EnterBuiltinFrame(cp, r1, r6);
+    __ Push(r2);  // first argument
+    __ CallStub(&stub);
+    __ Pop(r2);
+    __ LeaveBuiltinFrame(cp, r1, r6);
+    __ SmiUntag(r6);
+  }
+  __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r6);
+    __ Ret(1);
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(r0);
+    __ push(r0);
+    // Push a copy of the target function and the new target.
+    __ push(r1);
+    __ push(r3);
+    // Push function as parameter to the runtime call.
+    __ Push(r1);
+
+    __ CallRuntime(function_id, 1);
+    __ mov(r2, r0);
+
+    // Restore target function and new target.
+    __ pop(r3);
+    __ pop(r1);
+    __ pop(r0);
+    __ SmiUntag(r0, r0);
+  }
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- r1     : constructor function
+  //  -- r2     : allocation site or undefined
+  //  -- r3     : new target
+  //  -- cp     : context
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(r2, r4);
+    __ Push(cp);
+    __ SmiTag(r0);
+    __ Push(r2, r0);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(r1, r3);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(r4, r0);
+      __ Pop(r1, r3);
+
+      // ----------- S t a t e -------------
+      //  -- r1: constructor function
+      //  -- r3: new target
+      //  -- r4: newly allocated object
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ ldr(r0, MemOperand(sp));
+    }
+
+    __ SmiUntag(r0);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ push(r4);
+      __ push(r4);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // r0: number of arguments
+    // r1: constructor function
+    // r2: address of last argument (caller sp)
+    // r3: new target
+    // r4: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ SmiTag(r4, r0);
+    __ b(&entry);
+    __ bind(&loop);
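+    // r4 is smi-tagged, so scaling by kPointerSizeLog2 - 1 both untags it
+    // and converts it into a byte offset.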
+    __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
+    __ push(ip);
+    __ bind(&entry);
+    __ sub(r4, r4, Operand(2), SetCC);
+    __ b(ge, &loop);
+
+    // Call the function.
+    // r0: number of arguments
+    // r1: constructor function
+    // r3: new target
+    ParameterCount actual(r0);
+    __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    // r0: result
+    // sp[0]: receiver
+    // sp[1]: number of arguments (smi-tagged)
+    __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // r0: result
+      // sp[0]: receiver
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(r0, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CompareObjectType(r0, r1, r3, FIRST_JS_RECEIVER_TYPE);
+      __ b(ge, &exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ ldr(r0, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // r0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ ldr(r1, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
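+  // r1 holds the smi-tagged argument count; drop the arguments and then the
+  // receiver slot.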
+  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+  __ add(sp, sp, Operand(kPointerSize));
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
+  }
+  __ Jump(lr);
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the value to pass to the generator
+  //  -- r1 : the JSGeneratorObject to resume
+  //  -- r2 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r1);
+
+  // Store input value into generator object.
+  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+  __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ ldrsb(ip, MemOperand(ip));
+  __ cmp(ip, Operand(StepIn));
+  __ b(ge, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ ldr(ip, MemOperand(ip));
+  __ cmp(ip, Operand(r1));
+  __ b(eq, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Push receiver.
+  __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r1    : the JSGeneratorObject to resume
+  //  -- r2    : the resume mode (tagged)
+  //  -- r4    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+    __ b(mi, &done_loop);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ b(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+  __ b(ne, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r0, FieldMemOperand(
+                   r0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(r0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(r3, r1);
+    __ Move(r1, r4);
+    __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ Jump(r5);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    DCHECK(!FLAG_enable_embedded_constant_pool);
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(lr, fp);
+    __ Move(fp, sp);
+    __ Push(cp, r4);
+
+    // Restore the operand stack.
+    __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+    __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
+    __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ cmp(r0, r3);
+      __ b(eq, &done_loop);
+      __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
+      __ Push(ip);
+      __ b(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
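+    // The continuation offset is a smi; ASR #1 untags it as it is added to
+    // the code entry.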
+    __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+    __ add(r3, r3, Operand(r2, ASR, 1));
+    __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+    __ Move(r0, r1);  // Continuation expects generator object in r0.
+    __ Jump(r3);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ push(r1);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers r2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+  // Make r2 the space we have left. The stack might already be overflowed
+  // here which will cause r2 to become negative.
+  __ sub(r2, sp, r2);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
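+    // PointerOffsetFromSmiKey turns the smi-tagged count directly into a
+    // byte offset, without untagging first.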
+    __ cmp(r2, Operand::PointerOffsetFromSmiKey(argc));
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ cmp(r2, Operand(argc, LSL, kPointerSizeLog2));
+  }
+  __ b(gt, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r0: new.target
+  // r1: function
+  // r2: receiver
+  // r3: argc
+  // r4: argv
+  // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Setup the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(cp, Operand(context_address));
+    __ ldr(cp, MemOperand(cp));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ Push(r1, r2);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers r2.
+    Generate_CheckStackOverflow(masm, r3, kArgcIsUntaggedInt);
+
+    // Remember new.target.
+    __ mov(r5, r0);
+
+    // Copy arguments to the stack in a loop.
+    // r1: function
+    // r3: argc
+    // r4: argv, i.e. points to first arg
+    Label loop, entry;
+    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+    // r2 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+    __ ldr(r0, MemOperand(r0));                           // dereference handle
+    __ push(r0);                                          // push parameter
+    __ bind(&entry);
+    __ cmp(r4, r2);
+    __ b(ne, &loop);
+
+    // Setup new.target and argc.
+    __ mov(r0, Operand(r3));
+    __ mov(r3, Operand(r5));
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mov(r5, Operand(r4));
+    __ mov(r6, Operand(r4));
+    if (!FLAG_enable_embedded_constant_pool) {
+      __ mov(r8, Operand(r4));
+    }
+    if (kR9Available == 1) {
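+      // r9 may be reserved by the platform ABI, in which case it is not a JS
+      // callee-saved register.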
+      __ mov(r9, Operand(r4));
+    }
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+    // Respect ABI stack constraint.
+  }
+  __ Jump(lr);
+
+  // r0: result
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ ldr(args_count,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(args_count,
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
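+  // args_count holds the parameter area size in bytes, receiver included.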
+  __ add(sp, sp, args_count, LeaveCC);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o r1: the JS function object being called.
+//   o r3: the new target
+//   o cp: our context
+//   o pp: the caller's constant pool pointer (if enabled)
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(r1);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(r0));
+  __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmp(debug_info, Operand(DebugInfo::uninitialized()));
+  // Load original bytecode array or the debug copy.
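+  // Both loads below are predicated on the comparison above: eq selects the
+  // original bytecode array, ne selects the debug copy.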
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex), ne);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+  __ cmp(r0, Operand(masm->CodeObject()));  // Self-reference to this code.
+  __ b(ne, &switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ SmiTst(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Load the initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
+  __ Push(r3, kInterpreterBytecodeArrayRegister, r0);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                               BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ sub(r9, sp, Operand(r4));
+    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+    __ cmp(r9, Operand(r2));
+    __ b(hs, &ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+    __ b(&loop_check, al);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(r9);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ sub(r4, r4, Operand(kPointerSize), SetCC);
+    __ b(&loop_header, ge);
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
+  __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+                        kPointerSizeLog2));
+  __ Call(ip);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in r0.
+  LeaveInterpreterFrame(masm, r2);
+  __ Jump(lr);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(r1, r4, r5);
+  __ Jump(r4);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ldr(kContextRegister,
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r2);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r0);
+
+    // Push function as argument and compile for baseline.
+    __ push(r1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r0);
+  }
+  __ Jump(lr);
+}
+
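+// Pushes the values in the address range (limit, index] onto the stack,
+// starting at |index| and walking down towards |limit|.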
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+                                         Register limit, Register scratch) {
+  Label loop_header, loop_check;
+  __ b(al, &loop_check);
+  __ bind(&loop_header);
+  __ ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
+  __ push(scratch);
+  __ bind(&loop_check);
+  __ cmp(index, limit);
+  __ b(gt, &loop_header);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r2 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- r1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ add(r3, r0, Operand(1));  // Add one for receiver.
+  __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
+  __ sub(r3, r2, r3);
+
+  // Push the arguments.
+  Generate_InterpreterPushArgs(masm, r2, r3, r4);
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r0 : argument count (not including receiver)
+  // -- r3 : new target
+  // -- r1 : constructor to call
+  // -- r2 : address of the first argument
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ mov(r4, Operand(r0, LSL, kPointerSizeLog2));
+  __ sub(r4, r2, r4);
+
+  // Push a slot for the receiver to be constructed.
+  __ mov(ip, Operand::Zero());
+  __ push(ip);
+
+  // Push the arguments.
+  Generate_InterpreterPushArgs(masm, r2, r4, r5);
+
+  // Call the constructor with r0, r1, and r3 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
+
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ SmiTst(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ ldr(kInterpreterBytecodeOffsetRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
+  __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+                        kPointerSizeLog2));
+  __ mov(pc, ip);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = r0;
+  Register closure = r1;
+  Register new_target = r3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = r2;
+  __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(map,
+         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Operand(Smi::FromInt(2)));
+  __ b(lt, &gotta_call_runtime);
+
+  // Find literals.
+  // r3  : native context
+  // r2  : length / index
+  // r0  : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = r3;
+  __ ldr(native_context, NativeContextMemOperand());
+
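+  // Each optimized code map entry holds a weak native context, an OSR AST id,
+  // a literals array and cached code (see the kOffsetToPrevious* constants);
+  // the entries are walked from the back of the array towards the front.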
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r5;
+
+  // Does the native context match?
+  __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousContext));
+  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ b(ne, &loop_bottom);
+  // OSR id set to none?
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
+  __ b(ne, &loop_bottom);
+  // Literals available?
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ ldr(r4, MemOperand(sp, 0));
+  __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = r4;
+  __ ldr(entry,
+         FieldMemOperand(array_pointer,
+                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, r5);
+
+  // Link the closure into the optimized function list.
+  // r4 : code entry
+  // r3 : native context
+  // r1 : closure
+  __ ldr(r5,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ str(closure,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(r5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, r5);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Operand(Smi::FromInt(1)));
+  __ b(gt, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                         SharedFunctionInfo::kSharedCodeIndex));
+  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ ldr(entry,
+         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ and_(r5, r5, Operand(Code::KindField::kMask));
+  __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
+  __ cmp(r5, Operand(Code::BUILTIN));
+  __ b(eq, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, r5);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ Move(r4, r0);
+    // Push the number of arguments to the callee.
+    __ SmiTag(r0);
+    __ push(r0);
+    // Push a copy of the target function and the new target.
+    __ push(r1);
+    __ push(r3);
+
+    // The function.
+    __ push(r1);
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
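+    // There are at most three arguments (stdlib, foreign, heap); for each
+    // possible argc the existing ones are copied and the remainder is padded
+    // with undefined.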
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ cmp(r4, Operand(j));
+        __ b(ne, &over);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                      i * kPointerSize));
+        __ push(r4);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(r0, &failed);
+
+    __ Drop(2);
+    __ pop(r4);
+    __ SmiUntag(r4);
+    scope.GenerateLeaveFrame();
+
+    __ add(r4, r4, Operand(1));
+    __ Drop(r4);
+    __ Ret();
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ pop(r3);
+    __ pop(r1);
+    __ pop(r0);
+    __ SmiUntag(r0);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r0 - contains return address (beginning of patch sequence)
+  //   r1 - isolate
+  //   r3 - new target
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
+  __ PrepareCallCFunction(2, 0, r2);
+  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
+  __ mov(pc, r0);
+}
+
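+// Define the "make code young again" builtins, one per code age and marking
+// parity.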
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r0 - contains return address (beginning of patch sequence)
+  //   r1 - isolate
+  //   r3 - new target
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
+  __ PrepareCallCFunction(2, 0, r2);
+  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(r1);
+
+  // Jump to point after the code-age stub.
+  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
+  __ mov(pc, r0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+    // Call into the runtime to notify the stub failure; no arguments need
+    // to be passed.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ add(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ mov(pc, lr);                         // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(r0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> r6.
+  __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r6);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ cmp(r6,
+         Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ b(ne, &with_tos_register);
+  __ add(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ cmp(r6,
+         Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
+  __ b(ne, &unknown_state);
+  __ add(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Register scratch2,
+                                    Label* receiver_check_failed) {
+  Register signature = scratch0;
+  Register map = scratch1;
+  Register constructor = scratch2;
+
+  // If there is no signature, return the holder.
+  __ ldr(signature, FieldMemOperand(function_template_info,
+                                    FunctionTemplateInfo::kSignatureOffset));
+  __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ b(eq, &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, ip, ip);
+  __ cmp(ip, Operand(JS_FUNCTION_TYPE));
+  Label next_prototype;
+  __ b(ne, &next_prototype);
+  Register type = constructor;
+  __ ldr(type,
+         FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmp(signature, type);
+  __ b(eq, &receiver_check_passed);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ CompareObjectType(type, ip, ip, FUNCTION_TEMPLATE_INFO_TYPE);
+
+  // Otherwise load the parent function template and iterate.
+  __ ldr(type,
+         FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset),
+         eq);
+  __ b(&function_template_loop, eq);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ ldr(ip, FieldMemOperand(map, Map::kBitField3Offset));
+  __ tst(ip, Operand(Map::HasHiddenPrototype::kMask));
+  __ b(eq, receiver_check_failed);
+  __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ b(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
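+
+// A rough C++ sketch of the check above (accessor names are approximations,
+// not the exact V8 API): walk the receiver's hidden-prototype chain and, for
+// each map whose constructor is a JSFunction, walk the chain of parent
+// function templates looking for the expected signature.
+//
+//   bool IsCompatibleReceiver(Object* receiver, FunctionTemplateInfo* info) {
+//     Object* signature = info->signature();
+//     if (signature->IsUndefined()) return true;
+//     Map* map = HeapObject::cast(receiver)->map();
+//     for (;;) {
+//       Object* constructor = map->GetConstructor();
+//       if (constructor->IsJSFunction()) {
+//         Object* type =
+//             JSFunction::cast(constructor)->shared()->function_data();
+//         for (;;) {
+//           if (type == signature) return true;
+//           if (!type->IsFunctionTemplateInfo()) break;
+//           type = FunctionTemplateInfo::cast(type)->parent_template();
+//         }
+//       }
+//       if (!map->has_hidden_prototype()) return false;
+//       map = HeapObject::cast(map->prototype())->map();
+//     }
+//   }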
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0                 : number of arguments excluding receiver
+  //  -- r1                 : callee
+  //  -- lr                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  CompatibleReceiverCheck(masm, r2, r3, r4, r5, r6, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ ldr(r4, FieldMemOperand(r3, FunctionTemplateInfo::kCallCodeOffset));
+  __ ldr(r4, FieldMemOperand(r4, CallHandlerInfo::kFastHandlerOffset));
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r4);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver)
+  __ add(r0, r0, Operand(1));
+  __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(r0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the runtime returned a smi zero (no code object), just return to
+  // the caller.
+  Label skip;
+  __ cmp(r0, Operand(Smi::FromInt(0)));
+  __ b(ne, &skip);
+  __ Ret();
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the
+  // actual JavaScript frame. This is the case when OSR is triggered from
+  // bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
+
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+
+    if (FLAG_enable_embedded_constant_pool) {
+      __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
+    }
+
+    // Load the OSR entrypoint offset from the deoptimization data.
+    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+    __ ldr(r1, FieldMemOperand(
+                   r1, FixedArray::OffsetOfElementAt(
+                           DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+    // Compute the target address = code start + osr_offset
+    __ add(lr, r0, Operand::SmiUntag(r1));
+
+    // And "return" to the OSR entry point of the function.
+    __ Ret();
+  }
+}
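+
+// The OSR entry point is not a separate code object; it is a pc offset
+// stored in the optimized code's deoptimization data. A sketch of the
+// address computation above (hypothetical accessor names):
+//
+//   Address OsrEntry(Code* code) {
+//     FixedArray* data = code->deoptimization_data();
+//     int pc_offset = Smi::cast(
+//         data->get(DeoptimizationInputData::kOsrPcOffsetIndex))->value();
+//     return code->instruction_start() + pc_offset;  // placed in lr, Ret()
+//   }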
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- r0    : number of arguments
+  //  -- r1    : function
+  //  -- cp    : context
+  //  -- lr    : return address
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into r0 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(r0);
+    __ JumpIfSmi(r0, &receiver_not_date);
+    __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
+    __ b(ne, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ ldr(r0, FieldMemOperand(r0, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ mov(r1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ ldr(r1, MemOperand(r1));
+      __ ldr(ip, FieldMemOperand(r0, JSDate::kCacheStampOffset));
+      __ cmp(r1, ip);
+      __ b(ne, &stamp_mismatch);
+      __ ldr(r0, FieldMemOperand(
+                     r0, JSDate::kValueOffset + field_index * kPointerSize));
+      __ Ret();
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, r1);
+    __ mov(r1, Operand(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(r0);
+    __ Move(r0, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, r1, r0);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
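+
+// Cached date fields (year, month, day, ...) are only valid while the stamp
+// recorded in the JSDate object matches the isolate-wide date cache stamp;
+// otherwise the field is recomputed in C++. A sketch of the logic above
+// (hypothetical accessors):
+//
+//   Object* GetDateField(JSDate* date, int field_index) {
+//     if (field_index == JSDate::kDateValue) return date->value();
+//     if (field_index < JSDate::kFirstUncachedField &&
+//         date->cache_stamp() == isolate->date_cache_stamp()) {
+//       return date->cached_field(field_index);  // fast path: a single load
+//     }
+//     return CallGetDateFieldFunction(date, field_index);  // slow path
+//   }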
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into r1, argArray into r0 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ mov(r3, r2);
+    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));  // receiver
+    __ sub(r4, r0, Operand(1), SetCC);
+    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // thisArg
+    __ sub(r4, r4, Operand(1), SetCC, ge);
+    __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argArray
+    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ str(r2, MemOperand(sp, 0));
+    __ mov(r0, r3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r0    : argArray
+  //  -- r1    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(r1, &receiver_not_callable);
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsCallable));
+  __ b(eq, &receiver_not_callable);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(r0, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ mov(r0, Operand(0));
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ str(r1, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
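+
+// Because argc is only known at run time, the loads above use ge-predicated
+// instructions instead of branches: each sub(..., SetCC) leaves the flags
+// saying whether another argument is present. In rough C++ terms, with
+// sp[i] denoting the i-th stack slot:
+//
+//   Object* callable  = sp[argc];              // receiver of the apply call
+//   Object* this_arg  = argc >= 1 ? sp[argc - 1] : undefined;
+//   Object* arg_array = argc >= 2 ? sp[argc - 2] : undefined;
+//   sp += argc;          // drop all arguments...
+//   sp[0] = this_arg;    // ...and reuse the receiver slot for thisArg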
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // r0: actual number of arguments
+  {
+    Label done;
+    __ cmp(r0, Operand::Zero());
+    __ b(ne, &done);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ add(r0, r0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  // r0: actual number of arguments
+  __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r0: actual number of arguments
+  // r1: callable
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+
+    __ bind(&loop);
+    __ ldr(ip, MemOperand(r2, -kPointerSize));
+    __ str(ip, MemOperand(r2));
+    __ sub(r2, r2, Operand(kPointerSize));
+    __ cmp(r2, sp);
+    __ b(ne, &loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ sub(r0, r0, Operand(1));
+    __ pop();
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
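+
+// The loop above turns f.call(a, b, c) into a plain call of f with receiver
+// a and arguments (b, c) by sliding every slot one position toward higher
+// addresses, overwriting the old receiver. Sketch (sp[i] is the i-th slot):
+//
+//   for (int i = argc; i > 0; --i) sp[i] = sp[i - 1];  // shift over receiver
+//   sp += 1;     // drop the now-duplicated slot at the top
+//   argc -= 1;   // the former first argument became the receiver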
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ mov(r2, r1);
+    __ mov(r3, r1);
+    __ sub(r4, r0, Operand(1), SetCC);
+    __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // target
+    __ sub(r4, r4, Operand(1), SetCC, ge);
+    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // thisArgument
+    __ sub(r4, r4, Operand(1), SetCC, ge);
+    __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argumentsList
+    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ str(r2, MemOperand(sp, 0));
+    __ mov(r0, r3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r0    : argumentsList
+  //  -- r1    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(r1, &target_not_callable);
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsCallable));
+  __ b(eq, &target_not_callable);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ str(r1, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+  // new.target into r3 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and leave undefined
+  // as the new receiver on the stack.
+  {
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ mov(r2, r1);
+    __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));  // receiver
+    __ sub(r4, r0, Operand(1), SetCC);
+    __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // target
+    __ mov(r3, r1);  // new.target defaults to target
+    __ sub(r4, r4, Operand(1), SetCC, ge);
+    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argumentsList
+    __ sub(r4, r4, Operand(1), SetCC, ge);
+    __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // new.target
+    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+    __ mov(r0, r2);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r0    : argumentsList
+  //  -- r3    : new.target
+  //  -- r1    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(r1, &target_not_constructor);
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsConstructor));
+  __ b(eq, &target_not_constructor);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(r3, &new_target_not_constructor);
+  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsConstructor));
+  __ b(eq, &new_target_not_constructor);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ str(r1, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ str(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r0 : actual number of arguments
+  //  -- r1 : function (passed through to callee)
+  //  -- r2 : expected number of arguments
+  //  -- r3 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+  // Make r5 the space we have left. The stack might already be overflowed
+  // here which will cause r5 to become negative.
+  __ sub(r5, sp, r5);
+  // Check if the arguments will overflow the stack.
+  __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
+  __ b(le, stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(r0);
+  __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
+                       (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+                       fp.bit() | lr.bit());
+  __ add(fp, sp,
+         Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then remove the parameters from the stack.
+  __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                              kPointerSize)));
+
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
+  __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
+  __ add(sp, sp, Operand(kPointerSize));  // adjust for receiver
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : argumentsList
+  //  -- r1    : target
+  //  -- r3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(r0, &create_runtime);
+
+    // Load the map of argumentsList into r2.
+    __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+    // Load native context into r4.
+    __ ldr(r4, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ ldr(ip, ContextMemOperand(r4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ cmp(ip, r2);
+    __ b(eq, &create_arguments);
+    __ ldr(ip, ContextMemOperand(r4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ cmp(ip, r2);
+    __ b(eq, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CompareInstanceType(r2, ip, JS_ARRAY_TYPE);
+    __ b(eq, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r1, r3, r0);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(r1, r3);
+      __ ldr(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+      __ SmiUntag(r2);
+    }
+    __ jmp(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ ldr(r2, FieldMemOperand(r0, JSArgumentsObject::kLengthOffset));
+    __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+    __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
+    __ cmp(r2, ip);
+    __ b(ne, &create_runtime);
+    __ SmiUntag(r2);
+    __ mov(r0, r4);
+    __ b(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r2);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmp(r2, Operand(FAST_ELEMENTS));
+    __ b(hi, &create_runtime);
+    __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ b(eq, &create_runtime);
+    __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+    __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+    __ SmiUntag(r2);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+    // Make ip the space we have left. The stack might already be overflowed
+    // here which will cause ip to become negative.
+    __ sub(ip, sp, ip);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ip, Operand(r2, LSL, kPointerSizeLog2));
+    __ b(gt, &done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r1    : target
+  //  -- r0    : args (a FixedArray built from argumentsList)
+  //  -- r2    : len (number of elements to push from args)
+  //  -- r3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ mov(r4, Operand(0));
+    Label done, loop;
+    __ bind(&loop);
+    __ cmp(r4, r2);
+    __ b(eq, &done);
+    __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
+    __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
+    __ Push(ip);
+    __ add(r4, r4, Operand(1));
+    __ b(&loop);
+    __ bind(&done);
+    __ Move(r0, r4);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
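+
+// Building the FixedArray of arguments distinguishes three cases. A sketch
+// of the dispatch above, with hypothetical predicates:
+//
+//   if (IsUnmodifiedArgumentsObject(o) &&
+//       o->length() == o->elements()->length()) {
+//     args = o->elements();                 // use the backing store directly
+//   } else if (o->IsJSArray() &&
+//              IsPackedSmiOrObject(o->map()->elements_kind())) {
+//     args = o->elements();                 // fast, packed JSArray
+//   } else {
+//     args = CreateListFromArrayLike(o);    // generic runtime slow path
+//   }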
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
+  __ ldrb(scratch1, MemOperand(scratch1));
+  __ cmp(scratch1, Operand(0));
+  __ b(eq, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ ldr(scratch3,
+           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ b(ne, &no_interpreter_frame);
+    __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch3,
+         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ ldr(scratch1,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(caller_args_count_reg,
+         FieldMemOperand(scratch1,
+                         SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kFunctionKindByteOffset));
+  __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ b(ne, &class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
+  __ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                     (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ b(ne, &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- r0 : the number of arguments (not including the receiver)
+    //  -- r1 : the function to call (checked to be a JSFunction)
+    //  -- r2 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(r3);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+      __ JumpIfSmi(r3, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+      __ b(hs, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(r3);
+        }
+        __ b(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(r0);
+        __ Push(r0, r1);
+        __ mov(r0, r3);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ mov(r3, r0);
+        __ Pop(r0, r1);
+        __ SmiUntag(r0);
+      }
+      __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the function to call (checked to be a JSFunction)
+  //  -- r2 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
+  __ ldr(r2,
+         FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(r2);
+  ParameterCount actual(r0);
+  ParameterCount expected(r2);
+  __ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ push(r1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
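+
+// Receiver conversion follows ES6 9.2.1.2 OrdinaryCallBindThis: strict-mode
+// and native functions take the receiver as-is, while sloppy-mode functions
+// replace null/undefined with the global proxy and box primitives via
+// ToObject. Sketch:
+//
+//   if (!is_strict(f) && !is_native(f)) {
+//     if (receiver->IsNullOrUndefined()) {
+//       receiver = global_proxy;
+//     } else if (!receiver->IsJSReceiver()) {
+//       receiver = ToObject(receiver);  // allocates a wrapper object
+//     }
+//   }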
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : target (checked to be a JSBoundFunction)
+  //  -- r3 : new.target (only in case of [[Construct]])
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into r2 and length of that into r4.
+  Label no_bound_arguments;
+  __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
+  __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ SmiUntag(r4);
+  __ cmp(r4, Operand(0));
+  __ b(eq, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- r0 : the number of arguments (not including the receiver)
+    //  -- r1 : target (checked to be a JSBoundFunction)
+    //  -- r2 : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- r3 : new.target (only in case of [[Construct]])
+    //  -- r4 : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ sub(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+      __ b(gt, &done);  // Signed comparison.
+      // Restore the stack pointer.
+      __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Relocate arguments down the stack.
+    {
+      Label loop, done_loop;
+      __ mov(r5, Operand(0));
+      __ bind(&loop);
+      __ cmp(r5, r0);
+      __ b(gt, &done_loop);
+      __ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+      __ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+      __ add(r4, r4, Operand(1));
+      __ add(r5, r5, Operand(1));
+      __ b(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+      __ SmiUntag(r4);
+      __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ bind(&loop);
+      __ sub(r4, r4, Operand(1), SetCC);
+      __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
+      __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+      __ add(r0, r0, Operand(1));
+      __ b(gt, &loop);
+    }
+  }
+  __ bind(&no_bound_arguments);
+}
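+
+// Net effect of the loops above, where n = len([[BoundArguments]]) and sp[i]
+// is the i-th stack slot (on entry: receiver at sp[argc], first argument at
+// sp[argc - 1]): the bound arguments end up between the call arguments and
+// the receiver. Sketch:
+//
+//   sp -= n;                                   // reserve n slots (checked)
+//   for (int i = 0; i <= argc; ++i)            // slide args + receiver down
+//     sp[i] = sp[i + n];
+//   for (int i = n - 1; i >= 0; --i)           // copy bound args; they sit
+//     sp[argc + (n - 1 - i)] = bound_args[i];  // just below the receiver,
+//   argc += n;                                 // now at sp[argc + n]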
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(r1);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
+  __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                       masm->isolate())));
+  __ ldr(ip, MemOperand(ip));
+  __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(r1, &non_callable);
+  __ bind(&non_smi);
+  __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+  __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsCallable));
+  __ b(eq, &non_callable);
+
+  __ cmp(r5, Operand(JS_PROXY_TYPE));
+  __ b(ne, &non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(r1);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ add(r0, r0, Operand(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
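+
+// Summary of the dispatch implemented above:
+//
+//   if (target->IsJSFunction())           tail-call CallFunction
+//   else if (target->IsJSBoundFunction()) tail-call CallBoundFunction
+//   else if (!map->is_callable())         Runtime::kThrowCalledNonCallable
+//   else if (target->IsJSProxy())         Runtime::kJSProxyCall
+//   else  /* callable exotic object */    patch receiver to target, then
+//                                         CALL_AS_FUNCTION_DELEGATE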
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the constructor to call (checked to be a JSFunction)
+  //  -- r3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // r2 to contain either an AllocationSite or undefined.
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
+  __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the function to call (checked to be a JSBoundFunction)
+  //  -- r3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(r1);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  __ cmp(r1, r3);
+  __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
+         eq);
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ ldr(ip, MemOperand(ip));
+  __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the constructor to call (checked to be a JSProxy)
+  //  -- r3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(r1);
+  __ Push(r3);
+  // Include the pushed new_target, constructor and the receiver.
+  __ add(r0, r0, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the constructor to call (can be any Object)
+  //  -- r3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(r1, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Construct]] internal method.
+  __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r2, Operand(1 << Map::kIsConstructor));
+  __ b(eq, &non_constructor);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ cmp(r5, Operand(JS_PROXY_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r1);
+  __ Push(r1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r1);
+  __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r1, r2);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : message_id as Smi
+  //  -- lr : return address
+  // -----------------------------------
+  __ Push(r1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r0.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ Ret(eq);
+
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : actual number of arguments
+  //  -- r1 : function (passed through to callee)
+  //  -- r2 : expected number of arguments
+  //  -- r3 : new target (passed through to callee)
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ cmp(r0, r2);
+  __ b(lt, &too_few);
+  __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ b(eq, &dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r0 and copy end address into r4.
+    // r0: actual number of arguments as a smi
+    // r1: function
+    // r2: expected number of arguments
+    // r3: new target (passed through to callee)
+    __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+    // adjust for return address and receiver
+    __ add(r0, r0, Operand(2 * kPointerSize));
+    __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r0: copy start address
+    // r1: function
+    // r2: expected number of arguments
+    // r3: new target (passed through to callee)
+    // r4: copy end address
+
+    Label copy;
+    __ bind(&copy);
+    __ ldr(ip, MemOperand(r0, 0));
+    __ push(ip);
+    __ cmp(r0, r4);  // Compare before moving to next argument.
+    __ sub(r0, r0, Operand(kPointerSize));
+    __ b(ne, &copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: actual < expected
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r0; the copy end address is fp.
+    // r0: actual number of arguments as a smi
+    // r1: function
+    // r2: expected number of arguments
+    // r3: new target (passed through to callee)
+    __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r0: copy start address
+    // r1: function
+    // r2: expected number of arguments
+    // r3: new target (passed through to callee)
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
+    __ push(ip);
+    __ cmp(r0, fp);  // Compare before moving to next argument.
+    __ sub(r0, r0, Operand(kPointerSize));
+    __ b(ne, &copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r1: function
+    // r2: expected number of arguments
+    // r3: new target (passed through to callee)
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
+    // Adjust for frame.
+    __ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                           2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ push(ip);
+    __ cmp(sp, r4);
+    __ b(ne, &fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ mov(r0, r2);
+  // r0 : expected number of arguments
+  // r1 : function (passed through to callee)
+  // r3 : new target (passed through to callee)
+  __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Call(r4);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Jump(lr);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Jump(r4);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bkpt(0);
+  }
+}
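+
+// Summary of what the adaptor does for a mismatched argument count (sketch;
+// slot counts include the receiver):
+//
+//   if (expected == kDontAdaptArgumentsSentinel) {
+//     jump straight to the code entry;   // no adaptor frame is built
+//   } else if (actual >= expected) {
+//     copy receiver + the first `expected` args into the new frame;
+//     // surplus arguments stay behind and die with the adaptor frame
+//   } else {
+//     copy receiver + all `actual` args, then push undefined until
+//     `expected` arguments are present;
+//   }
+//   call the code entry with r0 = expected; on return, the adaptor frame
+//   and the `actual` arguments recorded in it are removed.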
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
new file mode 100644
index 0000000..57395d8
--- /dev/null
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -0,0 +1,3046 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/frames-arm64.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- x0                 : number of arguments excluding receiver
+  //  -- x1                 : target
+  //  -- x3                 : new target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument
+  //  -- sp[8 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects x0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ Add(x0, x0, num_extra_args + 1);
+
+  // Insert extra arguments.
+  __ SmiTag(x0);
+  __ Push(x0, x1, x3);
+  __ SmiUntag(x0);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, x1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ Tst(x10, kSmiTagMask);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_ArrayCode");
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, x1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ Tst(x10, kSmiTagMask);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+    __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+  __ Mov(x3, x1);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_MathMaxMin");
+
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+
+  // Load the accumulator with the default return value (+Infinity for min,
+  // -Infinity for max), with the tagged value in x5 and the double value in
+  // d5.
+  __ LoadRoot(x5, root_index);
+  __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
+
+  Label done_loop, loop;
+  __ Mov(x4, x0);
+  __ Bind(&loop);
+  {
+    // Check whether all parameters have been processed.
+    __ Subs(x4, x4, 1);
+    __ B(lt, &done_loop);
+
+    // Load the next parameter tagged value into x2.
+    __ Peek(x2, Operand(x4, LSL, kPointerSizeLog2));
+
+    // Load the double value of the parameter into d2, first converting the
+    // parameter to a number via the ToNumber builtin if necessary.
+    Label convert_smi, convert_number, done_convert;
+    __ JumpIfSmi(x2, &convert_smi);
+    __ JumpIfHeapNumber(x2, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(x0);
+      __ SmiTag(x4);
+      __ EnterBuiltinFrame(cp, x1, x0);
+      __ Push(x5, x4);
+      __ Mov(x0, x2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Mov(x2, x0);
+      __ Pop(x4, x5);
+      __ LeaveBuiltinFrame(cp, x1, x0);
+      __ SmiUntag(x4);
+      __ SmiUntag(x0);
+      {
+        // Restore the double accumulator value (d5).
+        Label done_restore;
+        __ SmiUntagToDouble(d5, x5, kSpeculativeUntag);
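+        // The speculative untag only yields the right value if x5 is a smi;
+        // otherwise the double is reloaded from the HeapNumber below.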
+        __ JumpIfSmi(x5, &done_restore);
+        __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
+        __ Bind(&done_restore);
+      }
+    }
+    __ AssertNumber(x2);
+    __ JumpIfSmi(x2, &convert_smi);
+
+    __ Bind(&convert_number);
+    __ Ldr(d2, FieldMemOperand(x2, HeapNumber::kValueOffset));
+    __ B(&done_convert);
+
+    __ Bind(&convert_smi);
+    __ SmiUntagToDouble(d2, x2);
+    __ Bind(&done_convert);
+
+    // We can use a single fmin/fmax for the operation itself, but we then need
+    // to work out which HeapNumber (or smi) the result came from.
+    __ Fmov(x11, d5);
+    if (kind == MathMaxMinKind::kMin) {
+      __ Fmin(d5, d5, d2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ Fmax(d5, d5, d2);
+    }
+    __ Fmov(x10, d5);
+    __ Cmp(x10, x11);
+    __ Csel(x5, x5, x2, eq);
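+    // Comparing the raw bit patterns (instead of an FP compare) detects
+    // whether fmin/fmax picked the new value, distinguishing -0 from +0.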
+    __ B(&loop);
+  }
+
+  __ Bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ Add(x0, x0, 1);
+  __ Drop(x0);
+  __ Mov(x0, x5);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_NumberConstructor");
+
+  // 1. Load the first argument into x0.
+  Label no_arguments;
+  {
+    __ Cbz(x0, &no_arguments);
+    __ Mov(x2, x0);  // Store argc in x2.
+    __ Sub(x0, x0, 1);
+    __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+  }
+
+  // 2a. Convert first argument to number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(x2);
+    __ EnterBuiltinFrame(cp, x1, x2);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, x1, x2);
+    __ SmiUntag(x2);
+  }
+
+  {
+    // Drop all arguments.
+    __ Drop(x2);
+  }
+
+  // 2b. No arguments, return +0 (already in x0).
+  __ Bind(&no_arguments);
+  __ Drop(1);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : constructor function
+  //  -- x3                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_NumberConstructor_ConstructStub");
+
+  // 1. Make sure we operate in the context of the called function.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into x2.
+  {
+    Label no_arguments, done;
+    __ Move(x6, x0);  // Store argc in x6.
+    __ Cbz(x0, &no_arguments);
+    __ Sub(x0, x0, 1);
+    __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+    __ B(&done);
+    __ Bind(&no_arguments);
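+    // Number() called with no arguments wraps +0.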
+    __ Mov(x2, Smi::FromInt(0));
+    __ Bind(&done);
+  }
+
+  // 3. Make sure x2 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(x2, &done_convert);
+    __ JumpIfObjectType(x2, x4, x4, HEAP_NUMBER_TYPE, &done_convert, eq);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(x6);
+      __ EnterBuiltinFrame(cp, x1, x6);
+      __ Push(x3);
+      __ Move(x0, x2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(x2, x0);
+      __ Pop(x3);
+      __ LeaveBuiltinFrame(cp, x1, x6);
+      __ SmiUntag(x6);
+    }
+    __ Bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Cmp(x1, x3);
+  __ B(ne, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+  __ B(&drop_frame_and_ret);
+
+  // 6. Fall back to the runtime to create a new object.
+  __ Bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(x6);
+    __ EnterBuiltinFrame(cp, x1, x6);
+    __ Push(x2);  // first argument
+    __ CallStub(&stub);
+    __ Pop(x2);
+    __ LeaveBuiltinFrame(cp, x1, x6);
+    __ SmiUntag(x6);
+  }
+  __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+
+  __ Bind(&drop_frame_and_ret);
+  {
+    __ Drop(x6);
+    __ Drop(1);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_StringConstructor");
+
+  // 1. Load the first argument into x0.
+  Label no_arguments;
+  {
+    __ Cbz(x0, &no_arguments);
+    __ Mov(x2, x0);  // Store argc in x2.
+    __ Sub(x0, x0, 1);
+    __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+  }
+
+  // 2a. At least one argument: return x0 if it is already a string, otherwise
+  // dispatch to the appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(x0, &to_string);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CompareObjectType(x0, x3, x3, FIRST_NONSTRING_TYPE);
+    __ B(hi, &to_string);
+    __ B(eq, &symbol_descriptive_string);
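+    // Otherwise (lo) the argument is already a string; return it as-is.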
+    __ B(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ Bind(&no_arguments);
+  {
+    __ LoadRoot(x0, Heap::kempty_stringRootIndex);
+    __ Drop(1);
+    __ Ret();
+  }
+
+  // 3a. Convert x0 to a string.
+  __ Bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(x2);
+    __ EnterBuiltinFrame(cp, x1, x2);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, x1, x2);
+    __ SmiUntag(x2);
+  }
+  __ B(&drop_frame_and_ret);
+
+  // 3b. Convert symbol in x0 to a string.
+  __ Bind(&symbol_descriptive_string);
+  {
+    __ Drop(x2);
+    __ Drop(1);
+    __ Push(x0);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ Bind(&drop_frame_and_ret);
+  {
+    __ Drop(x2);
+    __ Drop(1);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                     : number of arguments
+  //  -- x1                     : constructor function
+  //  -- x3                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_StringConstructor_ConstructStub");
+
+  // 1. Make sure we operate in the context of the called function.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into x2.
+  {
+    Label no_arguments, done;
+    __ Mov(x6, x0);  // Store argc in x6.
+    __ Cbz(x0, &no_arguments);
+    __ Sub(x0, x0, 1);
+    __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+    __ B(&done);
+    __ Bind(&no_arguments);
+    __ LoadRoot(x2, Heap::kempty_stringRootIndex);
+    __ Bind(&done);
+  }
+
+  // 3. Make sure x2 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(x2, &convert);
+    __ JumpIfObjectType(x2, x4, x4, FIRST_NONSTRING_TYPE, &done_convert, lo);
+    __ Bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(x6);
+      __ EnterBuiltinFrame(cp, x1, x6);
+      __ Push(x3);
+      __ Move(x0, x2);
+      __ CallStub(&stub);
+      __ Move(x2, x0);
+      __ Pop(x3);
+      __ LeaveBuiltinFrame(cp, x1, x6);
+      __ SmiUntag(x6);
+    }
+    __ Bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Cmp(x1, x3);
+  __ B(ne, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+  __ B(&drop_frame_and_ret);
+
+  // 6. Fall back to the runtime to create a new object.
+  __ Bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(x6);
+    __ EnterBuiltinFrame(cp, x1, x6);
+    __ Push(x2);  // first argument
+    __ CallStub(&stub);
+    __ Pop(x2);
+    __ LeaveBuiltinFrame(cp, x1, x6);
+    __ SmiUntag(x6);
+  }
+  __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+
+  __ Bind(&drop_frame_and_ret);
+  {
+    __ Drop(x6);
+    __ Drop(1);
+    __ Ret();
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
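+  // Compute the entry address: step over the Code object header and strip the
+  // heap object tag.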
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the target function and the new target.
+    // Push another copy as a parameter to the runtime call.
+    __ SmiTag(x0);
+    __ Push(x0, x1, x3, x1);
+
+    __ CallRuntime(function_id, 1);
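+    // The runtime call returns the code to tail-call in x0; move it to x2 so
+    // that x0 can be restored to the argument count below.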
+    __ Move(x2, x0);
+
+    // Restore target function and new target.
+    __ Pop(x3, x1, x0);
+    __ SmiUntag(x0);
+  }
+
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere. However, not
+  // checking may delay installing ready functions, and always checking would be
+  // quite expensive. A good compromise is to first check against stack limit as
+  // a cue for an interrupt signal.
+  Label ok;
+  __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+  __ B(hs, &ok);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ Bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- x0     : number of arguments
+  //  -- x1     : constructor function
+  //  -- x2     : allocation site or undefined
+  //  -- x3     : new target
+  //  -- lr     : return address
+  //  -- cp     : context pointer
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Name the four incoming parameters.
+    Register argc = x0;
+    Register constructor = x1;
+    Register allocation_site = x2;
+    Register new_target = x3;
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+    __ Push(cp);
+    __ SmiTag(argc);
+    __ Push(allocation_site, argc);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(constructor, new_target);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Mov(x4, x0);
+      __ Pop(new_target, constructor);
+
+      // ----------- S t a t e -------------
+      //  -- x1: constructor function
+      //  -- x3: new target
+      //  -- x4: newly allocated object
+      // -----------------------------------
+
+      // Reload the number of arguments from the stack.
+      // Set it up in x0 for the function call below.
+      // jssp[0]: number of arguments (smi-tagged)
+      __ Peek(argc, 0);  // Load number of arguments.
+    }
+
+    __ SmiUntag(argc);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(x4, x4);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+    // Copy arguments and receiver to the expression stack.
+    // Copy two values per iteration so we can use ldp/stp.
+    // x0: number of arguments
+    // x1: constructor function
+    // x2: address of last argument (caller sp)
+    // x3: new target
+    // jssp[0]: receiver
+    // jssp[1]: receiver
+    // jssp[2]: number of arguments (smi-tagged)
+    // Compute the start address of the copy in x4.
+    __ Add(x4, x2, Operand(argc, LSL, kPointerSizeLog2));
+    Label loop, entry, done_copying_arguments;
+    __ B(&entry);
+    __ Bind(&loop);
+    __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
+    __ Push(x11, x10);
+    __ Bind(&entry);
+    __ Cmp(x4, x2);
+    __ B(gt, &loop);
+    // Because we copied values 2 by 2 we may have copied one extra value.
+    // Drop it if that is the case.
+    __ B(eq, &done_copying_arguments);
+    __ Drop(1);
+    __ Bind(&done_copying_arguments);
+
+    // Call the function.
+    // x0: number of arguments
+    // x1: constructor function
+    // x3: new target
+    ParameterCount actual(argc);
+    __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore the context from the frame.
+    // x0: result
+    // jssp[0]: receiver
+    // jssp[1]: number of arguments (smi-tagged)
+    __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // x0: result
+      // jssp[0]: receiver (newly allocated object)
+      // jssp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(x0, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ JumpIfObjectType(x0, x1, x3, FIRST_JS_RECEIVER_TYPE, &exit, ge);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ Bind(&use_receiver);
+      __ Peek(x0, 0);
+
+      // Remove the receiver from the stack, remove caller arguments, and
+      // return.
+      __ Bind(&exit);
+      // x0: result
+      // jssp[0]: receiver (newly allocated object)
+      // jssp[1]: number of arguments (smi-tagged)
+      __ Peek(x1, 1 * kXRegSize);
+    } else {
+      __ Peek(x1, 0);
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(x0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ Bind(&dont_throw);
+  }
+
+  __ DropBySMI(x1);
+  __ Drop(1);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ Push(x1);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the value to pass to the generator
+  //  -- x1 : the JSGeneratorObject to resume
+  //  -- x2 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(x1);
+
+  // Store input value into generator object.
+  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
+  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ Mov(x10, Operand(last_step_action));
+  __ Ldrsb(x10, MemOperand(x10));
+  __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ Mov(x10, Operand(debug_suspended_generator));
+  __ Ldr(x10, MemOperand(x10));
+  __ CompareAndBranch(x10, Operand(x1), eq,
+                      &prepare_step_in_suspended_generator);
+  __ Bind(&stepping_prepared);
+
+  // Push receiver.
+  __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+  __ Push(x5);
+
+  // ----------- S t a t e -------------
+  //  -- x1      : the JSGeneratorObject to resume
+  //  -- x2      : the resume mode (tagged)
+  //  -- x4      : generator function
+  //  -- cp      : generator context
+  //  -- lr      : return address
+  //  -- jssp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(w10,
+         FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
+  __ PushMultipleTimes(x11, w10);
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+  __ B(ne, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(w0, FieldMemOperand(
+                   x0, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(x3, x1);
+    __ Move(x1, x4);
+    __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+    __ Jump(x5);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ Bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(lr, fp);
+    __ Move(fp, jssp);
+    __ Push(cp, x4);
+
+    // Restore the operand stack.
+    __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+    __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
+    __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
+    {
+      Label done_loop, loop;
+      __ Bind(&loop);
+      __ Cmp(x0, x3);
+      __ B(eq, &done_loop);
+      __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
+      __ Push(x10);
+      __ B(&loop);
+      __ Bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+    __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
+    __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
+    __ Ldrsw(x11, UntagSmiFieldMemOperand(
+                      x1, JSGeneratorObject::kContinuationOffset));
+    __ Add(x10, x10, x11);
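+    // Mark the generator as executing while the resumed code runs.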
+    __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
+    __ Move(x0, x1);  // Continuation expects generator object in x0.
+    __ Br(x10);
+  }
+
+  __ Bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1, x2, x4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(x2, x1);
+    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ B(&stepping_prepared);
+
+  __ Bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1, x2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(x2, x1);
+    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ B(&stepping_prepared);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers x10, x15; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow.
+  // We are not trying to catch interruptions (e.g. debug break and
+  // preemption) here, so the "real stack limit" is checked.
+  Label enough_stack_space;
+  __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+  // Make x10 the space we have left. The stack might already be overflowed
+  // here which will cause x10 to become negative.
+  // TODO(jbramley): Check that the stack usage here is safe.
+  __ Sub(x10, jssp, x10);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
+  }
+  __ B(gt, &enough_stack_space);
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+  // The runtime call above always throws, so we should never return here.
+  if (__ emit_debug_code()) {
+    __ Unreachable();
+  }
+
+  __ Bind(&enough_stack_space);
+}
+
+// Input:
+//   x0: new.target.
+//   x1: function.
+//   x2: receiver.
+//   x3: argc.
+//   x4: argv.
+// Output:
+//   x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody().
+  Register new_target = x0;
+  Register function = x1;
+  Register receiver = x2;
+  Register argc = x3;
+  Register argv = x4;
+  Register scratch = x10;
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  {
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    __ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+                                              masm->isolate())));
+    __ Ldr(cp, MemOperand(scratch));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ Push(function, receiver);
+
+    // Check if we have enough stack space to push all arguments.
+    // The argument count is in x3 (argc). Clobbers x10 and x15.
+    Generate_CheckStackOverflow(masm, argc, kArgcIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop, in reverse order.
+    // x3: argc.
+    // x4: argv.
+    Label loop, entry;
+    // Compute the copy end address.
+    __ Add(scratch, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+    __ B(&entry);
+    __ Bind(&loop);
+    __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+    __ Ldr(x12, MemOperand(x11));  // Dereference the handle.
+    __ Push(x12);                  // Push the argument.
+    __ Bind(&entry);
+    __ Cmp(scratch, argv);
+    __ B(ne, &loop);
+
+    __ Mov(scratch, argc);
+    __ Mov(argc, new_target);
+    __ Mov(new_target, scratch);
+    // x0: argc.
+    // x3: new.target.
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    // The original values have been saved in JSEntryStub::GenerateBody().
+    __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+    __ Mov(x20, x19);
+    __ Mov(x21, x19);
+    __ Mov(x22, x19);
+    __ Mov(x23, x19);
+    __ Mov(x24, x19);
+    __ Mov(x25, x19);
+    // Don't initialize the reserved registers.
+    // x26 : root register (root).
+    // x27 : context pointer (cp).
+    // x28 : JS stack pointer (jssp).
+    // x29 : frame pointer (fp).
+
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the JS internal frame and remove the parameters (except function),
+    // and return.
+  }
+
+  // Result is in x0. Return.
+  __ Ret();
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ Ldr(args_count,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Ldr(args_count.W(),
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Drop(args_count, 1);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   - x1: the JS function object being called.
+//   - x3: the new target
+//   - cp: our context.
+//   - fp: our caller's frame pointer.
+//   - jssp: stack pointer.
+//   - lr: return address.
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ Push(lr, fp, cp, x1);
+  __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  DCHECK(!debug_info.is(x0));
+  __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
+  __ B(ne, &load_debug_bytecode_array);
+  __ Ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+  __ Bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ Ldr(x0, FieldMemOperand(x0, SharedFunctionInfo::kCodeOffset));
+  __ Cmp(x0, Operand(masm->CodeObject()));  // Self-reference to this code.
+  __ B(ne, &switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+                    kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Load the initial bytecode offset.
+  __ Mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(x0, kInterpreterBytecodeOffsetRegister);
+  __ Push(x3, kInterpreterBytecodeArrayRegister, x0);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    DCHECK(jssp.Is(__ StackPointer()));
+    __ Sub(x10, jssp, Operand(x11));
+    __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+    __ B(hs, &ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ Bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    // Note: there should always be at least one stack slot for the return
+    // register in the register file.
+    Label loop_header;
+    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+    // TODO(rmcilroy): Ensure we always have an even number of registers to
+    // allow the stack to be 16-byte aligned (and remove the need for jssp).
+    __ Lsr(x11, x11, kPointerSizeLog2);
+    __ PushMultipleTimes(x10, x11);
+    __ Bind(&loop_header);
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ Mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
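+  // Scale the bytecode by the pointer size to index the dispatch table.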
+  __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+  __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+  __ Call(ip0);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in x0.
+  LeaveInterpreterFrame(masm, x2);
+  __ Ret();
+
+  // Load debug copy of the bytecode array.
+  __ Bind(&load_debug_bytecode_array);
+  __ Ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ B(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ Bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
+  __ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(x1, x7, x5);
+  __ Jump(x7);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ Ldr(kContextRegister,
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, x2);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ Push(x0);
+
+    // Push function as argument and compile for baseline.
+    __ Push(x1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ Pop(x0);
+  }
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x2 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- x1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ Add(x3, x0, Operand(1));  // Add one for receiver.
+  __ Lsl(x3, x3, kPointerSizeLog2);
+  __ Sub(x4, x2, x3);
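+  // x4 is the end address: the loop below copies argc + 1 slots (arguments
+  // plus receiver) starting at x2 and walking down until x2 reaches x4.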
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Mov(x5, jssp);
+  __ Claim(x3, 1);
+  __ B(&loop_check);
+  __ Bind(&loop_header);
+  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+  __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
+  __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
+  __ Bind(&loop_check);
+  __ Cmp(x2, x4);
+  __ B(gt, &loop_header);
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- x0 : argument count (not including receiver)
+  // -- x3 : new target
+  // -- x1 : constructor to call
+  // -- x2 : address of the first argument
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ Add(x5, x0, Operand(1));  // Add one for receiver (to be constructed).
+  __ Lsl(x5, x5, kPointerSizeLog2);
+
+  // Set stack pointer and where to stop.
+  __ Mov(x6, jssp);
+  __ Claim(x5, 1);
+  __ Sub(x4, x6, x5);
+
+  // Push a slot for the receiver.
+  __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+
+  Label loop_header, loop_check;
+  // Push the arguments.
+  __ B(&loop_check);
+  __ Bind(&loop_header);
+  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+  __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
+  __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
+  __ Bind(&loop_check);
+  __ Cmp(x6, x4);
+  __ B(gt, &loop_header);
+
+  // Call the constructor with x0, x1, and x3 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadObject(x1, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
+
+  // Initialize the dispatch table register.
+  __ Mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ Ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+                    kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ Ldr(kInterpreterBytecodeOffsetRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
+  __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+  __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+  __ Jump(ip0);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = x1;
+  Register map = x13;
+  Register index = x2;
+  __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(map,
+         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Cmp(index, Operand(2));
+  __ B(lt, &gotta_call_runtime);
+
+  // Find literals.
+  // x4  : native context
+  // x2  : length / index
+  // x13 : optimized code map
+  // stack[0] : new target
+  // stack[8] : closure
+  Register native_context = x4;
+  __ Ldr(native_context, NativeContextMemOperand());
+
+  __ Bind(&loop_top);
+  Register temp = x5;
+  Register array_pointer = x6;
+
+  // Does the native context match?
+  __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousContext));
+  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Cmp(temp, native_context);
+  __ B(ne, &loop_bottom);
+  // OSR id set to none?
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
+  __ B(ne, &loop_bottom);
+  // Literals available?
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = x7;
+  __ Ldr(entry,
+         FieldMemOperand(array_pointer,
+                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ Bind(&install_optimized_code_and_tailcall);
+  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, x5);
+
+  // Link the closure into the optimized function list.
+  // x7 : code entry
+  // x4 : native context
+  // x1 : closure
+  __ Ldr(x8,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ Str(closure,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ Mov(x5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Jump(entry);
+
+  __ Bind(&loop_bottom);
+  __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
+  __ Cmp(index, Operand(1));
+  __ B(gt, &loop_top);
+
+  // We found neither literals nor code.
+  __ B(&gotta_call_runtime);
+
+  __ Bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                         SharedFunctionInfo::kSharedCodeIndex));
+  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ B(&install_optimized_code_and_tailcall);
+
+  __ Bind(&try_shared);
+  // Is the full code valid?
+  __ Ldr(entry,
+         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(x5, x5, Operand(Code::KindField::kMask));
+  __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
+  __ Cmp(x5, Operand(Code::BUILTIN));
+  __ B(eq, &gotta_call_runtime);
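+  // A code kind of BUILTIN means the shared function has no real compiled
+  // code yet (it still points at a stub such as CompileLazy).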
+  // Yes, install the full code.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, x5);
+  __ Jump(entry);
+
+  __ Bind(&gotta_call_runtime);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ Move(x4, x0);
+    // Push a copy of the target function and the new target.
+    __ SmiTag(x0);
+    // Push another copy as a parameter to the runtime call.
+    __ Push(x0, x1, x3, x1);
+
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ Cmp(x4, Operand(j));
+        __ B(ne, &over);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ Ldr(x4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                      i * kPointerSize));
+        __ Push(x4);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ B(&args_done);
+        __ Bind(&over);
+      }
+    }
+    __ Bind(&args_done);
+
+    // Call the runtime; on success, unwind this frame and the parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(x0, &failed);
+
+    __ Drop(2);
+    __ Pop(x4);
+    __ SmiUntag(x4);
+    scope.GenerateLeaveFrame();
+
+    __ Add(x4, x4, Operand(1));
+    __ Drop(x4);
+    __ Ret();
+
+    __ Bind(&failed);
+    // Restore target function and new target.
+    __ Pop(x3, x1, x0);
+    __ SmiUntag(x0);
+  }
+  // On failure, tail call back to regular JS.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code fast, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // The following caller-saved registers must be saved and restored when
+  // calling through to the runtime:
+  //   x0 - The address from which to resume execution.
+  //   x1 - isolate
+  //   x3 - new target
+  //   lr - The return address for the JSFunction itself. It has not yet been
+  //        preserved on the stack because the frame setup code was replaced
+  //        with a call to this stub, to handle code ageing.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(x0, x1, x3, fp, lr);
+    __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+    __ CallCFunction(
+        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+    __ Pop(lr, fp, x3, x1, x0);
+  }
+
+  // The calling function has been made young again, so return to execute the
+  // real frame set-up code.
+  __ Br(x0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // The following caller-saved registers must be saved and restored when
+  // calling through to the runtime:
+  //   x0 - The address from which to resume execution.
+  //   x1 - isolate
+  //   x3 - new target
+  //   lr - The return address for the JSFunction itself. It has not yet been
+  //        preserved on the stack because the frame setup code was replaced
+  //        with a call to this stub, to handle code ageing.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(x0, x1, x3, fp, lr);
+    __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        2);
+    __ Pop(lr, fp, x3, x1, x0);
+
+    // Perform prologue operations usually performed by the young code stub.
+    __ EmitFrameSetupForCodeAgePatching(masm);
+  }
+
+  // Jump to point after the code-age stub.
+  __ Add(x0, x0, kNoCodeAgeSequenceLength);
+  __ Br(x0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+    // registers here? According to the comment above, we should only need to
+    // preserve the registers with parameters.
+    __ PushXRegList(kSafepointSavedRegisters);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ PopXRegList(kSafepointSavedRegisters);
+  }
+
+  // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+  __ Drop(1);
+
+  // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+  // into lr before it jumps here.
+  __ Br(lr);
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the deoptimization type to the runtime system.
+    __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
+    __ Push(x0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it.
+  Register state = x6;
+  __ Peek(state, 0);
+  __ SmiUntag(state);
+
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ CompareAndBranch(state,
+                      static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS),
+                      ne, &with_tos_register);
+  __ Drop(1);  // Remove state.
+  __ Ret();
+
+  __ Bind(&with_tos_register);
+  // Reload TOS register.
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
+  __ Peek(x0, kPointerSize);
+  __ CompareAndBranch(state,
+                      static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER),
+                      ne, &unknown_state);
+  __ Drop(2);  // Remove state and TOS.
+  __ Ret();
+
+  __ Bind(&unknown_state);
+  __ Abort(kInvalidFullCodegenState);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Register scratch2,
+                                    Label* receiver_check_failed) {
+  Register signature = scratch0;
+  Register map = scratch1;
+  Register constructor = scratch2;
+
+  // If there is no signature, return the holder.
+  __ Ldr(signature, FieldMemOperand(function_template_info,
+                                    FunctionTemplateInfo::kSignatureOffset));
+  __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ B(eq, &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ Bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, x16, x16);
+  __ Cmp(x16, Operand(JS_FUNCTION_TYPE));
+  Label next_prototype;
+  __ B(ne, &next_prototype);
+  Register type = constructor;
+  __ Ldr(type,
+         FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ Bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ Cmp(signature, type);
+  __ B(eq, &receiver_check_passed);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ CompareObjectType(type, x16, x17, FUNCTION_TEMPLATE_INFO_TYPE);
+  __ B(ne, &next_prototype);
+
+  // Otherwise load the parent function template and iterate.
+  __ Ldr(type,
+         FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ B(&function_template_loop);
+
+  // Load the next prototype.
+  __ Bind(&next_prototype);
+  __ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset));
+  __ Tst(x16, Operand(Map::HasHiddenPrototype::kMask));
+  __ B(eq, receiver_check_failed);
+  __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ B(&prototype_loop_start);
+
+  __ Bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0                 : number of arguments excluding receiver
+  //  -- x1                 : callee
+  //  -- lr                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument
+  //  -- sp[8 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+  CompatibleReceiverCheck(masm, x2, x3, x4, x5, x6, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ Ldr(x4, FieldMemOperand(x3, FunctionTemplateInfo::kCallCodeOffset));
+  __ Ldr(x4, FieldMemOperand(x4, CallHandlerInfo::kFastHandlerOffset));
+  __ Add(x4, x4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(x4);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ Bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ add(x0, x0, Operand(1));
+  __ Drop(x0);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ Push(x0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the caller.
+  Label skip;
+  __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+  __ Ret();
+
+  __ Bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the
+  // actual JavaScript frame. This is the case when OSR is triggered from
+  // bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ Ldrsw(x1, UntagSmiFieldMemOperand(
+                   x1, FixedArray::OffsetOfElementAt(
+                           DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ Add(x0, x0, x1);
+  __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- x0      : number of arguments
+  //  -- x1      : function
+  //  -- cp      : context
+  //  -- lr      : return address
+  //  -- jssp[0] : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_DatePrototype_GetField");
+
+  // 1. Pop receiver into x0 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(x0);
+    __ JumpIfSmi(x0, &receiver_not_date);
+    __ JumpIfNotObjectType(x0, x2, x3, JS_DATE_TYPE, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
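+  // Fields below JSDate::kFirstUncachedField are cached on the JSDate
+  // object and are valid only while the isolate-wide date cache stamp
+  // matches the stamp stored on the object (the stamp changes e.g. when the
+  // time zone changes); a stamp mismatch falls through to the C++ helper.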
+  if (field_index == JSDate::kDateValue) {
+    __ Ldr(x0, FieldMemOperand(x0, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ Mov(x1, ExternalReference::date_cache_stamp(masm->isolate()));
+      __ Ldr(x1, MemOperand(x1));
+      __ Ldr(x2, FieldMemOperand(x0, JSDate::kCacheStampOffset));
+      __ Cmp(x1, x2);
+      __ B(ne, &stamp_mismatch);
+      __ Ldr(x0, FieldMemOperand(
+                     x0, JSDate::kValueOffset + field_index * kPointerSize));
+      __ Ret();
+      __ Bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Mov(x1, Smi::FromInt(field_index));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ Bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(x0);
+    __ Mov(x0, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, x1, x0);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0       : argc
+  //  -- jssp[0]  : argArray (if argc == 2)
+  //  -- jssp[8]  : thisArg  (if argc >= 1)
+  //  -- jssp[16] : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
+
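+  // This builtin implements the JS-level behaviour of
+  //   f.apply(thisArg, argArray);
+  // on entry the receiver slot holds f, with thisArg and argArray both
+  // optional (see the state comment above).
+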
+  Register argc = x0;
+  Register arg_array = x0;
+  Register receiver = x1;
+  Register this_arg = x2;
+  Register undefined_value = x3;
+  Register null_value = x4;
+
+  __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+
+  // 1. Load receiver into x1, argArray into x0 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+    __ Claim(2);
+    __ Drop(argc);
+
+    // ----------- S t a t e -------------
+    //  -- x0       : argc
+    //  -- jssp[0]  : argArray (dummy value if argc <= 1)
+    //  -- jssp[8]  : thisArg  (dummy value if argc == 0)
+    //  -- jssp[16] : receiver
+    // -----------------------------------
+    __ Cmp(argc, 1);
+    __ Pop(arg_array, this_arg);               // Overwrites argc.
+    __ CmovX(this_arg, undefined_value, lo);   // undefined if argc == 0.
+    __ CmovX(arg_array, undefined_value, ls);  // undefined if argc <= 1.
+
+    __ Peek(receiver, 0);
+    __ Poke(this_arg, 0);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- x0      : argArray
+  //  -- x1      : receiver
+  //  -- x3      : undefined root value
+  //  -- jssp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(receiver, &receiver_not_callable);
+  __ Ldr(x10, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable,
+                             &receiver_not_callable);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ Cmp(arg_array, null_value);
+  __ Ccmp(arg_array, undefined_value, ZFlag, ne);
+  __ B(eq, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target in x3).
+  DCHECK(undefined_value.Is(x3));
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ Bind(&no_arguments);
+  {
+    __ Mov(x0, 0);
+    DCHECK(receiver.Is(x1));
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ Bind(&receiver_not_callable);
+  {
+    __ Poke(receiver, 0);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  Register argc = x0;
+  Register function = x1;
+  Register scratch1 = x10;
+  Register scratch2 = x11;
+
+  ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
+
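+  // This builtin implements the JS-level behaviour of
+  //   f.call(thisArg, arg1, ..., argN);
+  // f arrives in the receiver slot, so the original first argument becomes
+  // the new receiver and the remaining arguments shift down by one slot.
+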
+  // 1. Make sure we have at least one argument.
+  {
+    Label done;
+    __ Cbnz(argc, &done);
+    __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+    __ Push(scratch1);
+    __ Mov(argc, 1);
+    __ Bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is jssp.
+    __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+    __ Sub(scratch1, scratch2, kPointerSize);
+
+    __ Bind(&loop);
+    __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
+    __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
+    __ Cmp(scratch1, jssp);
+    __ B(ge, &loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Sub(argc, argc, 1);
+    __ Drop(1);
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0       : argc
+  //  -- jssp[0]  : argumentsList (if argc == 3)
+  //  -- jssp[8]  : thisArgument  (if argc >= 2)
+  //  -- jssp[16] : target        (if argc >= 1)
+  //  -- jssp[24] : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_ReflectApply");
+
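+  // This builtin implements the JS-level behaviour of
+  //   Reflect.apply(target, thisArgument, argumentsList);
+  // each of the three arguments defaults to undefined when absent.
+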
+  Register argc = x0;
+  Register arguments_list = x0;
+  Register target = x1;
+  Register this_argument = x2;
+  Register undefined_value = x3;
+
+  __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
+  // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+    __ Claim(3);
+    __ Drop(argc);
+
+    // ----------- S t a t e -------------
+    //  -- x0       : argc
+    //  -- jssp[0]  : argumentsList (dummy value if argc <= 2)
+    //  -- jssp[8]  : thisArgument  (dummy value if argc <= 1)
+    //  -- jssp[16] : target        (dummy value if argc == 0)
+    //  -- jssp[24] : receiver
+    // -----------------------------------
+    __ Adds(x10, argc, 0);  // Preserve argc, and set the Z flag if it is zero.
+    __ Pop(arguments_list, this_argument, target);  // Overwrites argc.
+    __ CmovX(target, undefined_value, eq);          // undefined if argc == 0.
+    __ Cmp(x10, 2);
+    __ CmovX(this_argument, undefined_value, lo);   // undefined if argc <= 1.
+    __ CmovX(arguments_list, undefined_value, ls);  // undefined if argc <= 2.
+
+    __ Poke(this_argument, 0);  // Overwrite receiver.
+  }
+
+  // ----------- S t a t e -------------
+  //  -- x0      : argumentsList
+  //  -- x1      : target
+  //  -- jssp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(target, &target_not_callable);
+  __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+  __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable, &target_not_callable);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target in x3).
+  DCHECK(undefined_value.Is(x3));
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ Bind(&target_not_callable);
+  {
+    __ Poke(target, 0);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0       : argc
+  //  -- jssp[0]  : new.target (optional)
+  //  -- jssp[8]  : argumentsList
+  //  -- jssp[16] : target
+  //  -- jssp[24] : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_ReflectConstruct");
+
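+  // This builtin implements the JS-level behaviour of
+  //   Reflect.construct(target, argumentsList[, newTarget]);
+  // when newTarget is omitted it defaults to target itself.
+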
+  Register argc = x0;
+  Register arguments_list = x0;
+  Register target = x1;
+  Register new_target = x3;
+  Register undefined_value = x4;
+
+  __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
+  // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+  // new.target into x3 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push thisArgument
+  // (if present) instead.
+  {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+    __ Claim(3);
+    __ Drop(argc);
+
+    // ----------- S t a t e -------------
+    //  -- x0       : argc
+    //  -- jssp[0]  : new.target    (dummy value if argc <= 2)
+    //  -- jssp[8]  : argumentsList (dummy value if argc <= 1)
+    //  -- jssp[16] : target        (dummy value if argc == 0)
+    //  -- jssp[24] : receiver
+    // -----------------------------------
+    __ Adds(x10, argc, 0);  // Preserve argc, and set the Z flag if it is zero.
+    __ Pop(new_target, arguments_list, target);  // Overwrites argc.
+    __ CmovX(target, undefined_value, eq);       // undefined if argc == 0.
+    __ Cmp(x10, 2);
+    __ CmovX(arguments_list, undefined_value, lo);  // undefined if argc <= 1.
+    __ CmovX(new_target, target, ls);               // target if argc <= 2.
+
+    __ Poke(undefined_value, 0);  // Overwrite receiver.
+  }
+
+  // ----------- S t a t e -------------
+  //  -- x0      : argumentsList
+  //  -- x1      : target
+  //  -- x3      : new.target
+  //  -- jssp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(target, &target_not_constructor);
+  __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+  __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+                             &target_not_constructor);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(new_target, &new_target_not_constructor);
+  __ Ldr(x10, FieldMemOperand(new_target, HeapObject::kMapOffset));
+  __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+                             &new_target_not_constructor);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ Bind(&target_not_constructor);
+  {
+    __ Poke(target, 0);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ Bind(&new_target_not_constructor);
+  {
+    __ Poke(new_target, 0);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- x0 : actual number of arguments
+  //  -- x1 : function (passed through to callee)
+  //  -- x2 : expected number of arguments
+  //  -- x3 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow.
+  // We are not trying to catch interruptions (e.g. debug break and
+  // preemption) here, so the "real stack limit" is checked.
+  Label enough_stack_space;
+  __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+  // Make x10 the space we have left. The stack might already be overflowed
+  // here which will cause x10 to become negative.
+  __ Sub(x10, jssp, x10);
+  // Check if the arguments will overflow the stack.
+  __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2));
+  __ B(le, stack_overflow);
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(x10, x0);
+  __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Push(lr, fp);
+  __ Push(x11, x1, x10);
+  __ Add(fp, jssp,
+         StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
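+
+// After EnterArgumentsAdaptorFrame the frame looks like this (a sketch;
+// offsets are relative to the new fp, which points at the saved fp slot):
+//   fp[8]  : lr (caller pc)
+//   fp[0]  : saved fp
+//   fp[-8] : ARGUMENTS_ADAPTOR frame-type marker (smi)
+//   fp[-16]: function
+//   fp[-24]: actual argument count (smi)   <-- jssp
+// LeaveArgumentsAdaptorFrame below reloads the count from fp[-24] in order
+// to drop the parameters and the receiver.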
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then drop the parameters and the receiver.
+  __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                               kPointerSize)));
+  __ Mov(jssp, fp);
+  __ Pop(fp, lr);
+  __ DropBySMI(x10, kXRegSize);
+  __ Drop(1);
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0      : argumentsList
+  //  -- x1      : target
+  //  -- x3      : new.target (checked to be constructor or undefined)
+  //  -- jssp[0] : thisArgument
+  // -----------------------------------
+
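+  // This is the common tail used by Function.prototype.apply, Reflect.apply
+  // and Reflect.construct: the array-like argumentsList is flattened into a
+  // FixedArray (inline for unmodified arguments objects and fast JSArrays,
+  // via Runtime::kCreateListFromArrayLike otherwise) and its elements are
+  // then pushed and dispatched to Call or Construct.
+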
+  Register arguments_list = x0;
+  Register target = x1;
+  Register new_target = x3;
+
+  Register args = x0;
+  Register len = x2;
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(arguments_list, &create_runtime);
+
+    // Load native context.
+    Register native_context = x4;
+    __ Ldr(native_context, NativeContextMemOperand());
+
+    // Load the map of argumentsList.
+    Register arguments_list_map = x2;
+    __ Ldr(arguments_list_map,
+           FieldMemOperand(arguments_list, HeapObject::kMapOffset));
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ Ldr(x10, ContextMemOperand(native_context,
+                                  Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ Ldr(x11, ContextMemOperand(native_context,
+                                  Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ Cmp(arguments_list_map, x10);
+    __ Ccmp(arguments_list_map, x11, ZFlag, ne);
+    __ B(eq, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
+    __ B(eq, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ Bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(target, new_target, arguments_list);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(new_target, target);
+      __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+                                            FixedArray::kLengthOffset));
+    }
+    __ B(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ Bind(&create_arguments);
+    __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+                                          JSArgumentsObject::kLengthOffset));
+    __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+    __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+    __ CompareAndBranch(len, x11, ne, &create_runtime);
+    __ Mov(args, x10);
+    __ B(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ Bind(&create_array);
+    __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(x10);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    // Branch for anything that's not FAST_{SMI_}ELEMENTS.
+    __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
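+    // Given the asserts above, only FAST_SMI_ELEMENTS (0) and FAST_ELEMENTS
+    // (2) pass this test; the holey (odd-numbered) and all higher kinds
+    // branch to create_runtime.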
+    __ Ldrsw(len,
+             UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+    __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+
+    __ Bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+    // Make x10 the space we have left. The stack might already be overflowed
+    // here which will cause x10 to become negative.
+    __ Sub(x10, masm->StackPointer(), x10);
+    // Check if the arguments will overflow the stack.
+    __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
+    __ B(gt, &done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ Bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- x0      : args (a FixedArray built from argumentsList)
+  //  -- x1      : target
+  //  -- x2      : len (number of elements to push from args)
+  //  -- x3      : new.target (checked to be constructor or undefined)
+  //  -- jssp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    Label done, loop;
+    Register src = x4;
+
+    __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+    __ Mov(x0, len);  // The 'len' argument for Call() or Construct().
+    __ Cbz(len, &done);
+    __ Claim(len);
+    __ Bind(&loop);
+    __ Sub(len, len, 1);
+    __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
+    __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+    __ Cbnz(len, &loop);
+    __ Bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- x0              : argument count (len)
+  //  -- x1              : target
+  //  -- x3              : new.target (checked to be constructor or undefined)
+  //  -- jssp[0]         : args[len-1]
+  //  -- jssp[8]         : args[len-2]
+  //      ...            :  ...
+  //  -- jssp[8*(len-2)] : args[1]
+  //  -- jssp[8*(len-1)] : args[0]
+  // -----------------------------------
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(new_target, Heap::kUndefinedValueRootIndex);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
+  __ Ldrb(scratch1, MemOperand(scratch1));
+  __ Cmp(scratch1, Operand(0));
+  __ B(eq, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ Ldr(scratch3,
+           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ B(ne, &no_interpreter_frame);
+    __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(scratch3,
+         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ Ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ B(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldrsw(caller_args_count_reg,
+           FieldMemOperand(scratch1,
+                           SharedFunctionInfo::kFormalParameterCountOffset));
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  ASM_LOCATION("Builtins::Generate_CallFunction");
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that function is not a "classConstructor".
+  Label class_constructor;
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+  __ TestAndBranchIfAnySet(
+      w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) |
+              (1 << SharedFunctionInfo::kIsSubclassConstructor) |
+              (1 << SharedFunctionInfo::kIsBaseConstructor),
+      &class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
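+  // (ES6 9.2.1.2, OrdinaryCallBindThis): e.g. in sloppy mode f.call(null)
+  // runs f with the global proxy as the receiver, while f.call(42) boxes 42
+  // into a Number wrapper object via ToObject below.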
+  Label done_convert;
+  __ TestAndBranchIfAnySet(w3,
+                           (1 << SharedFunctionInfo::kNative) |
+                               (1 << SharedFunctionInfo::kStrictModeFunction),
+                           &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- x0 : the number of arguments (not including the receiver)
+    //  -- x1 : the function to call (checked to be a JSFunction)
+    //  -- x2 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(x3);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
+      __ JumpIfSmi(x3, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
+      __ B(hs, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
+        __ Bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(x3);
+        }
+        __ B(&convert_receiver);
+      }
+      __ Bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(x0);
+        __ Push(x0, x1);
+        __ Mov(x0, x3);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ Mov(x3, x0);
+        __ Pop(x1, x0);
+        __ SmiUntag(x0);
+      }
+      __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+      __ Bind(&convert_receiver);
+    }
+    __ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
+  }
+  __ Bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the function to call (checked to be a JSFunction)
+  //  -- x2 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
+  __ Ldrsw(
+      x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+  ParameterCount actual(x0);
+  ParameterCount expected(x2);
+  __ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ Push(x1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : target (checked to be a JSBoundFunction)
+  //  -- x3 : new.target (only in case of [[Construct]])
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into x2 and length of that into x4.
+  Label no_bound_arguments;
+  __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+  __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+  __ Cmp(x4, 0);
+  __ B(eq, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- x0 : the number of arguments (not including the receiver)
+    //  -- x1 : target (checked to be a JSBoundFunction)
+    //  -- x2 : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- x3 : new.target (only in case of [[Construct]])
+    //  -- x4 : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ Claim(x4);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+      __ B(gt, &done);  // Signed comparison.
+      // Restore the stack pointer.
+      __ Drop(x4);
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ Bind(&done);
+    }
+
+    // Relocate arguments down the stack.
+    {
+      Label loop, done_loop;
+      __ Mov(x5, 0);
+      __ Bind(&loop);
+      __ Cmp(x5, x0);
+      __ B(gt, &done_loop);
+      __ Peek(x10, Operand(x4, LSL, kPointerSizeLog2));
+      __ Poke(x10, Operand(x5, LSL, kPointerSizeLog2));
+      __ Add(x4, x4, 1);
+      __ Add(x5, x5, 1);
+      __ B(&loop);
+      __ Bind(&done_loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+      __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
+      __ Bind(&loop);
+      __ Sub(x4, x4, 1);
+      __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
+      __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+      __ Add(x0, x0, 1);
+      __ Cmp(x4, 0);
+      __ B(gt, &loop);
+    }
+  }
+  __ Bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
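+  // For g = f.bind(b_this, b1, ..., bK), a call g(a1, ..., aN) must invoke
+  // f with receiver b_this and arguments (b1, ..., bK, a1, ..., aN): the
+  // receiver is patched below and the bound arguments are spliced in by
+  // Generate_PushBoundArguments.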
+  __ AssertBoundFunction(x1);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+  __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ Mov(x10,
+         ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+  __ Ldr(x11, MemOperand(x10));
+  __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x12);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(x1, &non_callable);
+  __ Bind(&non_smi);
+  __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+  __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+
+  __ Cmp(x5, JS_PROXY_TYPE);
+  __ B(ne, &non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(x1);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ Add(x0, x0, Operand(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ Bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the constructor to call (checked to be a JSFunction)
+  //  -- x3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // x2 to contain either an AllocationSite or undefined.
+  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+  __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x4);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the function to call (checked to be a JSBoundFunction)
+  //  -- x3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(x1);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label done;
+    __ Cmp(x1, x3);
+    __ B(ne, &done);
+    __ Ldr(x3,
+           FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ Bind(&done);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ Mov(x10, ExternalReference(Builtins::kConstruct, masm->isolate()));
+  __ Ldr(x11, MemOperand(x10));
+  __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x12);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the constructor to call (checked to be a JSProxy)
+  //  -- x3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(x1);
+  __ Push(x3);
+  // Include the pushed new_target, constructor and the receiver.
+  __ Add(x0, x0, 3);
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the constructor to call (can be any Object)
+  //  -- x3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(x1, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Construct]] internal method.
+  __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ Cmp(x5, JS_PROXY_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
+  // ----------- S t a t e -------------
+  //  -- x1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(x1);
+  __ Push(x1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
+  // ----------- S t a t e -------------
+  //  -- x1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(x1);
+  __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(x1, x2);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_Abort");
+  // ----------- S t a t e -------------
+  //  -- x1 : message_id as Smi
+  //  -- lr : return address
+  // -----------------------------------
+  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+  __ Push(x1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in x0.
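+  // Smis and HeapNumbers are already numbers and are returned unchanged
+  // (e.g. ToNumber(42) or ToNumber(4.2)); everything else (strings,
+  // oddballs, JSReceivers) is dispatched to the NonNumberToNumber builtin.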
+  Label not_smi;
+  __ JumpIfNotSmi(x0, &not_smi);
+  __ Ret();
+  __ Bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+  // x0: receiver
+  // x1: receiver instance type
+  __ B(ne, &not_heap_number);
+  __ Ret();
+  __ Bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+  // ----------- S t a t e -------------
+  //  -- x0 : actual number of arguments
+  //  -- x1 : function (passed through to callee)
+  //  -- x2 : expected number of arguments
+  //  -- x3 : new target (passed through to callee)
+  // -----------------------------------
+
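+  // The adaptor reconciles the actual argument count with the callee's
+  // declared parameter count: e.g. for `function f(x, y) {}`, f(1) fills y
+  // with undefined, while f(1, 2, 3) copies only the receiver, x and y into
+  // the adapted frame (the actual count stays recorded in the frame, e.g.
+  // for materializing the arguments object).
+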
+  Register argc_actual = x0;    // Excluding the receiver.
+  Register argc_expected = x2;  // Excluding the receiver.
+  Register function = x1;
+  Register code_entry = x10;
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ Cmp(argc_actual, argc_expected);
+  __ B(lt, &too_few);
+  __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ B(eq, &dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    Register copy_start = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_start, fp, 3 * kPointerSize);
+    __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+    __ Sub(copy_end, copy_start, scratch2);
+    __ Sub(copy_end, copy_end, kPointerSize);
+    __ Mov(copy_to, jssp);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, scratch2, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_start, -2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, -2 * kPointerSize, PreIndex));
+    __ Cmp(copy_start, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
+
+    __ B(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected
+    __ Bind(&too_few);
+
+    Register copy_from = x10;
+    Register copy_end = x11;
+    Register copy_to = x12;
+    Register scratch1 = x13, scratch2 = x14;
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
+    __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+    // Adjust for fp, lr, and the receiver.
+    __ Add(copy_from, fp, 3 * kPointerSize);
+    __ Add(copy_from, copy_from, argc_actual);
+    __ Mov(copy_to, jssp);
+    __ Sub(copy_end, copy_to, 1 * kPointerSize);  // Adjust for the receiver.
+    __ Sub(copy_end, copy_end, argc_actual);
+
+    // Claim space for the arguments, the receiver, and one extra slot.
+    // The extra slot ensures we do not write under jssp. It will be popped
+    // later.
+    __ Add(scratch1, scratch2, 2 * kPointerSize);
+    __ Claim(scratch1, 1);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    Label copy_2_by_2;
+    __ Bind(&copy_2_by_2);
+    __ Ldp(scratch1, scratch2,
+           MemOperand(copy_from, -2 * kPointerSize, PreIndex));
+    __ Stp(scratch1, scratch2,
+           MemOperand(copy_to, -2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &copy_2_by_2);
+
+    __ Mov(copy_to, copy_end);
+
+    // Fill the remaining expected arguments with undefined.
+    __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+    __ Add(copy_end, jssp, kPointerSize);
+
+    Label fill;
+    __ Bind(&fill);
+    __ Stp(scratch1, scratch1,
+           MemOperand(copy_to, -2 * kPointerSize, PreIndex));
+    __ Cmp(copy_to, copy_end);
+    __ B(hi, &fill);
+
+    // Correct the space allocated for the extra slot.
+    __ Drop(1);
+  }
+
+  // Arguments have been adapted. Now call the entry point.
+  __ Bind(&invoke);
+  __ Mov(argc_actual, argc_expected);
+  // x0 : expected number of arguments
+  // x1 : function (passed through to callee)
+  // x3 : new target (passed through to callee)
+  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+  __ Call(code_entry);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+  // Call the entry point without adapting the arguments.
+  __ Bind(&dont_adapt_arguments);
+  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+  __ Jump(code_entry);
+
+  __ Bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ Unreachable();
+  }
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/builtins/builtins-api.cc b/src/builtins/builtins-api.cc
new file mode 100644
index 0000000..aed10b1
--- /dev/null
+++ b/src/builtins/builtins-api.cc
@@ -0,0 +1,291 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+
+#include "src/api-arguments.h"
+#include "src/api-natives.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Returns the holder JSObject if the function can legally be called with this
+// receiver.  Returns nullptr if the call is illegal.
+// TODO(dcarney): CallOptimization duplicates this logic, merge.
+JSObject* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
+                                JSObject* receiver) {
+  Object* recv_type = info->signature();
+  // No signature, so any receiver is compatible; the receiver is its own
+  // holder.
+  if (!recv_type->IsFunctionTemplateInfo()) return receiver;
+  FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
+
+  // Check the receiver. Fast path for receivers with no hidden prototypes.
+  if (signature->IsTemplateFor(receiver)) return receiver;
+  if (!receiver->map()->has_hidden_prototype()) return nullptr;
+  for (PrototypeIterator iter(isolate, receiver, kStartAtPrototype,
+                              PrototypeIterator::END_AT_NON_HIDDEN);
+       !iter.IsAtEnd(); iter.Advance()) {
+    JSObject* current = iter.GetCurrent<JSObject>();
+    if (signature->IsTemplateFor(current)) return current;
+  }
+  return nullptr;
+}
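+
+// An embedder opts into this check by creating the function from a
+// signature-restricted template, e.g. (a sketch using the public API):
+//
+//   v8::Local<v8::FunctionTemplate> recv_templ =
+//       v8::FunctionTemplate::New(isolate);
+//   v8::Local<v8::Signature> sig = v8::Signature::New(isolate, recv_templ);
+//   v8::Local<v8::FunctionTemplate> fn_templ = v8::FunctionTemplate::New(
+//       isolate, callback, v8::Local<v8::Value>(), sig);
+//
+// Calling the resulting function on a receiver that was not instantiated
+// from recv_templ (or a template inheriting from it) makes this helper
+// return nullptr, and the caller throws kIllegalInvocation.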
+
+template <bool is_construct>
+MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+    Isolate* isolate, Handle<HeapObject> function,
+    Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
+    Handle<Object> receiver, BuiltinArguments args) {
+  Handle<JSObject> js_receiver;
+  JSObject* raw_holder;
+  if (is_construct) {
+    DCHECK(args.receiver()->IsTheHole(isolate));
+    if (fun_data->instance_template()->IsUndefined(isolate)) {
+      v8::Local<ObjectTemplate> templ =
+          ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
+                              ToApiHandle<v8::FunctionTemplate>(fun_data));
+      fun_data->set_instance_template(*Utils::OpenHandle(*templ));
+    }
+    Handle<ObjectTemplateInfo> instance_template(
+        ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, js_receiver,
+        ApiNatives::InstantiateObject(instance_template,
+                                      Handle<JSReceiver>::cast(new_target)),
+        Object);
+    args[0] = *js_receiver;
+    DCHECK_EQ(*js_receiver, *args.receiver());
+
+    raw_holder = *js_receiver;
+  } else {
+    DCHECK(receiver->IsJSReceiver());
+
+    if (!receiver->IsJSObject()) {
+      // This function cannot be called with the given receiver.  Abort!
+      THROW_NEW_ERROR(
+          isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+    }
+
+    js_receiver = Handle<JSObject>::cast(receiver);
+
+    if (!fun_data->accept_any_receiver() &&
+        js_receiver->IsAccessCheckNeeded() &&
+        !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
+      isolate->ReportFailedAccessCheck(js_receiver);
+      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    }
+
+    raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
+
+    if (raw_holder == nullptr) {
+      // This function cannot be called with the given receiver.  Abort!
+      THROW_NEW_ERROR(
+          isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+    }
+  }
+
+  Object* raw_call_data = fun_data->call_code();
+  if (!raw_call_data->IsUndefined(isolate)) {
+    DCHECK(raw_call_data->IsCallHandlerInfo());
+    CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+    Object* callback_obj = call_data->callback();
+    v8::FunctionCallback callback =
+        v8::ToCData<v8::FunctionCallback>(callback_obj);
+    Object* data_obj = call_data->data();
+
+    LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
+
+    FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
+                                     *new_target, &args[0] - 1,
+                                     args.length() - 1);
+
+    Handle<Object> result = custom.Call(callback);
+
+    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    if (result.is_null()) {
+      if (is_construct) return js_receiver;
+      return isolate->factory()->undefined_value();
+    }
+    // Rebox the result.
+    result->VerifyApiCallResultType();
+    if (!is_construct || result->IsJSObject()) return handle(*result, isolate);
+  }
+
+  return js_receiver;
+}
+
+}  // anonymous namespace
+
+BUILTIN(HandleApiCall) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> function = args.target<JSFunction>();
+  Handle<Object> receiver = args.receiver();
+  Handle<HeapObject> new_target = args.new_target();
+  Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
+                                        isolate);
+  if (new_target->IsJSReceiver()) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, HandleApiCallHelper<true>(isolate, function, new_target,
+                                           fun_data, receiver, args));
+  } else {
+    RETURN_RESULT_OR_FAILURE(
+        isolate, HandleApiCallHelper<false>(isolate, function, new_target,
+                                            fun_data, receiver, args));
+  }
+}
+
+namespace {
+
+class RelocatableArguments : public BuiltinArguments, public Relocatable {
+ public:
+  RelocatableArguments(Isolate* isolate, int length, Object** arguments)
+      : BuiltinArguments(length, arguments), Relocatable(isolate) {}
+
+  virtual inline void IterateInstance(ObjectVisitor* v) {
+    if (length() == 0) return;
+    v->VisitPointers(lowest_address(), highest_address() + 1);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RelocatableArguments);
+};
+
+}  // namespace
+
+MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
+                                                bool is_construct,
+                                                Handle<HeapObject> function,
+                                                Handle<Object> receiver,
+                                                int argc, Handle<Object> args[],
+                                                Handle<HeapObject> new_target) {
+  DCHECK(function->IsFunctionTemplateInfo() ||
+         (function->IsJSFunction() &&
+          JSFunction::cast(*function)->shared()->IsApiFunction()));
+
+  // Do proper receiver conversion for non-strict mode api functions.
+  if (!is_construct && !receiver->IsJSReceiver()) {
+    if (function->IsFunctionTemplateInfo() ||
+        is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                                 Object::ConvertReceiver(isolate, receiver),
+                                 Object);
+    }
+  }
+
+  Handle<FunctionTemplateInfo> fun_data =
+      function->IsFunctionTemplateInfo()
+          ? Handle<FunctionTemplateInfo>::cast(function)
+          : handle(JSFunction::cast(*function)->shared()->get_api_func_data(),
+                   isolate);
+  // Construct BuiltinArguments object:
+  // new target, function, arguments reversed, receiver.
+  const int kBufferSize = 32;
+  Object* small_argv[kBufferSize];
+  Object** argv;
+  const int frame_argc = argc + BuiltinArguments::kNumExtraArgsWithReceiver;
+  if (frame_argc <= kBufferSize) {
+    argv = small_argv;
+  } else {
+    argv = new Object*[frame_argc];
+  }
+  int cursor = frame_argc - 1;
+  argv[cursor--] = *receiver;
+  for (int i = 0; i < argc; ++i) {
+    argv[cursor--] = *args[i];
+  }
+  DCHECK(cursor == BuiltinArguments::kArgcOffset);
+  argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
+  argv[BuiltinArguments::kTargetOffset] = *function;
+  argv[BuiltinArguments::kNewTargetOffset] = *new_target;
+  MaybeHandle<Object> result;
+  {
+    RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
+    if (is_construct) {
+      result = HandleApiCallHelper<true>(isolate, function, new_target,
+                                         fun_data, receiver, arguments);
+    } else {
+      result = HandleApiCallHelper<false>(isolate, function, new_target,
+                                          fun_data, receiver, arguments);
+    }
+  }
+  if (argv != small_argv) delete[] argv;
+  return result;
+}
+
+// Helper function to handle calls to non-function objects created through the
+// API. The object can be called as either a constructor (using new) or just as
+// a function (without new).
+MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
+    Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
+  Handle<Object> receiver = args.receiver();
+
+  // Get the object called.
+  JSObject* obj = JSObject::cast(*receiver);
+
+  // Set the new target.
+  HeapObject* new_target;
+  if (is_construct_call) {
+    // TODO(adamk): This should be passed through in args instead of
+    // being patched in here. We need to set a non-undefined value
+    // for v8::FunctionCallbackInfo::IsConstructCall() to get the
+    // right answer.
+    new_target = obj;
+  } else {
+    new_target = isolate->heap()->undefined_value();
+  }
+
+  // Get the invocation callback from the function descriptor that was
+  // used to create the called object.
+  DCHECK(obj->map()->is_callable());
+  JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
+  // TODO(ishell): turn this back to a DCHECK.
+  CHECK(constructor->shared()->IsApiFunction());
+  Object* handler =
+      constructor->shared()->get_api_func_data()->instance_call_handler();
+  DCHECK(!handler->IsUndefined(isolate));
+  // TODO(ishell): remove this debugging code.
+  CHECK(handler->IsCallHandlerInfo());
+  CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
+  Object* callback_obj = call_data->callback();
+  v8::FunctionCallback callback =
+      v8::ToCData<v8::FunctionCallback>(callback_obj);
+
+  // Get the data for the call and perform the callback.
+  Object* result;
+  {
+    HandleScope scope(isolate);
+    LOG(isolate, ApiObjectAccess("call non-function", obj));
+
+    FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
+                                     obj, new_target, &args[0] - 1,
+                                     args.length() - 1);
+    Handle<Object> result_handle = custom.Call(callback);
+    if (result_handle.is_null()) {
+      result = isolate->heap()->undefined_value();
+    } else {
+      result = *result_handle;
+    }
+  }
+  // Check for exceptions and return result.
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+  return result;
+}
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a normal function call.
+BUILTIN(HandleApiCallAsFunction) {
+  return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
+}
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a construct call.
+BUILTIN(HandleApiCallAsConstructor) {
+  return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc
new file mode 100644
index 0000000..09ee4cc
--- /dev/null
+++ b/src/builtins/builtins-array.cc
@@ -0,0 +1,2119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+#include "src/elements.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
+  // This is an extended version of ECMA-262 7.1.11, handling signed values.
+  // Try to convert |object| to a number and clamp it to [kMinInt, kMaxInt].
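+  // For example, NaN maps to 0, 1e12 clamps to kMaxInt, and true maps to 1.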
+  if (object->IsSmi()) {
+    *out = Smi::cast(object)->value();
+    return true;
+  } else if (object->IsHeapNumber()) {
+    double value = HeapNumber::cast(object)->value();
+    if (std::isnan(value)) {
+      *out = 0;
+    } else if (value > kMaxInt) {
+      *out = kMaxInt;
+    } else if (value < kMinInt) {
+      *out = kMinInt;
+    } else {
+      *out = static_cast<int>(value);
+    }
+    return true;
+  } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+    *out = 0;
+    return true;
+  } else if (object->IsBoolean()) {
+    *out = object->IsTrue(isolate);
+    return true;
+  }
+  return false;
+}
+
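+// Extracts the length of an arguments object, returning false if |object| is
+// not a sloppy, strict or fast-aliased arguments object, or if its length is
+// not a usable Smi.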
+inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
+                                     int* out) {
+  Context* context = *isolate->native_context();
+  Map* map = object->map();
+  if (map != context->sloppy_arguments_map() &&
+      map != context->strict_arguments_map() &&
+      map != context->fast_aliased_arguments_map()) {
+    return false;
+  }
+  DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
+  Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
+  if (!len_obj->IsSmi()) return false;
+  *out = Max(0, Smi::cast(len_obj)->value());
+  return *out <= object->elements()->length();
+}
+
+inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
+                                              JSArray* receiver) {
+  return JSObject::PrototypeHasNoElements(isolate, receiver);
+}
+
+inline bool HasSimpleElements(JSObject* current) {
+  return current->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+         !current->GetElementsAccessor()->HasAccessors(current);
+}
+
+inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
+                                          JSObject* receiver) {
+  // Check that we have no accessors on the receiver's elements.
+  if (!HasSimpleElements(receiver)) return false;
+  return JSObject::PrototypeHasNoElements(isolate, receiver);
+}
+
+inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
+  DisallowHeapAllocation no_gc;
+  PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
+  for (; !iter.IsAtEnd(); iter.Advance()) {
+    if (iter.GetCurrent()->IsJSProxy()) return false;
+    JSObject* current = iter.GetCurrent<JSObject>();
+    if (!HasSimpleElements(current)) return false;
+  }
+  return true;
+}
+
+// Returns |false| if not applicable.
+MUST_USE_RESULT
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+                                                  Handle<Object> receiver,
+                                                  BuiltinArguments* args,
+                                                  int first_added_arg) {
+  if (!receiver->IsJSArray()) return false;
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  ElementsKind origin_kind = array->GetElementsKind();
+  if (IsDictionaryElementsKind(origin_kind)) return false;
+  if (!array->map()->is_extensible()) return false;
+  if (args == nullptr) return true;
+
+  // If there may be elements accessors in the prototype chain, the fast path
+  // cannot be used if there are arguments to add to the array.
+  if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
+
+  // Adding elements to the array prototype would break code that makes sure
+  // it has no elements. Handle that elsewhere.
+  if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+
+  // Need to ensure that the arguments passed in args can be contained in
+  // the array.
+  int args_length = args->length();
+  if (first_added_arg >= args_length) return true;
+
+  if (IsFastObjectElementsKind(origin_kind)) return true;
+  ElementsKind target_kind = origin_kind;
+  {
+    DisallowHeapAllocation no_gc;
+    for (int i = first_added_arg; i < args_length; i++) {
+      Object* arg = (*args)[i];
+      if (arg->IsHeapObject()) {
+        if (arg->IsHeapNumber()) {
+          target_kind = FAST_DOUBLE_ELEMENTS;
+        } else {
+          target_kind = FAST_ELEMENTS;
+          break;
+        }
+      }
+    }
+  }
+  if (target_kind != origin_kind) {
+    // Use a short-lived HandleScope to avoid creating several copies of the
+    // elements handle which would cause issues when left-trimming later on.
+    HandleScope scope(isolate);
+    JSObject::TransitionElementsKind(array, target_kind);
+  }
+  return true;
+}
+
+MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
+                                               Handle<JSFunction> function,
+                                               BuiltinArguments args) {
+  HandleScope handleScope(isolate);
+  int argc = args.length() - 1;
+  ScopedVector<Handle<Object>> argv(argc);
+  for (int i = 0; i < argc; ++i) {
+    argv[i] = args.at<Object>(i + 1);
+  }
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
+}
+
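+// Shared implementation of Array.prototype.push; falls back to the JS
+// version via CallJsIntrinsic when the receiver is not a JSArray with
+// writable fast elements.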
+Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+    return CallJsIntrinsic(isolate, isolate->array_push(), args);
+  }
+  // Fast Elements Path
+  int to_add = args.length() - 1;
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  int len = Smi::cast(array->length())->value();
+  if (to_add == 0) return Smi::FromInt(len);
+
+  // Currently fixed arrays cannot grow too big, so we should never hit this.
+  DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsIntrinsic(isolate, isolate->array_push(), args);
+  }
+
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  int new_length = accessor->Push(array, &args, to_add);
+  return Smi::FromInt(new_length);
+}
+}  // namespace
+
+BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
+// tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_ArrayPush) {
+  DCHECK_EQ(2, args.length());
+  Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+  // Rewrap the arguments as builtins arguments.
+  int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
+  BuiltinArguments caller_args(argc, incoming->arguments() + 1);
+  return DoArrayPush(isolate, caller_args);
+}
+
+BUILTIN(ArrayPop) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
+    return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+  }
+
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+  uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
+  if (len == 0) return isolate->heap()->undefined_value();
+
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+  }
+
+  Handle<Object> result;
+  if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
+    // Fast Elements Path
+    result = array->GetElementsAccessor()->Pop(array);
+  } else {
+    // Use Slow Lookup otherwise
+    uint32_t new_length = len - 1;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, JSReceiver::GetElement(isolate, array, new_length));
+    JSArray::SetLength(array, new_length);
+  }
+  return *result;
+}
+
+BUILTIN(ArrayShift) {
+  HandleScope scope(isolate);
+  Heap* heap = isolate->heap();
+  Handle<Object> receiver = args.receiver();
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
+      !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
+    return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+  }
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+  int len = Smi::cast(array->length())->value();
+  if (len == 0) return heap->undefined_value();
+
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+  }
+
+  Handle<Object> first = array->GetElementsAccessor()->Shift(array);
+  return *first;
+}
+
+BUILTIN(ArrayUnshift) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+    return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+  }
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  int to_add = args.length() - 1;
+  if (to_add == 0) return array->length();
+
+  // Currently fixed arrays cannot grow too big, so we should never hit this.
+  DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+  }
+
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  int new_length = accessor->Unshift(array, &args, to_add);
+  return Smi::FromInt(new_length);
+}
+
+BUILTIN(ArraySlice) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  int len = -1;
+  int relative_start = 0;
+  int relative_end = 0;
+
+  if (receiver->IsJSArray()) {
+    DisallowHeapAllocation no_gc;
+    JSArray* array = JSArray::cast(*receiver);
+    if (V8_UNLIKELY(!array->HasFastElements() ||
+                    !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+                    !isolate->IsArraySpeciesLookupChainIntact() ||
+                    // If this is a subclass of Array, then call out to JS
+                    !array->HasArrayPrototype(isolate))) {
+      AllowHeapAllocation allow_allocation;
+      return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+    }
+    len = Smi::cast(array->length())->value();
+  } else if (receiver->IsJSObject() &&
+             GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
+                                      &len)) {
+    // Array.prototype.slice.call(arguments, ...) is quite a common idiom
+    // (notably more than 50% of invocations in Web apps).
+    // Handle it in C++ as well.
+    DCHECK(JSObject::cast(*receiver)->HasFastElements() ||
+           JSObject::cast(*receiver)->HasFastArgumentsElements());
+  } else {
+    AllowHeapAllocation allow_allocation;
+    return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+  }
+  DCHECK_LE(0, len);
+  int argument_count = args.length() - 1;
+  // Note the carefully chosen defaults: if an argument is missing, it is
+  // undefined, which gets converted to 0 for relative_start and to len for
+  // relative_end.
+  relative_start = 0;
+  relative_end = len;
+  if (argument_count > 0) {
+    DisallowHeapAllocation no_gc;
+    if (!ClampedToInteger(isolate, args[1], &relative_start)) {
+      AllowHeapAllocation allow_allocation;
+      return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+    }
+    if (argument_count > 1) {
+      Object* end_arg = args[2];
+      // slice handles the end_arg specially
+      if (end_arg->IsUndefined(isolate)) {
+        relative_end = len;
+      } else if (!ClampedToInteger(isolate, end_arg, &relative_end)) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+      }
+    }
+  }
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
+  uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+                                               : Min(relative_start, len);
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
+  uint32_t actual_end =
+      (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
+
+  Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+  ElementsAccessor* accessor = object->GetElementsAccessor();
+  return *accessor->Slice(object, actual_start, actual_end);
+}
+
+BUILTIN(ArraySplice) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (V8_UNLIKELY(
+          !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
+          // If this is a subclass of Array, then call out to JS.
+          !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
+          // If anything with @@species has been messed with, call out to JS.
+          !isolate->IsArraySpeciesLookupChainIntact())) {
+    return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+  }
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+  int argument_count = args.length() - 1;
+  int relative_start = 0;
+  if (argument_count > 0) {
+    DisallowHeapAllocation no_gc;
+    if (!ClampedToInteger(isolate, args[1], &relative_start)) {
+      AllowHeapAllocation allow_allocation;
+      return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+    }
+  }
+  int len = Smi::cast(array->length())->value();
+  // clip relative start to [0, len]
+  int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+                                          : Min(relative_start, len);
+
+  int actual_delete_count;
+  if (argument_count == 1) {
+    // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count
+    // is given as a request to delete all the elements from actual_start to
+    // the end, which differs from the case of an undefined delete count.
+    // This does not follow ECMA-262, but we do the same for compatibility.
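+    // For example, [1, 2, 3].splice(1) deletes [2, 3], whereas
+    // [1, 2, 3].splice(1, undefined) deletes nothing.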
+    DCHECK(len - actual_start >= 0);
+    actual_delete_count = len - actual_start;
+  } else {
+    int delete_count = 0;
+    DisallowHeapAllocation no_gc;
+    if (argument_count > 1) {
+      if (!ClampedToInteger(isolate, args[2], &delete_count)) {
+        AllowHeapAllocation allow_allocation;
+        return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+      }
+    }
+    actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
+  }
+
+  int add_count = (argument_count > 1) ? (argument_count - 2) : 0;
+  int new_length = len - actual_delete_count + add_count;
+
+  if (new_length != len && JSArray::HasReadOnlyLength(array)) {
+    AllowHeapAllocation allow_allocation;
+    return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+  }
+  ElementsAccessor* accessor = array->GetElementsAccessor();
+  Handle<JSArray> result_array = accessor->Splice(
+      array, actual_start, actual_delete_count, &args, add_count);
+  return *result_array;
+}
+
+// Array Concat -------------------------------------------------------------
+
+namespace {
+
+/**
+ * A simple visitor that visits every element of an array.
+ * The backing storage can be a fixed array for the fast-elements case,
+ * or a dictionary for sparse arrays. Since Dictionary is a subtype
+ * of FixedArray, the class can be used for both fast and slow cases.
+ * The constructor parameter fast_elements specifies whether the
+ * storage is a FixedArray or a Dictionary.
+ *
+ * An index limit is used to deal with the situation where the result
+ * array length overflows a 32-bit non-negative integer.
+ */
+class ArrayConcatVisitor {
+ public:
+  ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
+                     bool fast_elements)
+      : isolate_(isolate),
+        storage_(isolate->global_handles()->Create(*storage)),
+        index_offset_(0u),
+        bit_field_(FastElementsField::encode(fast_elements) |
+                   ExceedsLimitField::encode(false) |
+                   IsFixedArrayField::encode(storage->IsFixedArray())) {
+    DCHECK(!(this->fast_elements() && !is_fixed_array()));
+  }
+
+  ~ArrayConcatVisitor() { clear_storage(); }
+
+  MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
+    uint32_t index = index_offset_ + i;
+
+    if (i >= JSObject::kMaxElementCount - index_offset_) {
+      set_exceeds_array_limit(true);
+      // No exception has been thrown at this point. Return true to
+      // break out, and the caller will throw. Returning false would
+      // imply that there is already a pending exception.
+      return true;
+    }
+
+    if (!is_fixed_array()) {
+      LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
+      MAYBE_RETURN(
+          JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
+          false);
+      return true;
+    }
+
+    if (fast_elements()) {
+      if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
+        storage_fixed_array()->set(index, *elm);
+        return true;
+      }
+      // Our initial estimate of length was foiled, possibly by
+      // getters on the arrays increasing the length of later arrays
+      // during iteration.
+      // This shouldn't happen in anything but pathological cases.
+      SetDictionaryMode();
+      // Fall-through to dictionary mode.
+    }
+    DCHECK(!fast_elements());
+    Handle<SeededNumberDictionary> dict(
+        SeededNumberDictionary::cast(*storage_));
+    // The object holding this backing store has just been allocated, so
+    // it cannot yet be used as a prototype.
+    Handle<SeededNumberDictionary> result =
+        SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+    if (!result.is_identical_to(dict)) {
+      // Dictionary needed to grow.
+      clear_storage();
+      set_storage(*result);
+    }
+    return true;
+  }
+
+  void increase_index_offset(uint32_t delta) {
+    if (JSObject::kMaxElementCount - index_offset_ < delta) {
+      index_offset_ = JSObject::kMaxElementCount;
+    } else {
+      index_offset_ += delta;
+    }
+    // If the initial length estimate was off (see special case in visit()),
+    // but the array blowing the limit didn't contain elements beyond the
+    // provided-for index range, go to dictionary mode now.
+    if (fast_elements() &&
+        index_offset_ >
+            static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+      SetDictionaryMode();
+    }
+  }
+
+  bool exceeds_array_limit() const {
+    return ExceedsLimitField::decode(bit_field_);
+  }
+
+  Handle<JSArray> ToArray() {
+    DCHECK(is_fixed_array());
+    Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+    Handle<Object> length =
+        isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
+    Handle<Map> map = JSObject::GetElementsTransitionMap(
+        array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
+    array->set_map(*map);
+    array->set_length(*length);
+    array->set_elements(*storage_fixed_array());
+    return array;
+  }
+
+  // Storage is either a FixedArray (if is_fixed_array()) or a JSReceiver
+  // (otherwise).
+  Handle<FixedArray> storage_fixed_array() {
+    DCHECK(is_fixed_array());
+    return Handle<FixedArray>::cast(storage_);
+  }
+  Handle<JSReceiver> storage_jsreceiver() {
+    DCHECK(!is_fixed_array());
+    return Handle<JSReceiver>::cast(storage_);
+  }
+
+ private:
+  // Convert storage to dictionary mode.
+  void SetDictionaryMode() {
+    DCHECK(fast_elements() && is_fixed_array());
+    Handle<FixedArray> current_storage = storage_fixed_array();
+    Handle<SeededNumberDictionary> slow_storage(
+        SeededNumberDictionary::New(isolate_, current_storage->length()));
+    uint32_t current_length = static_cast<uint32_t>(current_storage->length());
+    FOR_WITH_HANDLE_SCOPE(
+        isolate_, uint32_t, i = 0, i, i < current_length, i++, {
+          Handle<Object> element(current_storage->get(i), isolate_);
+          if (!element->IsTheHole(isolate_)) {
+            // The object holding this backing store has just been allocated, so
+            // it cannot yet be used as a prototype.
+            Handle<SeededNumberDictionary> new_storage =
+                SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+                                                    false);
+            if (!new_storage.is_identical_to(slow_storage)) {
+              slow_storage = loop_scope.CloseAndEscape(new_storage);
+            }
+          }
+        });
+    clear_storage();
+    set_storage(*slow_storage);
+    set_fast_elements(false);
+  }
+
+  inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
+
+  inline void set_storage(FixedArray* storage) {
+    DCHECK(is_fixed_array());
+    storage_ = isolate_->global_handles()->Create(storage);
+  }
+
+  class FastElementsField : public BitField<bool, 0, 1> {};
+  class ExceedsLimitField : public BitField<bool, 1, 1> {};
+  class IsFixedArrayField : public BitField<bool, 2, 1> {};
+
+  bool fast_elements() const { return FastElementsField::decode(bit_field_); }
+  void set_fast_elements(bool fast) {
+    bit_field_ = FastElementsField::update(bit_field_, fast);
+  }
+  void set_exceeds_array_limit(bool exceeds) {
+    bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
+  }
+  bool is_fixed_array() const { return IsFixedArrayField::decode(bit_field_); }
+
+  Isolate* isolate_;
+  Handle<Object> storage_;  // Always a global handle.
+  // Index after last seen index. Always less than or equal to
+  // JSObject::kMaxElementCount.
+  uint32_t index_offset_;
+  uint32_t bit_field_;
+};
+
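+// Estimates the number of non-hole own elements in |array|; inherited
+// elements are not counted (see the note at the end of the function).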
+uint32_t EstimateElementCount(Handle<JSArray> array) {
+  DisallowHeapAllocation no_gc;
+  uint32_t length = static_cast<uint32_t>(array->length()->Number());
+  int element_count = 0;
+  switch (array->GetElementsKind()) {
+    case FAST_SMI_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS: {
+      // Fast elements can't have lengths that are not representable by
+      // a 32-bit signed integer.
+      DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+      int fast_length = static_cast<int>(length);
+      Isolate* isolate = array->GetIsolate();
+      FixedArray* elements = FixedArray::cast(array->elements());
+      for (int i = 0; i < fast_length; i++) {
+        if (!elements->get(i)->IsTheHole(isolate)) element_count++;
+      }
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS:
+    case FAST_HOLEY_DOUBLE_ELEMENTS: {
+      // Fast elements can't have lengths that are not representable by
+      // a 32-bit signed integer.
+      DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+      int fast_length = static_cast<int>(length);
+      if (array->elements()->IsFixedArray()) {
+        DCHECK(FixedArray::cast(array->elements())->length() == 0);
+        break;
+      }
+      FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
+      for (int i = 0; i < fast_length; i++) {
+        if (!elements->is_the_hole(i)) element_count++;
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      SeededNumberDictionary* dictionary =
+          SeededNumberDictionary::cast(array->elements());
+      Isolate* isolate = dictionary->GetIsolate();
+      int capacity = dictionary->Capacity();
+      for (int i = 0; i < capacity; i++) {
+        Object* key = dictionary->KeyAt(i);
+        if (dictionary->IsKey(isolate, key)) {
+          element_count++;
+        }
+      }
+      break;
+    }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      // External arrays are always dense.
+      return length;
+    case NO_ELEMENTS:
+      return 0;
+    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
+      UNREACHABLE();
+      return 0;
+  }
+  // As an estimate, we assume that the prototype doesn't contain any
+  // inherited elements.
+  return element_count;
+}
+
+// Used for sorting indices in a List<uint32_t>.
+int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
+  uint32_t a = *ap;
+  uint32_t b = *bp;
+  return (a == b) ? 0 : (a < b) ? -1 : 1;
+}
+
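+// Appends to |indices| every element index below |range| found on |object|
+// and its prototype chain; the list may end up containing duplicates.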
+void CollectElementIndices(Handle<JSObject> object, uint32_t range,
+                           List<uint32_t>* indices) {
+  Isolate* isolate = object->GetIsolate();
+  ElementsKind kind = object->GetElementsKind();
+  switch (kind) {
+    case FAST_SMI_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS: {
+      DisallowHeapAllocation no_gc;
+      FixedArray* elements = FixedArray::cast(object->elements());
+      uint32_t length = static_cast<uint32_t>(elements->length());
+      if (range < length) length = range;
+      for (uint32_t i = 0; i < length; i++) {
+        if (!elements->get(i)->IsTheHole(isolate)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS: {
+      if (object->elements()->IsFixedArray()) {
+        DCHECK(object->elements()->length() == 0);
+        break;
+      }
+      Handle<FixedDoubleArray> elements(
+          FixedDoubleArray::cast(object->elements()));
+      uint32_t length = static_cast<uint32_t>(elements->length());
+      if (range < length) length = range;
+      for (uint32_t i = 0; i < length; i++) {
+        if (!elements->is_the_hole(i)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      DisallowHeapAllocation no_gc;
+      SeededNumberDictionary* dict =
+          SeededNumberDictionary::cast(object->elements());
+      uint32_t capacity = dict->Capacity();
+      FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
+        Object* k = dict->KeyAt(j);
+        if (!dict->IsKey(isolate, k)) continue;
+        DCHECK(k->IsNumber());
+        uint32_t index = static_cast<uint32_t>(k->Number());
+        if (index < range) {
+          indices->Add(index);
+        }
+      });
+      break;
+    }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      {
+        uint32_t length = static_cast<uint32_t>(
+            FixedArrayBase::cast(object->elements())->length());
+        if (range <= length) {
+          length = range;
+          // We will add all indices, so we might as well clear it first
+          // and avoid duplicates.
+          indices->Clear();
+        }
+        for (uint32_t i = 0; i < length; i++) {
+          indices->Add(i);
+        }
+        if (length == range) return;  // All indices accounted for already.
+        break;
+      }
+    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+      ElementsAccessor* accessor = object->GetElementsAccessor();
+      for (uint32_t i = 0; i < range; i++) {
+        if (accessor->HasElement(object, i)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS: {
+      DCHECK(object->IsJSValue());
+      Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+      DCHECK(js_value->value()->IsString());
+      Handle<String> string(String::cast(js_value->value()), isolate);
+      uint32_t length = static_cast<uint32_t>(string->length());
+      uint32_t i = 0;
+      uint32_t limit = Min(length, range);
+      for (; i < limit; i++) {
+        indices->Add(i);
+      }
+      ElementsAccessor* accessor = object->GetElementsAccessor();
+      for (; i < range; i++) {
+        if (accessor->HasElement(object, i)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
+    case NO_ELEMENTS:
+      break;
+  }
+
+  PrototypeIterator iter(isolate, object);
+  if (!iter.IsAtEnd()) {
+    // The prototype will usually have no inherited element indices,
+    // but we have to check.
+    CollectElementIndices(PrototypeIterator::GetCurrent<JSObject>(iter), range,
+                          indices);
+  }
+}
+
+bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
+                         uint32_t length, ArrayConcatVisitor* visitor) {
+  FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
+    Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
+    if (!maybe.IsJust()) return false;
+    if (maybe.FromJust()) {
+      Handle<Object> element_value;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, element_value, JSReceiver::GetElement(isolate, receiver, i),
+          false);
+      if (!visitor->visit(i, element_value)) return false;
+    }
+  });
+  visitor->increase_index_offset(length);
+  return true;
+}
+
+/**
+ * A helper function that visits "array" elements of a JSReceiver in numerical
+ * order.
+ *
+ * The visitor argument is called for each existing element in the array,
+ * with the element index and the element's value.
+ * Afterwards it increments the base index of the visitor by the array
+ * length.
+ * Returns false if any access threw an exception, otherwise true.
+ */
+bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
+                     ArrayConcatVisitor* visitor) {
+  uint32_t length = 0;
+
+  if (receiver->IsJSArray()) {
+    Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+    length = static_cast<uint32_t>(array->length()->Number());
+  } else {
+    Handle<Object> val;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate, val, Object::GetLengthFromArrayLike(isolate, receiver), false);
+    // TODO(caitp): Support larger element indexes (up to 2^53-1).
+    if (!val->ToUint32(&length)) {
+      length = 0;
+    }
+    // TODO(cbruni): handle other element kinds as well.
+    return IterateElementsSlow(isolate, receiver, length, visitor);
+  }
+
+  if (!HasOnlySimpleElements(isolate, *receiver)) {
+    return IterateElementsSlow(isolate, receiver, length, visitor);
+  }
+  Handle<JSObject> array = Handle<JSObject>::cast(receiver);
+
+  switch (array->GetElementsKind()) {
+    case FAST_SMI_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_HOLEY_ELEMENTS: {
+      // Run through the elements FixedArray and use HasElement and GetElement
+      // to check the prototype for missing elements.
+      Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+      int fast_length = static_cast<int>(length);
+      DCHECK(fast_length <= elements->length());
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
+        Handle<Object> element_value(elements->get(j), isolate);
+        if (!element_value->IsTheHole(isolate)) {
+          if (!visitor->visit(j, element_value)) return false;
+        } else {
+          Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+          if (!maybe.IsJust()) return false;
+          if (maybe.FromJust()) {
+            // Call GetElement on array, not its prototype, or getters won't
+            // have the correct receiver.
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element_value,
+                JSReceiver::GetElement(isolate, array, j), false);
+            if (!visitor->visit(j, element_value)) return false;
+          }
+        }
+      });
+      break;
+    }
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS: {
+      // Empty array is FixedArray but not FixedDoubleArray.
+      if (length == 0) break;
+      // Run through the elements FixedArray and use HasElement and GetElement
+      // to check the prototype for missing elements.
+      if (array->elements()->IsFixedArray()) {
+        DCHECK(array->elements()->length() == 0);
+        break;
+      }
+      Handle<FixedDoubleArray> elements(
+          FixedDoubleArray::cast(array->elements()));
+      int fast_length = static_cast<int>(length);
+      DCHECK(fast_length <= elements->length());
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
+        if (!elements->is_the_hole(j)) {
+          double double_value = elements->get_scalar(j);
+          Handle<Object> element_value =
+              isolate->factory()->NewNumber(double_value);
+          if (!visitor->visit(j, element_value)) return false;
+        } else {
+          Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+          if (!maybe.IsJust()) return false;
+          if (maybe.FromJust()) {
+            // Call GetElement on array, not its prototype, or getters won't
+            // have the correct receiver.
+            Handle<Object> element_value;
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element_value,
+                JSReceiver::GetElement(isolate, array, j), false);
+            if (!visitor->visit(j, element_value)) return false;
+          }
+        }
+      });
+      break;
+    }
+
+    case DICTIONARY_ELEMENTS: {
+      Handle<SeededNumberDictionary> dict(array->element_dictionary());
+      List<uint32_t> indices(dict->Capacity() / 2);
+      // Collect all indices in the object and the prototypes less
+      // than length. This might introduce duplicates in the indices list.
+      CollectElementIndices(array, length, &indices);
+      indices.Sort(&compareUInt32);
+      int n = indices.length();
+      FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < n, (void)0, {
+        uint32_t index = indices[j];
+        Handle<Object> element;
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, element, JSReceiver::GetElement(isolate, array, index),
+            false);
+        if (!visitor->visit(index, element)) return false;
+        // Skip to next different index (i.e., omit duplicates).
+        do {
+          j++;
+        } while (j < n && indices[j] == index);
+      });
+      break;
+    }
+    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+      FOR_WITH_HANDLE_SCOPE(
+          isolate, uint32_t, index = 0, index, index < length, index++, {
+            Handle<Object> element;
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element, JSReceiver::GetElement(isolate, array, index),
+                false);
+            if (!visitor->visit(index, element)) return false;
+          });
+      break;
+    }
+    case NO_ELEMENTS:
+      break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      return IterateElementsSlow(isolate, receiver, length, visitor);
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
+      // |array| is guaranteed to be an array or typed array.
+      UNREACHABLE();
+      break;
+  }
+  visitor->increase_index_offset(length);
+  return true;
+}
+
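+// ES6 22.1.3.1.1 Runtime Semantics: IsConcatSpreadable(O), with a fast path
+// for when the @@isConcatSpreadable lookup chain is known to be intact.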
+static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+  HandleScope handle_scope(isolate);
+  if (!obj->IsJSReceiver()) return Just(false);
+  if (!isolate->IsIsConcatSpreadableLookupChainIntact(JSReceiver::cast(*obj))) {
+    // Slow path if @@isConcatSpreadable has been used.
+    Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+    Handle<Object> value;
+    MaybeHandle<Object> maybeValue =
+        i::Runtime::GetObjectProperty(isolate, obj, key);
+    if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+    if (!value->IsUndefined(isolate)) return Just(value->BooleanValue());
+  }
+  return Object::IsArray(obj);
+}
+
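+// Generic implementation of Array.prototype.concat that supports dictionary
+// storage, arbitrary @@species constructors and @@isConcatSpreadable.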
+Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
+                         Isolate* isolate) {
+  int argument_count = args->length();
+
+  bool is_array_species = *species == isolate->context()->array_function();
+
+  // Pass 1: estimate the length and number of elements of the result.
+  // The actual length can be larger if any of the arguments have getters
+  // that mutate other arguments (but will otherwise be precise).
+  // The number of elements is precise if there are no inherited elements.
+
+  ElementsKind kind = FAST_SMI_ELEMENTS;
+
+  uint32_t estimate_result_length = 0;
+  uint32_t estimate_nof_elements = 0;
+  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
+    Handle<Object> obj((*args)[i], isolate);
+    uint32_t length_estimate;
+    uint32_t element_estimate;
+    if (obj->IsJSArray()) {
+      Handle<JSArray> array(Handle<JSArray>::cast(obj));
+      length_estimate = static_cast<uint32_t>(array->length()->Number());
+      if (length_estimate != 0) {
+        ElementsKind array_kind =
+            GetPackedElementsKind(array->GetElementsKind());
+        kind = GetMoreGeneralElementsKind(kind, array_kind);
+      }
+      element_estimate = EstimateElementCount(array);
+    } else {
+      if (obj->IsHeapObject()) {
+        kind = GetMoreGeneralElementsKind(
+            kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
+      }
+      length_estimate = 1;
+      element_estimate = 1;
+    }
+    // Avoid overflows by capping at kMaxElementCount.
+    if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
+      estimate_result_length = JSObject::kMaxElementCount;
+    } else {
+      estimate_result_length += length_estimate;
+    }
+    if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
+      estimate_nof_elements = JSObject::kMaxElementCount;
+    } else {
+      estimate_nof_elements += element_estimate;
+    }
+  });
+
+  // If the estimated number of elements is more than half of the length,
+  // a fixed array (fast case) is more time- and space-efficient than a
+  // dictionary.
+  bool fast_case =
+      is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
+
+  if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+    Handle<FixedArrayBase> storage =
+        isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+    int j = 0;
+    bool failure = false;
+    if (estimate_result_length > 0) {
+      Handle<FixedDoubleArray> double_storage =
+          Handle<FixedDoubleArray>::cast(storage);
+      for (int i = 0; i < argument_count; i++) {
+        Handle<Object> obj((*args)[i], isolate);
+        if (obj->IsSmi()) {
+          double_storage->set(j, Smi::cast(*obj)->value());
+          j++;
+        } else if (obj->IsNumber()) {
+          double_storage->set(j, obj->Number());
+          j++;
+        } else {
+          DisallowHeapAllocation no_gc;
+          JSArray* array = JSArray::cast(*obj);
+          uint32_t length = static_cast<uint32_t>(array->length()->Number());
+          switch (array->GetElementsKind()) {
+            case FAST_HOLEY_DOUBLE_ELEMENTS:
+            case FAST_DOUBLE_ELEMENTS: {
+              // Empty array is FixedArray but not FixedDoubleArray.
+              if (length == 0) break;
+              FixedDoubleArray* elements =
+                  FixedDoubleArray::cast(array->elements());
+              for (uint32_t i = 0; i < length; i++) {
+                if (elements->is_the_hole(i)) {
+                  // TODO(jkummerow/verwaest): We could be a bit more clever
+                  // here: Check if there are no elements/getters on the
+                  // prototype chain, and if so, allow creation of a holey
+                  // result array.
+                  // Same thing below (holey smi case).
+                  failure = true;
+                  break;
+                }
+                double double_value = elements->get_scalar(i);
+                double_storage->set(j, double_value);
+                j++;
+              }
+              break;
+            }
+            case FAST_HOLEY_SMI_ELEMENTS:
+            case FAST_SMI_ELEMENTS: {
+              Object* the_hole = isolate->heap()->the_hole_value();
+              FixedArray* elements(FixedArray::cast(array->elements()));
+              for (uint32_t i = 0; i < length; i++) {
+                Object* element = elements->get(i);
+                if (element == the_hole) {
+                  failure = true;
+                  break;
+                }
+                int32_t int_value = Smi::cast(element)->value();
+                double_storage->set(j, int_value);
+                j++;
+              }
+              break;
+            }
+            case FAST_HOLEY_ELEMENTS:
+            case FAST_ELEMENTS:
+            case DICTIONARY_ELEMENTS:
+            case NO_ELEMENTS:
+              DCHECK_EQ(0u, length);
+              break;
+            default:
+              UNREACHABLE();
+          }
+        }
+        if (failure) break;
+      }
+    }
+    if (!failure) {
+      return *isolate->factory()->NewJSArrayWithElements(storage, kind, j);
+    }
+    // In case of failure, fall through.
+  }
+
+  Handle<Object> storage;
+  if (fast_case) {
+    // The backing storage array must have non-existing elements to preserve
+    // holes across concat operations.
+    storage =
+        isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
+  } else if (is_array_species) {
+    // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
+    uint32_t at_least_space_for =
+        estimate_nof_elements + (estimate_nof_elements >> 2);
+    storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+  } else {
+    DCHECK(species->IsConstructor());
+    Handle<Object> length(Smi::FromInt(0), isolate);
+    Handle<Object> storage_object;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, storage_object,
+        Execution::New(isolate, species, species, 1, &length));
+    storage = storage_object;
+  }
+
+  ArrayConcatVisitor visitor(isolate, storage, fast_case);
+
+  for (int i = 0; i < argument_count; i++) {
+    Handle<Object> obj((*args)[i], isolate);
+    Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
+    MAYBE_RETURN(spreadable, isolate->heap()->exception());
+    if (spreadable.FromJust()) {
+      Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
+      if (!IterateElements(isolate, object, &visitor)) {
+        return isolate->heap()->exception();
+      }
+    } else {
+      if (!visitor.visit(0, obj)) return isolate->heap()->exception();
+      visitor.increase_index_offset(1);
+    }
+  }
+
+  if (visitor.exceeds_array_limit()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
+  }
+
+  if (is_array_species) {
+    return *visitor.ToArray();
+  } else {
+    return *visitor.storage_jsreceiver();
+  }
+}
+
+bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
+  DisallowHeapAllocation no_gc;
+  Map* map = obj->map();
+  // If there is only the 'length' property we are fine.
+  if (map->prototype() ==
+          isolate->native_context()->initial_array_prototype() &&
+      map->NumberOfOwnDescriptors() == 1) {
+    return true;
+  }
+  // TODO(cbruni): slower lookup for array subclasses and support slow
+  // @@IsConcatSpreadable lookup.
+  return false;
+}
+
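+// Fast path for Array.prototype.concat: only plain JSArrays with fast
+// elements and unmodified prototypes qualify. An empty MaybeHandle without a
+// pending exception tells the caller to fall back to Slow_ArrayConcat.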
+MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
+                                      BuiltinArguments* args) {
+  if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
+    return MaybeHandle<JSArray>();
+  }
+  // We shouldn't overflow when adding another len.
+  const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+  STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+  STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+  USE(kHalfOfMaxInt);
+
+  int n_arguments = args->length();
+  int result_len = 0;
+  {
+    DisallowHeapAllocation no_gc;
+    // Iterate through all the arguments performing checks
+    // and calculating total length.
+    for (int i = 0; i < n_arguments; i++) {
+      Object* arg = (*args)[i];
+      if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+      if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+        return MaybeHandle<JSArray>();
+      }
+      // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
+      if (!JSObject::cast(arg)->HasFastElements()) {
+        return MaybeHandle<JSArray>();
+      }
+      Handle<JSArray> array(JSArray::cast(arg), isolate);
+      if (!IsSimpleArray(isolate, array)) {
+        return MaybeHandle<JSArray>();
+      }
+      // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we
+      // won't overflow.
+      result_len += Smi::cast(array->length())->value();
+      DCHECK(result_len >= 0);
+      // Throw an Error if we overflow the FixedArray limits
+      if (FixedDoubleArray::kMaxLength < result_len ||
+          FixedArray::kMaxLength < result_len) {
+        AllowHeapAllocation gc;
+        THROW_NEW_ERROR(isolate,
+                        NewRangeError(MessageTemplate::kInvalidArrayLength),
+                        JSArray);
+      }
+    }
+  }
+  return ElementsAccessor::Concat(isolate, args, n_arguments, result_len);
+}
+
+}  // namespace
+
+// ES6 22.1.3.1 Array.prototype.concat
+BUILTIN(ArrayConcat) {
+  HandleScope scope(isolate);
+
+  Handle<Object> receiver = args.receiver();
+  // TODO(bmeurer): Do we really care about the exact exception message here?
+  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Array.prototype.concat")));
+  }
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, receiver, Object::ToObject(isolate, args.receiver()));
+  args[0] = *receiver;
+
+  Handle<JSArray> result_array;
+
+  // Avoid a real species read, to save extra lookups on the array
+  // constructor.
+  if (V8_LIKELY(receiver->IsJSArray() &&
+                Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
+                isolate->IsArraySpeciesLookupChainIntact())) {
+    if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+      return *result_array;
+    }
+    if (isolate->has_pending_exception()) return isolate->heap()->exception();
+  }
+  // Reading @@species happens before anything else with a side effect, so
+  // we can do it here to determine whether to take the fast path.
+  Handle<Object> species;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+  if (*species == *isolate->array_function()) {
+    if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+      return *result_array;
+    }
+    if (isolate->has_pending_exception()) return isolate->heap()->exception();
+  }
+  return Slow_ArrayConcat(&args, species, isolate);
+}
+
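+// ES6 22.1.2.2 Array.isArray, implemented as a code stub; proxies are
+// handled by falling back to the runtime.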
+void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+
+  Node* object = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  Label call_runtime(assembler), return_true(assembler),
+      return_false(assembler);
+
+  assembler->GotoIf(assembler->WordIsSmi(object), &return_false);
+  Node* instance_type = assembler->LoadInstanceType(object);
+
+  assembler->GotoIf(assembler->Word32Equal(
+                        instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
+                    &return_true);
+
+  // TODO(verwaest): Handle proxies in-place.
+  assembler->Branch(assembler->Word32Equal(
+                        instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
+                    &call_runtime, &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(
+      assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+}
+
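+// ES7 22.1.3.11 Array.prototype.includes, implemented as a code stub for
+// fast JSArrays; everything else falls back to the runtime.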
+void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* array = assembler->Parameter(0);
+  Node* search_element = assembler->Parameter(1);
+  Node* start_from = assembler->Parameter(2);
+  Node* context = assembler->Parameter(3 + 2);
+
+  Node* int32_zero = assembler->Int32Constant(0);
+  Node* int32_one = assembler->Int32Constant(1);
+
+  Node* the_hole = assembler->TheHoleConstant();
+  Node* undefined = assembler->UndefinedConstant();
+  Node* heap_number_map = assembler->HeapNumberMapConstant();
+
+  Variable len_var(assembler, MachineRepresentation::kWord32),
+      index_var(assembler, MachineRepresentation::kWord32),
+      start_from_var(assembler, MachineRepresentation::kWord32);
+
+  Label init_k(assembler), return_true(assembler), return_false(assembler),
+      call_runtime(assembler);
+
+  Label init_len(assembler);
+
+  index_var.Bind(int32_zero);
+  len_var.Bind(int32_zero);
+
+  // Take the slow path if the receiver is not a JSArray, if retrieving
+  // elements requires traversing the prototype chain, or if access checks
+  // are required.
+  assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+
+  assembler->Bind(&init_len);
+  {
+    // Handle the case where the JSArray length is not a Smi in the runtime.
+    Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+    assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+
+    len_var.Bind(assembler->SmiToWord(len));
+    assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+                      &return_false, &init_k);
+  }
+
+  assembler->Bind(&init_k);
+  {
+    Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
+        init_k_zero(assembler), init_k_n(assembler);
+    Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
+    Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+
+    assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+                      &init_k_heap_num);
+
+    assembler->Bind(&init_k_smi);
+    {
+      start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+      assembler->Goto(&init_k_n);
+    }
+
+    assembler->Bind(&init_k_heap_num);
+    {
+      Label do_return_false(assembler);
+      Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+      Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
+      assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
+                        &do_return_false);
+      start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+      assembler->Goto(&init_k_n);
+
+      assembler->Bind(&do_return_false);
+      {
+        index_var.Bind(int32_zero);
+        assembler->Goto(&return_false);
+      }
+    }
+
+    assembler->Bind(&init_k_n);
+    {
+      Label if_positive(assembler), if_negative(assembler), done(assembler);
+      assembler->Branch(
+          assembler->Int32LessThan(start_from_var.value(), int32_zero),
+          &if_negative, &if_positive);
+
+      assembler->Bind(&if_positive);
+      {
+        index_var.Bind(start_from_var.value());
+        assembler->Goto(&done);
+      }
+
+      assembler->Bind(&if_negative);
+      {
+        index_var.Bind(
+            assembler->Int32Add(len_var.value(), start_from_var.value()));
+        assembler->Branch(
+            assembler->Int32LessThan(index_var.value(), int32_zero),
+            &init_k_zero, &done);
+      }
+
+      assembler->Bind(&init_k_zero);
+      {
+        index_var.Bind(int32_zero);
+        assembler->Goto(&done);
+      }
+
+      assembler->Bind(&done);
+    }
+  }
+
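+  // For reference, the fromIndex handling above follows the JS semantics of
+  // Array.prototype.includes:
+  //   [1, 2, 3].includes(3, -1);   // true:  search starts at len + (-1) == 2
+  //   [1, 2, 3].includes(1, -10);  // true:  negative start clamps to 0
+  //   [1, 2, 3].includes(1, 10);   // false: fromIndex >= length
+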
+  static int32_t kElementsKind[] = {
+      FAST_SMI_ELEMENTS,   FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+      FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS,    FAST_HOLEY_DOUBLE_ELEMENTS,
+  };
+
+  Label if_smiorobjects(assembler), if_packed_doubles(assembler),
+      if_holey_doubles(assembler);
+  Label* element_kind_handlers[] = {&if_smiorobjects,   &if_smiorobjects,
+                                    &if_smiorobjects,   &if_smiorobjects,
+                                    &if_packed_doubles, &if_holey_doubles};
+
+  Node* map = assembler->LoadMap(array);
+  Node* bit_field2 = assembler->LoadMapBitField2(map);
+  Node* elements_kind =
+      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements = assembler->LoadElements(array);
+  assembler->Switch(elements_kind, &return_false, kElementsKind,
+                    element_kind_handlers, arraysize(kElementsKind));
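+  // As a rough guide: a literal like [1, 2, 3] starts out FAST_SMI_ELEMENTS,
+  // [1.5, 2.5] FAST_DOUBLE_ELEMENTS, and arrays with gaps use the matching
+  // FAST_HOLEY_* kind; non-fast kinds were already routed to the runtime.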
+
+  assembler->Bind(&if_smiorobjects);
+  {
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label ident_loop(assembler, &index_var),
+        heap_num_loop(assembler, &search_num),
+        string_loop(assembler, &index_var), simd_loop(assembler),
+        undef_loop(assembler, &index_var), not_smi(assembler),
+        not_heap_num(assembler);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&heap_num_loop);
+
+    assembler->Bind(&not_smi);
+    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+                      &undef_loop);
+    Node* map = assembler->LoadMap(search_element);
+    assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
+                      &not_heap_num);
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    assembler->Goto(&heap_num_loop);
+
+    assembler->Bind(&not_heap_num);
+    Node* search_type = assembler->LoadMapInstanceType(map);
+    assembler->GotoIf(
+        assembler->Int32LessThan(
+            search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+        &string_loop);
+    assembler->GotoIf(
+        assembler->WordEqual(search_type,
+                             assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+        &simd_loop);
+    assembler->Goto(&ident_loop);
+
+    assembler->Bind(&ident_loop);
+    {
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordEqual(element_k, search_element),
+                        &return_true);
+
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&ident_loop);
+    }
+
+    assembler->Bind(&undef_loop);
+    {
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordEqual(element_k, undefined),
+                        &return_true);
+      assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
+                        &return_true);
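+      // A hole reads as undefined for includes, matching the JS behaviour
+      // that e.g. Array(1).includes(undefined) === true.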
+
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&undef_loop);
+    }
+
+    assembler->Bind(&heap_num_loop);
+    {
+      Label nan_loop(assembler, &index_var),
+          not_nan_loop(assembler, &index_var);
+      assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+                                      &not_nan_loop);
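+      // includes uses SameValueZero, so unlike indexOf a NaN search value
+      // can match: [NaN].includes(NaN) === true.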
+
+      assembler->Bind(&not_nan_loop);
+      {
+        Label continue_loop(assembler), not_smi(assembler);
+        assembler->GotoUnless(
+            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            &return_false);
+        Node* element_k =
+            assembler->LoadFixedArrayElement(elements, index_var.value());
+        assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+        assembler->Branch(
+            assembler->Float64Equal(search_num.value(),
+                                    assembler->SmiToFloat64(element_k)),
+            &return_true, &continue_loop);
+
+        assembler->Bind(&not_smi);
+        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+                                                  heap_number_map),
+                          &continue_loop);
+        assembler->BranchIfFloat64Equal(
+            search_num.value(), assembler->LoadHeapNumberValue(element_k),
+            &return_true, &continue_loop);
+
+        assembler->Bind(&continue_loop);
+        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        assembler->Goto(&not_nan_loop);
+      }
+
+      assembler->Bind(&nan_loop);
+      {
+        Label continue_loop(assembler);
+        assembler->GotoUnless(
+            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            &return_false);
+        Node* element_k =
+            assembler->LoadFixedArrayElement(elements, index_var.value());
+        assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+                                                  heap_number_map),
+                          &continue_loop);
+        assembler->BranchIfFloat64IsNaN(
+            assembler->LoadHeapNumberValue(element_k), &return_true,
+            &continue_loop);
+
+        assembler->Bind(&continue_loop);
+        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        assembler->Goto(&nan_loop);
+      }
+    }
+
+    assembler->Bind(&string_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoUnless(assembler->Int32LessThan(
+                                assembler->LoadInstanceType(element_k),
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &continue_loop);
+
+      // TODO(bmeurer): Consider inlining the StringEqual logic here.
+      Callable callable = CodeFactory::StringEqual(assembler->isolate());
+      Node* result =
+          assembler->CallStub(callable, context, search_element, element_k);
+      assembler->Branch(
+          assembler->WordEqual(assembler->BooleanConstant(true), result),
+          &return_true, &continue_loop);
+
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&string_loop);
+    }
+
+    assembler->Bind(&simd_loop);
+    {
+      Label continue_loop(assembler, &index_var),
+          loop_body(assembler, &index_var);
+      Node* map = assembler->LoadMap(search_element);
+
+      assembler->Goto(&loop_body);
+      assembler->Bind(&loop_body);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+
+      Node* map_k = assembler->LoadMap(element_k);
+      assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
+                                      &return_true, &continue_loop);
+
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&loop_body);
+    }
+  }
+
+  assembler->Bind(&if_packed_doubles);
+  {
+    Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
+        hole_loop(assembler, &index_var), search_notnan(assembler);
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&not_nan_loop);
+
+    assembler->Bind(&search_notnan);
+    assembler->GotoIf(assembler->WordNotEqual(
+                          assembler->LoadMap(search_element), heap_number_map),
+                      &return_false);
+
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+    assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+                                    &not_nan_loop);
+
+    // Search for HeapNumber
+    assembler->Bind(&not_nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+                                      &return_true, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&not_nan_loop);
+    }
+
+    // Search for NaN
+    assembler->Bind(&nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&nan_loop);
+    }
+  }
+
+  assembler->Bind(&if_holey_doubles);
+  {
+    Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
+        hole_loop(assembler, &index_var), search_notnan(assembler);
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&not_nan_loop);
+
+    assembler->Bind(&search_notnan);
+    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+                      &hole_loop);
+    assembler->GotoIf(assembler->WordNotEqual(
+                          assembler->LoadMap(search_element), heap_number_map),
+                      &return_false);
+
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+    assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+                                    &not_nan_loop);
+
+    // Search for HeapNumber
+    assembler->Bind(&not_nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+
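+      // A hole in a holey double array is stored as a dedicated NaN bit
+      // pattern: when pointers and doubles have the same size the full word
+      // is compared against kHoleNanInt64, otherwise just the exponent word.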
+      if (kPointerSize == kDoubleSize) {
+        Node* element = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint64());
+        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+                          &continue_loop);
+      } else {
+        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint32(),
+            kIeeeDoubleExponentWordOffset);
+        assembler->GotoIf(
+            assembler->Word32Equal(element_upper,
+                                   assembler->Int32Constant(kHoleNanUpper32)),
+            &continue_loop);
+      }
+
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+                                      &return_true, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&not_nan_loop);
+    }
+
+    // Search for NaN
+    assembler->Bind(&nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+
+      if (kPointerSize == kDoubleSize) {
+        Node* element = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint64());
+        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+                          &continue_loop);
+      } else {
+        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint32(),
+            kIeeeDoubleExponentWordOffset);
+        assembler->GotoIf(
+            assembler->Word32Equal(element_upper,
+                                   assembler->Int32Constant(kHoleNanUpper32)),
+            &continue_loop);
+      }
+
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&nan_loop);
+    }
+
+    // Search for the Hole
+    assembler->Bind(&hole_loop);
+    {
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_false);
+
+      if (kPointerSize == kDoubleSize) {
+        Node* element = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint64());
+        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+                          &return_true);
+      } else {
+        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint32(),
+            kIeeeDoubleExponentWordOffset);
+        assembler->GotoIf(
+            assembler->Word32Equal(element_upper,
+                                   assembler->Int32Constant(kHoleNanUpper32)),
+            &return_true);
+      }
+
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&hole_loop);
+    }
+  }
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(assembler->CallRuntime(Runtime::kArrayIncludes_Slow,
+                                           context, array, search_element,
+                                           start_from));
+}
+
+void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* array = assembler->Parameter(0);
+  Node* search_element = assembler->Parameter(1);
+  Node* start_from = assembler->Parameter(2);
+  Node* context = assembler->Parameter(3 + 2);
+
+  Node* int32_zero = assembler->Int32Constant(0);
+  Node* int32_one = assembler->Int32Constant(1);
+
+  Node* undefined = assembler->UndefinedConstant();
+  Node* heap_number_map = assembler->HeapNumberMapConstant();
+
+  Variable len_var(assembler, MachineRepresentation::kWord32),
+      index_var(assembler, MachineRepresentation::kWord32),
+      start_from_var(assembler, MachineRepresentation::kWord32);
+
+  Label init_k(assembler), return_found(assembler), return_not_found(assembler),
+      call_runtime(assembler);
+
+  Label init_len(assembler);
+
+  index_var.Bind(int32_zero);
+  len_var.Bind(int32_zero);
+
+  // Take the slow path if the receiver is not a JSArray, if retrieving its
+  // elements requires traversing the prototype chain, or if access checks
+  // are required.
+  assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+
+  assembler->Bind(&init_len);
+  {
+    // Handle the case where the JSArray length is not a Smi in the runtime.
+    Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+    assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+
+    len_var.Bind(assembler->SmiToWord(len));
+    assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+                      &return_not_found, &init_k);
+  }
+
+  assembler->Bind(&init_k);
+  {
+    Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
+        init_k_zero(assembler), init_k_n(assembler);
+    Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
+    Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+
+    assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+                      &init_k_heap_num);
+
+    assembler->Bind(&init_k_smi);
+    {
+      start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+      assembler->Goto(&init_k_n);
+    }
+
+    assembler->Bind(&init_k_heap_num);
+    {
+      Label do_return_not_found(assembler);
+      Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+      Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
+      assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
+                        &do_return_not_found);
+      start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+      assembler->Goto(&init_k_n);
+
+      assembler->Bind(&do_return_not_found);
+      {
+        index_var.Bind(int32_zero);
+        assembler->Goto(&return_not_found);
+      }
+    }
+
+    assembler->Bind(&init_k_n);
+    {
+      Label if_positive(assembler), if_negative(assembler), done(assembler);
+      assembler->Branch(
+          assembler->Int32LessThan(start_from_var.value(), int32_zero),
+          &if_negative, &if_positive);
+
+      assembler->Bind(&if_positive);
+      {
+        index_var.Bind(start_from_var.value());
+        assembler->Goto(&done);
+      }
+
+      assembler->Bind(&if_negative);
+      {
+        index_var.Bind(
+            assembler->Int32Add(len_var.value(), start_from_var.value()));
+        assembler->Branch(
+            assembler->Int32LessThan(index_var.value(), int32_zero),
+            &init_k_zero, &done);
+      }
+
+      assembler->Bind(&init_k_zero);
+      {
+        index_var.Bind(int32_zero);
+        assembler->Goto(&done);
+      }
+
+      assembler->Bind(&done);
+    }
+  }
+
+  static int32_t kElementsKind[] = {
+      FAST_SMI_ELEMENTS,   FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+      FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS,    FAST_HOLEY_DOUBLE_ELEMENTS,
+  };
+
+  Label if_smiorobjects(assembler), if_packed_doubles(assembler),
+      if_holey_doubles(assembler);
+  Label* element_kind_handlers[] = {&if_smiorobjects,   &if_smiorobjects,
+                                    &if_smiorobjects,   &if_smiorobjects,
+                                    &if_packed_doubles, &if_holey_doubles};
+
+  Node* map = assembler->LoadMap(array);
+  Node* bit_field2 = assembler->LoadMapBitField2(map);
+  Node* elements_kind =
+      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+  Node* elements = assembler->LoadElements(array);
+  assembler->Switch(elements_kind, &return_not_found, kElementsKind,
+                    element_kind_handlers, arraysize(kElementsKind));
+
+  assembler->Bind(&if_smiorobjects);
+  {
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label ident_loop(assembler, &index_var),
+        heap_num_loop(assembler, &search_num),
+        string_loop(assembler, &index_var), simd_loop(assembler),
+        undef_loop(assembler, &index_var), not_smi(assembler),
+        not_heap_num(assembler);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&heap_num_loop);
+
+    assembler->Bind(&not_smi);
+    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+                      &undef_loop);
+    Node* map = assembler->LoadMap(search_element);
+    assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
+                      &not_heap_num);
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    assembler->Goto(&heap_num_loop);
+
+    assembler->Bind(&not_heap_num);
+    Node* search_type = assembler->LoadMapInstanceType(map);
+    assembler->GotoIf(
+        assembler->Int32LessThan(
+            search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+        &string_loop);
+    assembler->GotoIf(
+        assembler->WordEqual(search_type,
+                             assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+        &simd_loop);
+    assembler->Goto(&ident_loop);
+
+    assembler->Bind(&ident_loop);
+    {
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordEqual(element_k, search_element),
+                        &return_found);
+
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&ident_loop);
+    }
+
+    assembler->Bind(&undef_loop);
+    {
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordEqual(element_k, undefined),
+                        &return_found);
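+      // Unlike includes, indexOf skips holes, so e.g.
+      // Array(1).indexOf(undefined) === -1.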
+
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&undef_loop);
+    }
+
+    assembler->Bind(&heap_num_loop);
+    {
+      Label not_nan_loop(assembler, &index_var);
+      assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                      &not_nan_loop);
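+      // indexOf uses strict equality, so a NaN search value never matches:
+      // [NaN].indexOf(NaN) === -1.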
+
+      assembler->Bind(&not_nan_loop);
+      {
+        Label continue_loop(assembler), not_smi(assembler);
+        assembler->GotoUnless(
+            assembler->Int32LessThan(index_var.value(), len_var.value()),
+            &return_not_found);
+        Node* element_k =
+            assembler->LoadFixedArrayElement(elements, index_var.value());
+        assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+        assembler->Branch(
+            assembler->Float64Equal(search_num.value(),
+                                    assembler->SmiToFloat64(element_k)),
+            &return_found, &continue_loop);
+
+        assembler->Bind(&not_smi);
+        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+                                                  heap_number_map),
+                          &continue_loop);
+        assembler->BranchIfFloat64Equal(
+            search_num.value(), assembler->LoadHeapNumberValue(element_k),
+            &return_found, &continue_loop);
+
+        assembler->Bind(&continue_loop);
+        index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+        assembler->Goto(&not_nan_loop);
+      }
+    }
+
+    assembler->Bind(&string_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+      assembler->GotoUnless(assembler->Int32LessThan(
+                                assembler->LoadInstanceType(element_k),
+                                assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                            &continue_loop);
+
+      // TODO(bmeurer): Consider inlining the StringEqual logic here.
+      Callable callable = CodeFactory::StringEqual(assembler->isolate());
+      Node* result =
+          assembler->CallStub(callable, context, search_element, element_k);
+      assembler->Branch(
+          assembler->WordEqual(assembler->BooleanConstant(true), result),
+          &return_found, &continue_loop);
+
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&string_loop);
+    }
+
+    assembler->Bind(&simd_loop);
+    {
+      Label continue_loop(assembler, &index_var),
+          loop_body(assembler, &index_var);
+      Node* map = assembler->LoadMap(search_element);
+
+      assembler->Goto(&loop_body);
+      assembler->Bind(&loop_body);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+
+      Node* element_k =
+          assembler->LoadFixedArrayElement(elements, index_var.value());
+      assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+
+      Node* map_k = assembler->LoadMap(element_k);
+      assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
+                                      &return_found, &continue_loop);
+
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&loop_body);
+    }
+  }
+
+  assembler->Bind(&if_packed_doubles);
+  {
+    Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&not_nan_loop);
+
+    assembler->Bind(&search_notnan);
+    assembler->GotoIf(assembler->WordNotEqual(
+                          assembler->LoadMap(search_element), heap_number_map),
+                      &return_not_found);
+
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+    assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                    &not_nan_loop);
+
+    // Search for HeapNumber
+    assembler->Bind(&not_nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+                                      &return_found, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&not_nan_loop);
+    }
+  }
+
+  assembler->Bind(&if_holey_doubles);
+  {
+    Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
+    Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+    assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler->SmiToFloat64(search_element));
+    assembler->Goto(&not_nan_loop);
+
+    assembler->Bind(&search_notnan);
+    assembler->GotoIf(assembler->WordNotEqual(
+                          assembler->LoadMap(search_element), heap_number_map),
+                      &return_not_found);
+
+    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+    assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                    &not_nan_loop);
+
+    // Search for HeapNumber
+    assembler->Bind(&not_nan_loop);
+    {
+      Label continue_loop(assembler);
+      assembler->GotoUnless(
+          assembler->Int32LessThan(index_var.value(), len_var.value()),
+          &return_not_found);
+
+      if (kPointerSize == kDoubleSize) {
+        Node* element = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint64());
+        Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+        assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+                          &continue_loop);
+      } else {
+        Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+            elements, index_var.value(), MachineType::Uint32(),
+            kIeeeDoubleExponentWordOffset);
+        assembler->GotoIf(
+            assembler->Word32Equal(element_upper,
+                                   assembler->Int32Constant(kHoleNanUpper32)),
+            &continue_loop);
+      }
+
+      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+                                      &return_found, &continue_loop);
+      assembler->Bind(&continue_loop);
+      index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+      assembler->Goto(&not_nan_loop);
+    }
+  }
+
+  assembler->Bind(&return_found);
+  assembler->Return(assembler->ChangeInt32ToTagged(index_var.value()));
+
+  assembler->Bind(&return_not_found);
+  assembler->Return(assembler->NumberConstant(-1));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(assembler->CallRuntime(Runtime::kArrayIndexOf, context,
+                                           array, search_element, start_from));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-arraybuffer.cc b/src/builtins/builtins-arraybuffer.cc
new file mode 100644
index 0000000..addf8ac
--- /dev/null
+++ b/src/builtins/builtins-arraybuffer.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.1 ArrayBuffer Objects
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
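+// Calling the constructor without new always lands here and throws, e.g.
+// ArrayBuffer(8) raises a TypeError while new ArrayBuffer(8) succeeds.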
+BUILTIN(ArrayBufferConstructor) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  DCHECK(*target == target->native_context()->array_buffer_fun() ||
+         *target == target->native_context()->shared_array_buffer_fun());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+                            handle(target->shared()->name(), isolate)));
+}
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
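+// e.g. new ArrayBuffer(-1), or any length that does not fit into a size_t,
+// throws a RangeError; new ArrayBuffer(8) allocates a zero-initialized buffer.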
+BUILTIN(ArrayBufferConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  Handle<Object> length = args.atOrUndefined(isolate, 1);
+  DCHECK(*target == target->native_context()->array_buffer_fun() ||
+         *target == target->native_context()->shared_array_buffer_fun());
+  Handle<Object> number_length;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+                                     Object::ToInteger(isolate, length));
+  if (number_length->Number() < 0.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+  }
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
+  size_t byte_length;
+  if (!TryNumberToSize(*number_length, &byte_length)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+  }
+  SharedFlag shared_flag =
+      (*target == target->native_context()->array_buffer_fun())
+          ? SharedFlag::kNotShared
+          : SharedFlag::kShared;
+  if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
+                                          isolate, byte_length, true,
+                                          shared_flag)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
+  }
+  return *result;
+}
+
+// ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength
+BUILTIN(ArrayBufferPrototypeGetByteLength) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSArrayBuffer, array_buffer,
+                 "get ArrayBuffer.prototype.byteLength");
+
+  if (array_buffer->is_shared()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "get ArrayBuffer.prototype.byteLength"),
+                              args.receiver()));
+  }
+  // TODO(franzih): According to the ES6 spec, we should throw a TypeError
+  // here if the JSArrayBuffer is detached.
+  return array_buffer->byte_length();
+}
+
+// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
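+// e.g. ArrayBuffer.isView(new Uint8Array(4)) is true, while
+// ArrayBuffer.isView(new ArrayBuffer(4)) and ArrayBuffer.isView({}) are false.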
+BUILTIN(ArrayBufferIsView) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  Object* arg = args[1];
+  return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-boolean.cc b/src/builtins/builtins-boolean.cc
new file mode 100644
index 0000000..5f5bed1
--- /dev/null
+++ b/src/builtins/builtins-boolean.cc
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.3 Boolean Objects
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
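+// e.g. Boolean(0) === false and Boolean("x") === true; the [[Call]] case
+// performs the conversion without allocating a wrapper object.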
+BUILTIN(BooleanConstructor) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  return isolate->heap()->ToBoolean(value->BooleanValue());
+}
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
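+// e.g. new Boolean(false) returns a JSValue wrapper that is itself truthy,
+// so !!new Boolean(false) === true.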
+BUILTIN(BooleanConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  DCHECK(*target == target->native_context()->boolean_function());
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
+  Handle<JSValue>::cast(result)->set_value(
+      isolate->heap()->ToBoolean(value->BooleanValue()));
+  return *result;
+}
+
+// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
+void Builtins::Generate_BooleanPrototypeToString(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* value = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.toString");
+  Node* result = assembler->LoadObjectField(value, Oddball::kToStringOffset);
+  assembler->Return(result);
+}
+
+// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
+void Builtins::Generate_BooleanPrototypeValueOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* result = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf");
+  assembler->Return(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-call.cc b/src/builtins/builtins-call.cc
new file mode 100644
index 0000000..e3054a9
--- /dev/null
+++ b/src/builtins/builtins-call.cc
@@ -0,0 +1,151 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
+                                    TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return CallFunction_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return CallFunction_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return CallFunction_ReceiverIsAny();
+      }
+      break;
+    case TailCallMode::kAllow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return TailCallFunction_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return TailCallFunction_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return TailCallFunction_ReceiverIsAny();
+      }
+      break;
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::Call(ConvertReceiverMode mode,
+                            TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return Call_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return Call_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return Call_ReceiverIsAny();
+      }
+      break;
+    case TailCallMode::kAllow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return TailCall_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return TailCall_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return TailCall_ReceiverIsAny();
+      }
+      break;
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      return CallBoundFunction();
+    case TailCallMode::kAllow:
+      return TailCallBoundFunction();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+                        TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                        TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+                        TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+                        TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                        TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
+  Generate_CallFunction(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+}
+
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+  Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCallBoundFunction(MacroAssembler* masm) {
+  Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+}
+
+void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+                TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+                TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsNotNullOrUndefined(
+    MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
+  Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-callsite.cc b/src/builtins/builtins-callsite.cc
new file mode 100644
index 0000000..7fc2f98
--- /dev/null
+++ b/src/builtins/builtins-callsite.cc
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/string-builder.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+
+#define CHECK_CALLSITE(recv, method)                                          \
+  CHECK_RECEIVER(JSObject, recv, method);                                     \
+  if (!JSReceiver::HasOwnProperty(                                            \
+           recv, isolate->factory()->call_site_position_symbol())             \
+           .FromMaybe(false)) {                                               \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                           \
+        isolate,                                                              \
+        NewTypeError(MessageTemplate::kCallSiteMethod,                        \
+                     isolate->factory()->NewStringFromAsciiChecked(method))); \
+  }
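+// The position symbol is only installed by the internal CallSite machinery,
+// so invoking these methods on arbitrary receivers throws a TypeError.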
+
+namespace {
+
+Object* PositiveNumberOrNull(int value, Isolate* isolate) {
+  if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
+  return isolate->heap()->null_value();
+}
+
+}  // namespace
+
+BUILTIN(CallSitePrototypeGetColumnNumber) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getColumnNumber");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return PositiveNumberOrNull(call_site.GetColumnNumber(), isolate);
+}
+
+BUILTIN(CallSitePrototypeGetEvalOrigin) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getEvalOrigin");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetEvalOrigin();
+}
+
+BUILTIN(CallSitePrototypeGetFileName) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getFileName");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetFileName();
+}
+
+namespace {
+
+bool CallSiteIsStrict(Isolate* isolate, Handle<JSObject> receiver) {
+  Handle<Object> strict;
+  Handle<Symbol> symbol = isolate->factory()->call_site_strict_symbol();
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, strict,
+                                     JSObject::GetProperty(receiver, symbol));
+  return strict->BooleanValue();
+}
+
+}  // namespace
+
+BUILTIN(CallSitePrototypeGetFunction) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getFunction");
+
+  if (CallSiteIsStrict(isolate, recv))
+    return *isolate->factory()->undefined_value();
+
+  Handle<Symbol> symbol = isolate->factory()->call_site_function_symbol();
+  RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+}
+
+BUILTIN(CallSitePrototypeGetFunctionName) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getFunctionName");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetFunctionName();
+}
+
+BUILTIN(CallSitePrototypeGetLineNumber) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getLineNumber");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+
+  int line_number = call_site.IsWasm() ? call_site.wasm_func_index()
+                                       : call_site.GetLineNumber();
+  return PositiveNumberOrNull(line_number, isolate);
+}
+
+BUILTIN(CallSitePrototypeGetMethodName) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getMethodName");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetMethodName();
+}
+
+BUILTIN(CallSitePrototypeGetPosition) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getPosition");
+
+  Handle<Symbol> symbol = isolate->factory()->call_site_position_symbol();
+  RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+}
+
+BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetScriptNameOrSourceUrl();
+}
+
+BUILTIN(CallSitePrototypeGetThis) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getThis");
+
+  if (CallSiteIsStrict(isolate, recv))
+    return *isolate->factory()->undefined_value();
+
+  Handle<Object> receiver;
+  Handle<Symbol> symbol = isolate->factory()->call_site_receiver_symbol();
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     JSObject::GetProperty(recv, symbol));
+
+  if (*receiver == isolate->heap()->call_site_constructor_symbol())
+    return *isolate->factory()->undefined_value();
+
+  return *receiver;
+}
+
+BUILTIN(CallSitePrototypeGetTypeName) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "getTypeName");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return *call_site.GetTypeName();
+}
+
+BUILTIN(CallSitePrototypeIsConstructor) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "isConstructor");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return isolate->heap()->ToBoolean(call_site.IsConstructor());
+}
+
+BUILTIN(CallSitePrototypeIsEval) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "isEval");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return isolate->heap()->ToBoolean(call_site.IsEval());
+}
+
+BUILTIN(CallSitePrototypeIsNative) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "isNative");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return isolate->heap()->ToBoolean(call_site.IsNative());
+}
+
+BUILTIN(CallSitePrototypeIsToplevel) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "isToplevel");
+
+  CallSite call_site(isolate, recv);
+  CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+  return isolate->heap()->ToBoolean(call_site.IsToplevel());
+}
+
+BUILTIN(CallSitePrototypeToString) {
+  HandleScope scope(isolate);
+  CHECK_CALLSITE(recv, "toString");
+  RETURN_RESULT_OR_FAILURE(isolate, CallSiteUtils::ToString(isolate, recv));
+}
+
+#undef CHECK_CALLSITE
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-conversion.cc b/src/builtins/builtins-conversion.cc
new file mode 100644
index 0000000..0d04a02
--- /dev/null
+++ b/src/builtins/builtins-conversion.cc
@@ -0,0 +1,357 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
+  switch (hint) {
+    case ToPrimitiveHint::kDefault:
+      return NonPrimitiveToPrimitive_Default();
+    case ToPrimitiveHint::kNumber:
+      return NonPrimitiveToPrimitive_Number();
+    case ToPrimitiveHint::kString:
+      return NonPrimitiveToPrimitive_String();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+namespace {
+// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
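+// Objects can override the conversion via Symbol.toPrimitive; for example
+//   const o = { [Symbol.toPrimitive](hint) { return hint === "number" ? 1 : "s"; } };
+// makes +o evaluate to 1 and `${o}` to "s". Otherwise the code falls back to
+// OrdinaryToPrimitive below.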
+void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
+                                      ToPrimitiveHint hint) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  // Lookup the @@toPrimitive property on the {input}.
+  Callable callable = CodeFactory::GetProperty(assembler->isolate());
+  Node* to_primitive_symbol =
+      assembler->HeapConstant(assembler->factory()->to_primitive_symbol());
+  Node* exotic_to_prim =
+      assembler->CallStub(callable, context, input, to_primitive_symbol);
+
+  // Check if {exotic_to_prim} is neither null nor undefined.
+  Label ordinary_to_primitive(assembler);
+  assembler->GotoIf(
+      assembler->WordEqual(exotic_to_prim, assembler->NullConstant()),
+      &ordinary_to_primitive);
+  assembler->GotoIf(
+      assembler->WordEqual(exotic_to_prim, assembler->UndefinedConstant()),
+      &ordinary_to_primitive);
+  {
+    // Invoke the {exotic_to_prim} method on the {input} with a string
+    // representation of the {hint}.
+    Callable callable = CodeFactory::Call(assembler->isolate());
+    Node* hint_string = assembler->HeapConstant(
+        assembler->factory()->ToPrimitiveHintString(hint));
+    Node* result = assembler->CallJS(callable, context, exotic_to_prim, input,
+                                     hint_string);
+
+    // Verify that the {result} is actually a primitive.
+    Label if_resultisprimitive(assembler),
+        if_resultisnotprimitive(assembler, Label::kDeferred);
+    assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisprimitive);
+    Node* result_instance_type = assembler->LoadInstanceType(result);
+    STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+    assembler->Branch(assembler->Int32LessThanOrEqual(
+                          result_instance_type,
+                          assembler->Int32Constant(LAST_PRIMITIVE_TYPE)),
+                      &if_resultisprimitive, &if_resultisnotprimitive);
+
+    assembler->Bind(&if_resultisprimitive);
+    {
+      // Just return the {result}.
+      assembler->Return(result);
+    }
+
+    assembler->Bind(&if_resultisnotprimitive);
+    {
+      // Somehow the @@toPrimitive method on {input} didn't yield a primitive.
+      assembler->TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive,
+                                 context);
+    }
+  }
+
+  // Convert using the OrdinaryToPrimitive algorithm instead.
+  assembler->Bind(&ordinary_to_primitive);
+  {
+    Callable callable = CodeFactory::OrdinaryToPrimitive(
+        assembler->isolate(), (hint == ToPrimitiveHint::kString)
+                                  ? OrdinaryToPrimitiveHint::kString
+                                  : OrdinaryToPrimitiveHint::kNumber);
+    assembler->TailCallStub(callable, context, input);
+  }
+}
+}  // anonymous namespace
+
+void Builtins::Generate_NonPrimitiveToPrimitive_Default(
+    CodeStubAssembler* assembler) {
+  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kDefault);
+}
+
+void Builtins::Generate_NonPrimitiveToPrimitive_Number(
+    CodeStubAssembler* assembler) {
+  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kNumber);
+}
+
+void Builtins::Generate_NonPrimitiveToPrimitive_String(
+    CodeStubAssembler* assembler) {
+  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kString);
+}
+
+void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label runtime(assembler);
+
+  // Check if string has a cached array index.
+  Node* hash = assembler->LoadNameHashField(input);
+  Node* bit = assembler->Word32And(
+      hash, assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+  assembler->GotoIf(assembler->Word32NotEqual(bit, assembler->Int32Constant(0)),
+                    &runtime);
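+  // Strings that are valid array indices (e.g. "123") cache the numeric value
+  // in their hash field, so the conversion reduces to a bit-field decode.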
+
+  assembler->Return(assembler->SmiTag(
+      assembler->BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+
+  assembler->Bind(&runtime);
+  {
+    // Note: We cannot tail call to the runtime here, as js-to-wasm
+    // trampolines also use this code currently, and they declare all
+    // outgoing parameters as untagged, while we would push a tagged
+    // object here.
+    Node* result =
+        assembler->CallRuntime(Runtime::kStringToNumber, context, input);
+    assembler->Return(result);
+  }
+}
+
+// ES6 section 7.1.3 ToNumber ( argument )
+void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  // We might need to loop once here due to ToPrimitive conversions.
+  Variable var_input(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_input);
+  var_input.Bind(input);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {input} value (known to be a HeapObject).
+    Node* input = var_input.value();
+
+    // Dispatch on the {input} instance type.
+    Node* input_instance_type = assembler->LoadInstanceType(input);
+    Label if_inputisstring(assembler), if_inputisoddball(assembler),
+        if_inputisreceiver(assembler, Label::kDeferred),
+        if_inputisother(assembler, Label::kDeferred);
+    assembler->GotoIf(assembler->Int32LessThan(
+                          input_instance_type,
+                          assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                      &if_inputisstring);
+    assembler->GotoIf(
+        assembler->Word32Equal(input_instance_type,
+                               assembler->Int32Constant(ODDBALL_TYPE)),
+        &if_inputisoddball);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    assembler->Branch(assembler->Int32GreaterThanOrEqual(
+                          input_instance_type,
+                          assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+                      &if_inputisreceiver, &if_inputisother);
+
+    assembler->Bind(&if_inputisstring);
+    {
+      // The {input} is a String, use the fast stub to convert it to a Number.
+      // TODO(bmeurer): Consider inlining the StringToNumber logic here.
+      Callable callable = CodeFactory::StringToNumber(assembler->isolate());
+      assembler->TailCallStub(callable, context, input);
+    }
+
+    assembler->Bind(&if_inputisoddball);
+    {
+      // The {input} is an Oddball; we just need to extract its Number value.
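+      // (For example, true maps to 1, false and null to 0, and undefined to
+      // NaN; these values are cached on the Oddball objects themselves.)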
+      Node* result =
+          assembler->LoadObjectField(input, Oddball::kToNumberOffset);
+      assembler->Return(result);
+    }
+
+    assembler->Bind(&if_inputisreceiver);
+    {
+      // The {input} is a JSReceiver, we need to convert it to a Primitive first
+      // using the ToPrimitive type conversion, preferably yielding a Number.
+      Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+          assembler->isolate(), ToPrimitiveHint::kNumber);
+      Node* result = assembler->CallStub(callable, context, input);
+
+      // Check if the {result} is already a Number.
+      Label if_resultisnumber(assembler), if_resultisnotnumber(assembler);
+      assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisnumber);
+      Node* result_map = assembler->LoadMap(result);
+      assembler->Branch(
+          assembler->WordEqual(result_map, assembler->HeapNumberMapConstant()),
+          &if_resultisnumber, &if_resultisnotnumber);
+
+      assembler->Bind(&if_resultisnumber);
+      {
+        // The ToPrimitive conversion already gave us a Number, so we're done.
+        assembler->Return(result);
+      }
+
+      assembler->Bind(&if_resultisnotnumber);
+      {
+        // We now have a Primitive {result}, but it's not yet a Number.
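+      // (E.g. a String, in which case the next loop iteration dispatches to
+      // the StringToNumber fast path above.)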
+        var_input.Bind(result);
+        assembler->Goto(&loop);
+      }
+    }
+
+    assembler->Bind(&if_inputisother);
+    {
+      // The {input} is something else (i.e. Symbol or Simd128Value), let the
+      // runtime figure out the correct exception.
+      // Note: We cannot tail call to the runtime here, as js-to-wasm
+      // trampolines also use this code currently, and they declare all
+      // outgoing parameters as untagged, while we would push a tagged
+      // object here.
+      Node* result = assembler->CallRuntime(Runtime::kToNumber, context, input);
+      assembler->Return(result);
+    }
+  }
+}
+
+Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
+  switch (hint) {
+    case OrdinaryToPrimitiveHint::kNumber:
+      return OrdinaryToPrimitive_Number();
+    case OrdinaryToPrimitiveHint::kString:
+      return OrdinaryToPrimitive_String();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+namespace {
+// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
+void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
+                                  OrdinaryToPrimitiveHint hint) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* input = assembler->Parameter(Descriptor::kArgument);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Variable var_result(assembler, MachineRepresentation::kTagged);
+  Label return_result(assembler, &var_result);
+
+  Handle<String> method_names[2];
+  switch (hint) {
+    case OrdinaryToPrimitiveHint::kNumber:
+      method_names[0] = assembler->factory()->valueOf_string();
+      method_names[1] = assembler->factory()->toString_string();
+      break;
+    case OrdinaryToPrimitiveHint::kString:
+      method_names[0] = assembler->factory()->toString_string();
+      method_names[1] = assembler->factory()->valueOf_string();
+      break;
+  }
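+  // For example, with hint kNumber (used by `+obj`) valueOf is tried before
+  // toString, while hint kString (used by template literal interpolation)
+  // tries toString first.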
+  for (Handle<String> name : method_names) {
+    // Lookup the {name} on the {input}.
+    Callable callable = CodeFactory::GetProperty(assembler->isolate());
+    Node* name_string = assembler->HeapConstant(name);
+    Node* method = assembler->CallStub(callable, context, input, name_string);
+
+    // Check if the {method} is callable.
+    Label if_methodiscallable(assembler),
+        if_methodisnotcallable(assembler, Label::kDeferred);
+    assembler->GotoIf(assembler->WordIsSmi(method), &if_methodisnotcallable);
+    Node* method_map = assembler->LoadMap(method);
+    Node* method_bit_field = assembler->LoadMapBitField(method_map);
+    assembler->Branch(
+        assembler->Word32Equal(
+            assembler->Word32And(method_bit_field, assembler->Int32Constant(
+                                                       1 << Map::kIsCallable)),
+            assembler->Int32Constant(0)),
+        &if_methodisnotcallable, &if_methodiscallable);
+
+    assembler->Bind(&if_methodiscallable);
+    {
+      // Call the {method} on the {input}.
+      Callable callable = CodeFactory::Call(assembler->isolate());
+      Node* result = assembler->CallJS(callable, context, method, input);
+      var_result.Bind(result);
+
+      // Return the {result} if it is a primitive.
+      assembler->GotoIf(assembler->WordIsSmi(result), &return_result);
+      Node* result_instance_type = assembler->LoadInstanceType(result);
+      STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+      assembler->GotoIf(assembler->Int32LessThanOrEqual(
+                            result_instance_type,
+                            assembler->Int32Constant(LAST_PRIMITIVE_TYPE)),
+                        &return_result);
+    }
+
+    // Just continue with the next {name} if the {method} is not callable.
+    assembler->Goto(&if_methodisnotcallable);
+    assembler->Bind(&if_methodisnotcallable);
+  }
+
+  assembler->TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+
+  assembler->Bind(&return_result);
+  assembler->Return(var_result.value());
+}
+}  // anonymous namespace
+
+void Builtins::Generate_OrdinaryToPrimitive_Number(
+    CodeStubAssembler* assembler) {
+  Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kNumber);
+}
+
+void Builtins::Generate_OrdinaryToPrimitive_String(
+    CodeStubAssembler* assembler) {
+  Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kString);
+}
+
+// ES6 section 7.1.2 ToBoolean ( argument )
+void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef TypeConversionDescriptor Descriptor;
+
+  Node* value = assembler->Parameter(Descriptor::kArgument);
+
+  Label return_true(assembler), return_false(assembler);
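+  // A sketch of the ES6 ToBoolean truth table this dispatches on: undefined,
+  // null, false, +/-0, NaN, and the empty string are falsy; everything else
+  // (modulo V8's undetectable objects) is truthy.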
+  assembler->BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-dataview.cc b/src/builtins/builtins-dataview.cc
new file mode 100644
index 0000000..32c5a83
--- /dev/null
+++ b/src/builtins/builtins-dataview.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.2 DataView Objects
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
+BUILTIN(DataViewConstructor) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError(MessageTemplate::kConstructorNotFunction,
+                   isolate->factory()->NewStringFromAsciiChecked("DataView")));
+}
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
+BUILTIN(DataViewConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+  Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+  Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
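+
+  // A JS-level sketch of the behavior implemented below:
+  //   new DataView(buffer)        // view over the whole buffer
+  //   new DataView(buffer, 8)     // view from byte 8 to the end
+  //   new DataView(buffer, 8, 4)  // 4-byte view starting at byte 8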
+
+  // 2. If Type(buffer) is not Object, throw a TypeError exception.
+  // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+  //    TypeError exception.
+  if (!buffer->IsJSArrayBuffer()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+  }
+  Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
+
+  // 4. Let offset be ToIndex(byteOffset).
+  Handle<Object> offset;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, offset,
+      Object::ToIndex(isolate, byte_offset,
+                      MessageTemplate::kInvalidDataViewOffset));
+
+  // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+  // We currently violate the specification at this point.
+
+  // 6. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
+  // internal slot.
+  double const buffer_byte_length = array_buffer->byte_length()->Number();
+
+  // 7. If offset > bufferByteLength, throw a RangeError exception.
+  if (offset->Number() > buffer_byte_length) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewRangeError(MessageTemplate::kInvalidDataViewOffset, offset));
+  }
+
+  Handle<Object> view_byte_length;
+  if (byte_length->IsUndefined(isolate)) {
+    // 8. If byteLength is undefined, then
+    //       a. Let viewByteLength be bufferByteLength - offset.
+    view_byte_length =
+        isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+  } else {
+    // 9. Else,
+    //       a. Let viewByteLength be ? ToIndex(byteLength).
+    //       b. If offset + viewByteLength > bufferByteLength, throw a
+    //          RangeError exception.
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, view_byte_length,
+        Object::ToIndex(isolate, byte_length,
+                        MessageTemplate::kInvalidDataViewLength));
+    if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+    }
+  }
+
+  // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+  //     "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+  //     [[ByteLength]], [[ByteOffset]]»).
+  // 11. Set O's [[DataView]] internal slot to true.
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
+  for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
+    Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+  }
+
+  // 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+  Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+
+  // 13. Set O's [[ByteLength]] internal slot to viewByteLength.
+  Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+
+  // 14. Set O's [[ByteOffset]] internal slot to offset.
+  Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+
+  // 15. Return O.
+  return *result;
+}
+
+// ES6 section 24.2.4.1 get DataView.prototype.buffer
+BUILTIN(DataViewPrototypeGetBuffer) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.buffer");
+  return data_view->buffer();
+}
+
+// ES6 section 24.2.4.2 get DataView.prototype.byteLength
+BUILTIN(DataViewPrototypeGetByteLength) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteLength");
+  // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+  // here if the JSArrayBuffer of the {data_view} was neutered.
+  return data_view->byte_length();
+}
+
+// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
+BUILTIN(DataViewPrototypeGetByteOffset) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteOffset");
+  // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+  // here if the JSArrayBuffer of the {data_view} was neutered.
+  return data_view->byte_offset();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-date.cc b/src/builtins/builtins-date.cc
new file mode 100644
index 0000000..d5c3476
--- /dev/null
+++ b/src/builtins/builtins-date.cc
@@ -0,0 +1,1002 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/dateparser-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.3 Date Objects
+
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+// 20.3.1.2 Day Number and Time within Day
+const double kMsPerDay = 86400000.0;
+
+// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time) {
+  if (std::isfinite(day) && std::isfinite(time)) {
+    return time + day * kMsPerDay;
+  }
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date) {
+  if ((kMinYear <= year && year <= kMaxYear) &&
+      (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+    int y = FastD2I(year);
+    int m = FastD2I(month);
+    y += m / 12;
+    m %= 12;
+    if (m < 0) {
+      m += 12;
+      y -= 1;
+    }
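+    // Out-of-range months wrap into adjacent years; e.g. MakeDay(2016, -1, 1)
+    // normalizes to y == 2015, m == 11 (December 2015).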
+    DCHECK_LE(0, m);
+    DCHECK_LT(m, 12);
+
+    // kYearDelta is an arbitrary number such that:
+    // a) kYearDelta = -1 (mod 400)
+    // b) year + kYearDelta > 0 for years in the range defined by
+    //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+    //    Jan 1 1970. This is required so that we don't run into integer
+    //    division of negative numbers.
+    // c) there shouldn't be an overflow for 32-bit integers in the following
+    //    operations.
+    static const int kYearDelta = 399999;
+    static const int kBaseDay =
+        365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+        (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
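+    // Sanity check: for y == 1970 the day_from_year expression below reduces
+    // to kBaseDay - kBaseDay == 0, i.e. day 0 is Jan 1, 1970.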
+    int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+                        (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+                        kBaseDay;
+    if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+      static const int kDayFromMonth[] = {0,   31,  59,  90,  120, 151,
+                                          181, 212, 243, 273, 304, 334};
+      day_from_year += kDayFromMonth[m];
+    } else {
+      static const int kDayFromMonth[] = {0,   31,  60,  91,  121, 152,
+                                          182, 213, 244, 274, 305, 335};
+      day_from_year += kDayFromMonth[m];
+    }
+    return static_cast<double>(day_from_year - 1) + date;
+  }
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms) {
+  if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+      std::isfinite(ms)) {
+    double const h = DoubleToInteger(hour);
+    double const m = DoubleToInteger(min);
+    double const s = DoubleToInteger(sec);
+    double const milli = DoubleToInteger(ms);
+    return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+  }
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.15 TimeClip (time)
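+// For example, TimeClip(8.64e15) is the largest valid time value, while
+// TimeClip(8.64e15 + 1) is NaN (the spec range is +/-8.64e15 ms).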
+double TimeClip(double time) {
+  if (-DateCache::kMaxTimeInMs <= time && time <= DateCache::kMaxTimeInMs) {
+    return DoubleToInteger(time) + 0.0;
+  }
+  return std::numeric_limits<double>::quiet_NaN();
+}
+
+const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
+                                "Thu", "Fri", "Sat"};
+const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+                              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+// ES6 section 20.3.1.16 Date Time String Format
+double ParseDateTimeString(Handle<String> str) {
+  Isolate* const isolate = str->GetIsolate();
+  str = String::Flatten(str);
+  // TODO(bmeurer): Change DateParser to not use the FixedArray.
+  Handle<FixedArray> tmp =
+      isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
+  DisallowHeapAllocation no_gc;
+  String::FlatContent str_content = str->GetFlatContent();
+  bool result;
+  if (str_content.IsOneByte()) {
+    result = DateParser::Parse(isolate, str_content.ToOneByteVector(), *tmp);
+  } else {
+    result = DateParser::Parse(isolate, str_content.ToUC16Vector(), *tmp);
+  }
+  if (!result) return std::numeric_limits<double>::quiet_NaN();
+  double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
+                             tmp->get(2)->Number());
+  double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
+                               tmp->get(5)->Number(), tmp->get(6)->Number());
+  double date = MakeDate(day, time);
+  if (tmp->get(7)->IsNull(isolate)) {
+    if (!std::isnan(date)) {
+      date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
+    }
+  } else {
+    date -= tmp->get(7)->Number() * 1000.0;
+  }
+  return date;
+}
+
+enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
+
+// ES6 section 20.3.4.41.1 ToDateString(tv)
+void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
+                  ToDateStringMode mode = kDateAndTime) {
+  if (std::isnan(time_val)) {
+    SNPrintF(str, "Invalid Date");
+    return;
+  }
+  int64_t time_ms = static_cast<int64_t>(time_val);
+  int64_t local_time_ms = date_cache->ToLocal(time_ms);
+  int year, month, day, weekday, hour, min, sec, ms;
+  date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
+                            &min, &sec, &ms);
+  int timezone_offset = -date_cache->TimezoneOffset(time_ms);
+  int timezone_hour = std::abs(timezone_offset) / 60;
+  int timezone_min = std::abs(timezone_offset) % 60;
+  const char* local_timezone = date_cache->LocalTimezone(time_ms);
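+  // Illustrative outputs for the three modes (the timezone name comes from
+  // the OS):
+  //   kDateOnly:    "Tue Apr 12 2016"
+  //   kTimeOnly:    "11:16:51 GMT+0200 (CEST)"
+  //   kDateAndTime: "Tue Apr 12 2016 11:16:51 GMT+0200 (CEST)"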
+  switch (mode) {
+    case kDateOnly:
+      SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+               kShortMonths[month], day, year);
+      return;
+    case kTimeOnly:
+      SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+               (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
+               local_timezone);
+      return;
+    case kDateAndTime:
+      SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+               kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
+               min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
+               timezone_min, local_timezone);
+      return;
+  }
+  UNREACHABLE();
+}
+
+Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
+  if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+      time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+    Isolate* const isolate = date->GetIsolate();
+    time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+  } else {
+    time_val = std::numeric_limits<double>::quiet_NaN();
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+}  // namespace
+
+// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
+BUILTIN(DateConstructor) {
+  HandleScope scope(isolate);
+  double const time_val = JSDate::CurrentTimeValue(isolate);
+  char buffer[128];
+  ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
+BUILTIN(DateConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  int const argc = args.length() - 1;
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  double time_val;
+  if (argc == 0) {
+    time_val = JSDate::CurrentTimeValue(isolate);
+  } else if (argc == 1) {
+    Handle<Object> value = args.at<Object>(1);
+    if (value->IsJSDate()) {
+      time_val = Handle<JSDate>::cast(value)->value()->Number();
+    } else {
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                         Object::ToPrimitive(value));
+      if (value->IsString()) {
+        time_val = ParseDateTimeString(Handle<String>::cast(value));
+      } else {
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                           Object::ToNumber(value));
+        time_val = value->Number();
+      }
+    }
+  } else {
+    Handle<Object> year_object;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+                                       Object::ToNumber(args.at<Object>(1)));
+    Handle<Object> month_object;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+                                       Object::ToNumber(args.at<Object>(2)));
+    double year = year_object->Number();
+    double month = month_object->Number();
+    double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+    if (argc >= 3) {
+      Handle<Object> date_object;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+                                         Object::ToNumber(args.at<Object>(3)));
+      date = date_object->Number();
+      if (argc >= 4) {
+        Handle<Object> hours_object;
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+            isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+        hours = hours_object->Number();
+        if (argc >= 5) {
+          Handle<Object> minutes_object;
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+              isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+          minutes = minutes_object->Number();
+          if (argc >= 6) {
+            Handle<Object> seconds_object;
+            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+                isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+            seconds = seconds_object->Number();
+            if (argc >= 7) {
+              Handle<Object> ms_object;
+              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+                  isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+              ms = ms_object->Number();
+            }
+          }
+        }
+      }
+    }
+    if (!std::isnan(year)) {
+      double const y = DoubleToInteger(year);
+      if (0.0 <= y && y <= 99) year = 1900 + y;
+    }
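+    // Two-digit years map into the 20th century; e.g. `new Date(99, 0, 1)`
+    // is January 1, 1999.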
+    double const day = MakeDay(year, month, date);
+    double const time = MakeTime(hours, minutes, seconds, ms);
+    time_val = MakeDate(day, time);
+    if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+        time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+      time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+    } else {
+      time_val = std::numeric_limits<double>::quiet_NaN();
+    }
+  }
+  RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
+}
+
+// ES6 section 20.3.3.1 Date.now ( )
+BUILTIN(DateNow) {
+  HandleScope scope(isolate);
+  return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+}
+
+// ES6 section 20.3.3.2 Date.parse ( string )
+BUILTIN(DateParse) {
+  HandleScope scope(isolate);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, string,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+  return *isolate->factory()->NewNumber(ParseDateTimeString(string));
+}
+
+// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
+BUILTIN(DateUTC) {
+  HandleScope scope(isolate);
+  int const argc = args.length() - 1;
+  double year = std::numeric_limits<double>::quiet_NaN();
+  double month = std::numeric_limits<double>::quiet_NaN();
+  double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+  if (argc >= 1) {
+    Handle<Object> year_object;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+                                       Object::ToNumber(args.at<Object>(1)));
+    year = year_object->Number();
+    if (argc >= 2) {
+      Handle<Object> month_object;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+                                         Object::ToNumber(args.at<Object>(2)));
+      month = month_object->Number();
+      if (argc >= 3) {
+        Handle<Object> date_object;
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+            isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+        date = date_object->Number();
+        if (argc >= 4) {
+          Handle<Object> hours_object;
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+              isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+          hours = hours_object->Number();
+          if (argc >= 5) {
+            Handle<Object> minutes_object;
+            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+                isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+            minutes = minutes_object->Number();
+            if (argc >= 6) {
+              Handle<Object> seconds_object;
+              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+                  isolate, seconds_object,
+                  Object::ToNumber(args.at<Object>(6)));
+              seconds = seconds_object->Number();
+              if (argc >= 7) {
+                Handle<Object> ms_object;
+                ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+                    isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+                ms = ms_object->Number();
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  if (!std::isnan(year)) {
+    double const y = DoubleToInteger(year);
+    if (0.0 <= y && y <= 99) year = 1900 + y;
+  }
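+  // e.g. Date.UTC(99, 0) === Date.UTC(1999, 0); the result is a raw time
+  // value in milliseconds since the epoch, not a Date object.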
+  double const day = MakeDay(year, month, date);
+  double const time = MakeTime(hours, minutes, seconds, ms);
+  return *isolate->factory()->NewNumber(TimeClip(MakeDate(day, time)));
+}
+
+// ES6 section 20.3.4.20 Date.prototype.setDate ( date )
+BUILTIN(DatePrototypeSetDate) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+    int year, month, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+    time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
+BUILTIN(DatePrototypeSetFullYear) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
+  int const argc = args.length() - 1;
+  Handle<Object> year = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+  double y = year->Number(), m = 0.0, dt = 1.0;
+  int time_within_day = 0;
+  if (!std::isnan(date->value()->Number())) {
+    int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+    time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+    int year, month, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+    m = month;
+    dt = day;
+  }
+  if (argc >= 2) {
+    Handle<Object> month = args.at<Object>(2);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+    m = month->Number();
+    if (argc >= 3) {
+      Handle<Object> date = args.at<Object>(3);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+      dt = date->Number();
+    }
+  }
+  double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetHours) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
+  int const argc = args.length() - 1;
+  Handle<Object> hour = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+  double h = hour->Number();
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+    double m = (time_within_day / (60 * 1000)) % 60;
+    double s = (time_within_day / 1000) % 60;
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> min = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+      m = min->Number();
+      if (argc >= 3) {
+        Handle<Object> sec = args.at<Object>(3);
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+        s = sec->Number();
+        if (argc >= 4) {
+          Handle<Object> ms = args.at<Object>(4);
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+          milli = ms->Number();
+        }
+      }
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
+BUILTIN(DatePrototypeSetMilliseconds) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
+  Handle<Object> ms = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    int m = (time_within_day / (60 * 1000)) % 60;
+    int s = (time_within_day / 1000) % 60;
+    time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetMinutes) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
+  int const argc = args.length() - 1;
+  Handle<Object> min = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    double m = min->Number();
+    double s = (time_within_day / 1000) % 60;
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> sec = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+      s = sec->Number();
+      if (argc >= 3) {
+        Handle<Object> ms = args.at<Object>(3);
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+        milli = ms->Number();
+      }
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
+BUILTIN(DatePrototypeSetMonth) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
+  int const argc = args.length() - 1;
+  Handle<Object> month = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+    int year, unused, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+    double m = month->Number();
+    double dt = day;
+    if (argc >= 2) {
+      Handle<Object> date = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+      dt = date->Number();
+    }
+    time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetSeconds) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
+  int const argc = args.length() - 1;
+  Handle<Object> sec = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    double m = (time_within_day / (60 * 1000)) % 60;
+    double s = sec->Number();
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> ms = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+      milli = ms->Number();
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
+BUILTIN(DatePrototypeSetTime) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+  return *JSDate::SetValue(date, TimeClip(value->Number()));
+}
+
+// ES6 section 20.3.4.28 Date.prototype.setUTCDate ( date )
+BUILTIN(DatePrototypeSetUTCDate) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+  if (std::isnan(date->value()->Number())) return date->value();
+  int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+  int const days = isolate->date_cache()->DaysFromTime(time_ms);
+  int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+  int year, month, day;
+  isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+  double const time_val =
+      MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.29 Date.prototype.setUTCFullYear (year, month, date)
+BUILTIN(DatePrototypeSetUTCFullYear) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
+  int const argc = args.length() - 1;
+  Handle<Object> year = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+  double y = year->Number(), m = 0.0, dt = 1.0;
+  int time_within_day = 0;
+  if (!std::isnan(date->value()->Number())) {
+    int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+    int const days = isolate->date_cache()->DaysFromTime(time_ms);
+    time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+    int year, month, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+    m = month;
+    dt = day;
+  }
+  if (argc >= 2) {
+    Handle<Object> month = args.at<Object>(2);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+    m = month->Number();
+    if (argc >= 3) {
+      Handle<Object> date = args.at<Object>(3);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+      dt = date->Number();
+    }
+  }
+  double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.30 Date.prototype.setUTCHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetUTCHours) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
+  int const argc = args.length() - 1;
+  Handle<Object> hour = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+  double h = hour->Number();
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int day = isolate->date_cache()->DaysFromTime(time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+    double m = (time_within_day / (60 * 1000)) % 60;
+    double s = (time_within_day / 1000) % 60;
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> min = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+      m = min->Number();
+      if (argc >= 3) {
+        Handle<Object> sec = args.at<Object>(3);
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+        s = sec->Number();
+        if (argc >= 4) {
+          Handle<Object> ms = args.at<Object>(4);
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+          milli = ms->Number();
+        }
+      }
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.31 Date.prototype.setUTCMilliseconds(ms)
+BUILTIN(DatePrototypeSetUTCMilliseconds) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
+  Handle<Object> ms = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int day = isolate->date_cache()->DaysFromTime(time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    int m = (time_within_day / (60 * 1000)) % 60;
+    int s = (time_within_day / 1000) % 60;
+    time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.32 Date.prototype.setUTCMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetUTCMinutes) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
+  int const argc = args.length() - 1;
+  Handle<Object> min = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int day = isolate->date_cache()->DaysFromTime(time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    double m = min->Number();
+    double s = (time_within_day / 1000) % 60;
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> sec = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+      s = sec->Number();
+      if (argc >= 3) {
+        Handle<Object> ms = args.at<Object>(3);
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+        milli = ms->Number();
+      }
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.33 Date.prototype.setUTCMonth ( month, date )
+BUILTIN(DatePrototypeSetUTCMonth) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
+  int const argc = args.length() - 1;
+  Handle<Object> month = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int days = isolate->date_cache()->DaysFromTime(time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+    int year, unused, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+    double m = month->Number();
+    double dt = day;
+    if (argc >= 2) {
+      Handle<Object> date = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+      dt = date->Number();
+    }
+    time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetUTCSeconds) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
+  int const argc = args.length() - 1;
+  Handle<Object> sec = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+  double time_val = date->value()->Number();
+  if (!std::isnan(time_val)) {
+    int64_t const time_ms = static_cast<int64_t>(time_val);
+    int day = isolate->date_cache()->DaysFromTime(time_ms);
+    int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+    int h = time_within_day / (60 * 60 * 1000);
+    double m = (time_within_day / (60 * 1000)) % 60;
+    double s = sec->Number();
+    double milli = time_within_day % 1000;
+    if (argc >= 2) {
+      Handle<Object> ms = args.at<Object>(2);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+      milli = ms->Number();
+    }
+    time_val = MakeDate(day, MakeTime(h, m, s, milli));
+  }
+  return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.35 Date.prototype.toDateString ( )
+BUILTIN(DatePrototypeToDateString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
+  char buffer[128];
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache(), kDateOnly);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
+BUILTIN(DatePrototypeToISOString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
+  double const time_val = date->value()->Number();
+  if (std::isnan(time_val)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+  }
+  int64_t const time_ms = static_cast<int64_t>(time_val);
+  int year, month, day, weekday, hour, min, sec, ms;
+  isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+                                       &hour, &min, &sec, &ms);
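+  // Produces e.g. "2016-04-12T09:16:51.123Z"; years outside 0..9999 use an
+  // expanded six-digit form, e.g. new Date(8.64e15).toISOString() is
+  // "+275760-09-13T00:00:00.000Z".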
+  char buffer[128];
+  if (year >= 0 && year <= 9999) {
+    SNPrintF(ArrayVector(buffer), "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+             month + 1, day, hour, min, sec, ms);
+  } else if (year < 0) {
+    SNPrintF(ArrayVector(buffer), "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year,
+             month + 1, day, hour, min, sec, ms);
+  } else {
+    SNPrintF(ArrayVector(buffer), "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+             month + 1, day, hour, min, sec, ms);
+  }
+  return *isolate->factory()->NewStringFromAsciiChecked(buffer);
+}
+
+// ES6 section 20.3.4.41 Date.prototype.toString ( )
+BUILTIN(DatePrototypeToString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
+  char buffer[128];
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache());
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
+BUILTIN(DatePrototypeToTimeString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
+  char buffer[128];
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache(), kTimeOnly);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
+BUILTIN(DatePrototypeToUTCString) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
+  double const time_val = date->value()->Number();
+  if (std::isnan(time_val)) {
+    return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
+  }
+  char buffer[128];
+  int64_t time_ms = static_cast<int64_t>(time_val);
+  int year, month, day, weekday, hour, min, sec, ms;
+  isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+                                       &hour, &min, &sec, &ms);
+  SNPrintF(ArrayVector(buffer), "%s, %02d %s %4d %02d:%02d:%02d GMT",
+           kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
+           sec);
+  return *isolate->factory()->NewStringFromAsciiChecked(buffer);
+}
+
+// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
+BUILTIN(DatePrototypeValueOf) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
+  return date->value();
+}
+
+// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
+BUILTIN(DatePrototypeToPrimitive) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
+  Handle<Object> hint = args.at<Object>(1);
+  RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
+}
+
+// ES6 section B.2.4.1 Date.prototype.getYear ( )
+BUILTIN(DatePrototypeGetYear) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
+  double time_val = date->value()->Number();
+  if (std::isnan(time_val)) return date->value();
+  int64_t time_ms = static_cast<int64_t>(time_val);
+  int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+  int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+  int year, month, day;
+  isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+  return Smi::FromInt(year - 1900);
+}
+
+// ES6 section B.2.4.2 Date.prototype.setYear ( year )
+BUILTIN(DatePrototypeSetYear) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
+  Handle<Object> year = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+  double m = 0.0, dt = 1.0, y = year->Number();
+  if (0.0 <= y && y <= 99.0) {
+    y = 1900.0 + DoubleToInteger(y);
+  }
+  int time_within_day = 0;
+  if (!std::isnan(date->value()->Number())) {
+    int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+    int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+    int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+    time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+    int year, month, day;
+    isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+    m = month;
+    dt = day;
+  }
+  double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+  return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.37 Date.prototype.toJSON ( key )
+BUILTIN(DatePrototypeToJson) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.atOrUndefined(isolate, 0);
+  Handle<JSReceiver> receiver_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_obj,
+                                     Object::ToObject(isolate, receiver));
+  Handle<Object> primitive;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, primitive,
+      Object::ToPrimitive(receiver_obj, ToPrimitiveHint::kNumber));
+  if (primitive->IsNumber() && !std::isfinite(primitive->Number())) {
+    return isolate->heap()->null_value();
+  } else {
+    Handle<String> name =
+        isolate->factory()->NewStringFromAsciiChecked("toISOString");
+    Handle<Object> function;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, function,
+                                       Object::GetProperty(receiver_obj, name));
+    if (!function->IsCallable()) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError(MessageTemplate::kCalledNonCallable, name));
+    }
+    RETURN_RESULT_OR_FAILURE(
+        isolate, Execution::Call(isolate, function, receiver_obj, 0, NULL));
+  }
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kDay);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kYear);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kHour);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
+  Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-debug.cc b/src/builtins/builtins-debug.cc
new file mode 100644
index 0000000..011eba3
--- /dev/null
+++ b/src/builtins/builtins-debug.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_Return_DebugBreak(MacroAssembler* masm) {
+  DebugCodegen::GenerateDebugBreakStub(masm,
+                                       DebugCodegen::SAVE_RESULT_REGISTER);
+}
+
+void Builtins::Generate_Slot_DebugBreak(MacroAssembler* masm) {
+  DebugCodegen::GenerateDebugBreakStub(masm,
+                                       DebugCodegen::IGNORE_RESULT_REGISTER);
+}
+
+void Builtins::Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
+  DebugCodegen::GenerateFrameDropperLiveEdit(masm);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-error.cc b/src/builtins/builtins-error.cc
new file mode 100644
index 0000000..c2a7b99
--- /dev/null
+++ b/src/builtins/builtins-error.cc
@@ -0,0 +1,135 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/messages.h"
+#include "src/property-descriptor.h"
+#include "src/string-builder.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 19.5.1.1 Error ( message )
+BUILTIN(ErrorConstructor) {
+  HandleScope scope(isolate);
+
+  FrameSkipMode mode = SKIP_FIRST;
+  Handle<Object> caller;
+
+  // When we're passed a JSFunction as new target, we can skip frames until that
+  // specific function is seen instead of unconditionally skipping the first
+  // frame.
+  if (args.new_target()->IsJSFunction()) {
+    mode = SKIP_UNTIL_SEEN;
+    caller = args.new_target();
+  }
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, ErrorUtils::Construct(isolate, args.target<JSFunction>(),
+                                     Handle<Object>::cast(args.new_target()),
+                                     args.atOrUndefined(isolate, 1), mode,
+                                     caller, false));
+}
+
+// static
+BUILTIN(ErrorCaptureStackTrace) {
+  HandleScope scope(isolate);
+  Handle<Object> object_obj = args.atOrUndefined(isolate, 1);
+  if (!object_obj->IsJSObject()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kInvalidArgument, object_obj));
+  }
+  Handle<JSObject> object = Handle<JSObject>::cast(object_obj);
+  Handle<Object> caller = args.atOrUndefined(isolate, 2);
+  FrameSkipMode mode = caller->IsJSFunction() ? SKIP_UNTIL_SEEN : SKIP_FIRST;
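+
+  // This implements the V8-specific Error.captureStackTrace(obj, opt_caller)
+  // API; e.g. Error.captureStackTrace(err, MyError) omits MyError and all
+  // frames above it from the formatted trace.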
+
+  // Collect the stack trace.
+
+  RETURN_FAILURE_ON_EXCEPTION(isolate,
+                              isolate->CaptureAndSetDetailedStackTrace(object));
+
+  // Eagerly format the stack trace and set the stack property.
+
+  Handle<Object> stack_trace =
+      isolate->CaptureSimpleStackTrace(object, mode, caller);
+  if (!stack_trace->IsJSArray()) return *isolate->factory()->undefined_value();
+
+  Handle<Object> formatted_stack_trace;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, formatted_stack_trace,
+      ErrorUtils::FormatStackTrace(isolate, object, stack_trace));
+
+  PropertyDescriptor desc;
+  desc.set_configurable(true);
+  desc.set_writable(true);
+  desc.set_value(formatted_stack_trace);
+  Maybe<bool> status = JSReceiver::DefineOwnProperty(
+      isolate, object, isolate->factory()->stack_string(), &desc,
+      Object::THROW_ON_ERROR);
+  if (!status.IsJust()) return isolate->heap()->exception();
+  CHECK(status.FromJust());
+  return isolate->heap()->undefined_value();
+}
+
+// ES6 section 19.5.3.4 Error.prototype.toString ( )
+BUILTIN(ErrorPrototypeToString) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           ErrorUtils::ToString(isolate, args.receiver()));
+}
+
+namespace {
+
+Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
+                         Handle<JSFunction> constructor) {
+  Handle<Object> template_index = args.atOrUndefined(isolate, 1);
+  Handle<Object> arg0 = args.atOrUndefined(isolate, 2);
+  Handle<Object> arg1 = args.atOrUndefined(isolate, 3);
+  Handle<Object> arg2 = args.atOrUndefined(isolate, 4);
+
+  DCHECK(template_index->IsSmi());
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, ErrorUtils::MakeGenericError(isolate, constructor,
+                                            Smi::cast(*template_index)->value(),
+                                            arg0, arg1, arg2, SKIP_NONE));
+}
+
+}  // namespace
+
+BUILTIN(MakeError) {
+  HandleScope scope(isolate);
+  return MakeGenericError(isolate, args, isolate->error_function());
+}
+
+BUILTIN(MakeRangeError) {
+  HandleScope scope(isolate);
+  return MakeGenericError(isolate, args, isolate->range_error_function());
+}
+
+BUILTIN(MakeSyntaxError) {
+  HandleScope scope(isolate);
+  return MakeGenericError(isolate, args, isolate->syntax_error_function());
+}
+
+BUILTIN(MakeTypeError) {
+  HandleScope scope(isolate);
+  return MakeGenericError(isolate, args, isolate->type_error_function());
+}
+
+BUILTIN(MakeURIError) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> constructor = isolate->uri_error_function();
+  Handle<Object> undefined = isolate->factory()->undefined_value();
+  const int template_index = MessageTemplate::kURIMalformed;
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      ErrorUtils::MakeGenericError(isolate, constructor, template_index,
+                                   undefined, undefined, undefined, SKIP_NONE));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-function.cc b/src/builtins/builtins-function.cc
new file mode 100644
index 0000000..0a631bf
--- /dev/null
+++ b/src/builtins/builtins-function.cc
@@ -0,0 +1,297 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/compiler.h"
+#include "src/string-builder.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// ES6 section 19.2.1.1.1 CreateDynamicFunction
+MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
+                                          BuiltinArguments args,
+                                          const char* token) {
+  // Compute number of arguments, ignoring the receiver.
+  DCHECK_LE(1, args.length());
+  int const argc = args.length() - 1;
+
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+
+  if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
+    isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
+    return isolate->factory()->undefined_value();
+  }
+
+  // Build the source string.
+  Handle<String> source;
+  {
+    IncrementalStringBuilder builder(isolate);
+    builder.AppendCharacter('(');
+    builder.AppendCString(token);
+    builder.AppendCharacter('(');
+    bool parenthesis_in_arg_string = false;
+    if (argc > 1) {
+      for (int i = 1; i < argc; ++i) {
+        if (i > 1) builder.AppendCharacter(',');
+        Handle<String> param;
+        ASSIGN_RETURN_ON_EXCEPTION(
+            isolate, param, Object::ToString(isolate, args.at<Object>(i)),
+            Object);
+        param = String::Flatten(param);
+        builder.AppendString(param);
+        // If the formal parameter string includes ')' - an illegal
+        // character - it could make the combined function expression
+        // compile unintentionally. We avoid this problem by checking for
+        // it early on.
+        DisallowHeapAllocation no_gc;  // Ensure vectors stay valid.
+        String::FlatContent param_content = param->GetFlatContent();
+        for (int i = 0, length = param->length(); i < length; ++i) {
+          if (param_content.Get(i) == ')') {
+            parenthesis_in_arg_string = true;
+            break;
+          }
+        }
+      }
+      // If the formal parameters include an unbalanced block comment, the
+      // function must be rejected. Since JavaScript does not allow nested
+      // comments we can include a trailing block comment to catch this.
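+      // For example, a parameter string ending in "/*" would otherwise
+      // comment out the generated ") {" and change the function's shape;
+      // the trailing "/**/" turns such input into a SyntaxError instead.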
+      builder.AppendCString("\n/**/");
+    }
+    builder.AppendCString(") {\n");
+    if (argc > 0) {
+      Handle<String> body;
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
+          Object);
+      builder.AppendString(body);
+    }
+    builder.AppendCString("\n})");
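+    // At this point the assembled source has the shape (illustrative, for
+    // new Function("a", "b", "return a + b")):
+    //   (function(a,b
+    //   /**/) {
+    //   return a + b
+    //   })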
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), Object);
+
+    // The SyntaxError must be thrown after all the (observable) ToString
+    // conversions are done.
+    if (parenthesis_in_arg_string) {
+      THROW_NEW_ERROR(isolate,
+                      NewSyntaxError(MessageTemplate::kParenthesisInArgString),
+                      Object);
+    }
+  }
+
+  // Compile the string in the constructor rather than in a helper so that
+  // errors appear to come from here.
+  Handle<JSFunction> function;
+  {
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
+                               Compiler::GetFunctionFromString(
+                                   handle(target->native_context(), isolate),
+                                   source, ONLY_SINGLE_FUNCTION_LITERAL),
+                               Object);
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, result,
+        Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
+        Object);
+    function = Handle<JSFunction>::cast(result);
+    function->shared()->set_name_should_print_as_anonymous(true);
+  }
+
+  // If new.target equals target, the function created is already correctly
+  // set up and nothing else needs to be done here. But if new.target is not
+  // equal to target, we have a Function builtin subclassing case, and the
+  // function therefore has the wrong initial map. To fix that, we create a
+  // new function object with the correct initial map.
+  Handle<Object> unchecked_new_target = args.new_target();
+  if (!unchecked_new_target->IsUndefined(isolate) &&
+      !unchecked_new_target.is_identical_to(target)) {
+    Handle<JSReceiver> new_target =
+        Handle<JSReceiver>::cast(unchecked_new_target);
+    Handle<Map> initial_map;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, initial_map,
+        JSFunction::GetDerivedMap(isolate, target, new_target), Object);
+
+    Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+    Handle<Map> map = Map::AsLanguageMode(
+        initial_map, shared_info->language_mode(), shared_info->kind());
+
+    Handle<Context> context(function->context(), isolate);
+    function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+        map, shared_info, context, NOT_TENURED);
+  }
+  return function;
+}
+
+}  // namespace
+
+// ES6 section 19.2.1.1 Function ( p1, p2, ... , pn, body )
+BUILTIN(FunctionConstructor) {
+  HandleScope scope(isolate);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, CreateDynamicFunction(isolate, args, "function"));
+  return *result;
+}
+
+// ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
+BUILTIN(GeneratorFunctionConstructor) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           CreateDynamicFunction(isolate, args, "function*"));
+}
+
+BUILTIN(AsyncFunctionConstructor) {
+  HandleScope scope(isolate);
+  Handle<Object> maybe_func;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, maybe_func,
+      CreateDynamicFunction(isolate, args, "async function"));
+  if (!maybe_func->IsJSFunction()) return *maybe_func;
+
+  // Do not lazily compute the eval position for AsyncFunctions, as it may
+  // not be determinable after the function is resumed.
+  Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
+  Handle<Script> script = handle(Script::cast(func->shared()->script()));
+  int position = script->GetEvalPosition();
+  USE(position);
+
+  return *func;
+}
+
+namespace {
+
+Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
+  HandleScope scope(isolate);
+  DCHECK_LE(1, args.length());
+  if (!args.receiver()->IsCallable()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kFunctionBind));
+  }
+
+  // Allocate the bound function with the given {this_arg} and {args}.
+  Handle<JSReceiver> target = args.at<JSReceiver>(0);
+  Handle<Object> this_arg = isolate->factory()->undefined_value();
+  ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
+  if (args.length() > 1) {
+    this_arg = args.at<Object>(1);
+    for (int i = 2; i < args.length(); ++i) {
+      argv[i - 2] = args.at<Object>(i);
+    }
+  }
+  Handle<JSBoundFunction> function;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, function,
+      isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
+
+  LookupIterator length_lookup(target, isolate->factory()->length_string(),
+                               target, LookupIterator::OWN);
+  // Set up the "length" property based on the "length" of the {target}.
+  // If the target's length is the default JSFunction accessor, we can keep
+  // the accessor that's installed by default on the JSBoundFunction. It
+  // lazily computes the value from the underlying internal length.
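+  // Per the spec, the bound length is the target's length minus the number
+  // of bound arguments, clamped at zero, e.g. (illustrative JS):
+  //   function f(a, b, c) {}
+  //   f.bind(null, 1).length  // 2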
+  if (!target->IsJSFunction() ||
+      length_lookup.state() != LookupIterator::ACCESSOR ||
+      !length_lookup.GetAccessors()->IsAccessorInfo()) {
+    Handle<Object> length(Smi::FromInt(0), isolate);
+    Maybe<PropertyAttributes> attributes =
+        JSReceiver::GetPropertyAttributes(&length_lookup);
+    if (!attributes.IsJust()) return isolate->heap()->exception();
+    if (attributes.FromJust() != ABSENT) {
+      Handle<Object> target_length;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
+                                         Object::GetProperty(&length_lookup));
+      if (target_length->IsNumber()) {
+        length = isolate->factory()->NewNumber(std::max(
+            0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+      }
+    }
+    LookupIterator it(function, isolate->factory()->length_string(), function);
+    DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                JSObject::DefineOwnPropertyIgnoreAttributes(
+                                    &it, length, it.property_attributes()));
+  }
+
+  // Set up the "name" property based on the "name" of the {target}.
+  // If the target's name is the default JSFunction accessor, we can keep the
+  // accessor that's installed by default on the JSBoundFunction. It lazily
+  // computes the value from the underlying internal name.
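+  // The bound name prefixes the target's name with "bound ", e.g.
+  // (illustrative JS): function f() {}; f.bind().name === "bound f".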
+  LookupIterator name_lookup(target, isolate->factory()->name_string(), target,
+                             LookupIterator::OWN);
+  if (!target->IsJSFunction() ||
+      name_lookup.state() != LookupIterator::ACCESSOR ||
+      !name_lookup.GetAccessors()->IsAccessorInfo()) {
+    Handle<Object> target_name;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_name,
+                                       Object::GetProperty(&name_lookup));
+    Handle<String> name;
+    if (target_name->IsString()) {
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, name,
+          Name::ToFunctionName(Handle<String>::cast(target_name)));
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, name, isolate->factory()->NewConsString(
+                             isolate->factory()->bound__string(), name));
+    } else {
+      name = isolate->factory()->bound__string();
+    }
+    LookupIterator it(function, isolate->factory()->name_string());
+    DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                JSObject::DefineOwnPropertyIgnoreAttributes(
+                                    &it, name, it.property_attributes()));
+  }
+  return *function;
+}
+
+}  // namespace
+
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
+// can tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_FunctionBind) {
+  DCHECK_EQ(2, args.length());
+  Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+  // Rewrap the arguments as builtins arguments.
+  int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
+  BuiltinArguments caller_args(argc, incoming->arguments() + 1);
+  return DoFunctionBind(isolate, caller_args);
+}
+
+// ES6 section 19.2.3.5 Function.prototype.toString ( )
+BUILTIN(FunctionPrototypeToString) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (receiver->IsJSBoundFunction()) {
+    return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
+  } else if (receiver->IsJSFunction()) {
+    return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
+  }
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                            isolate->factory()->NewStringFromAsciiChecked(
+                                "Function.prototype.toString")));
+}
+
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
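+// This is the default Symbol.hasInstance implementation invoked by the
+// instanceof operator: `v instanceof F` evaluates F[Symbol.hasInstance](v),
+// which for ordinary functions walks v's prototype chain looking for
+// F.prototype.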
+void Builtins::Generate_FunctionPrototypeHasInstance(
+    CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* f = assembler->Parameter(0);
+  Node* v = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* result = assembler->OrdinaryHasInstance(context, f, v);
+  assembler->Return(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-generator.cc b/src/builtins/builtins-generator.cc
new file mode 100644
index 0000000..93b2e48
--- /dev/null
+++ b/src/builtins/builtins-generator.cc
@@ -0,0 +1,116 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void Generate_GeneratorPrototypeResume(
+    CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
+    char const* const method_name) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* value = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* closed =
+      assembler->SmiConstant(Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+
+  // Check if the {receiver} is actually a JSGeneratorObject.
+  Label if_receiverisincompatible(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  assembler->GotoUnless(assembler->Word32Equal(
+                            receiver_instance_type,
+                            assembler->Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+                        &if_receiverisincompatible);
+
+  // Check if the {receiver} is running or already closed.
+  Node* receiver_continuation = assembler->LoadObjectField(
+      receiver, JSGeneratorObject::kContinuationOffset);
+  Label if_receiverisclosed(assembler, Label::kDeferred),
+      if_receiverisrunning(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->SmiEqual(receiver_continuation, closed),
+                    &if_receiverisclosed);
+  DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
+            JSGeneratorObject::kGeneratorClosed);
+  assembler->GotoIf(assembler->SmiLessThan(receiver_continuation, closed),
+                    &if_receiverisrunning);
+
+  // Resume the {receiver} using our trampoline.
+  Node* result = assembler->CallStub(
+      CodeFactory::ResumeGenerator(assembler->isolate()), context, value,
+      receiver, assembler->SmiConstant(Smi::FromInt(resume_mode)));
+  assembler->Return(result);
+
+  assembler->Bind(&if_receiverisincompatible);
+  {
+    // The {receiver} is not a valid JSGeneratorObject.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            method_name, TENURED)),
+        receiver);
+    assembler->Return(result);  // Never reached.
+  }
+
+  assembler->Bind(&if_receiverisclosed);
+  {
+    // The {receiver} is closed already.
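+    // Per the spec, resuming a closed generator is benign for next/return
+    // but rethrows for throw, e.g. (illustrative JS):
+    //   gen.next()     // {value: undefined, done: true}
+    //   gen.return(v)  // {value: v, done: true}
+    //   gen.throw(e)   // throws e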
+    Node* result = nullptr;
+    switch (resume_mode) {
+      case JSGeneratorObject::kNext:
+        result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
+                                        context, assembler->UndefinedConstant(),
+                                        assembler->BooleanConstant(true));
+        break;
+      case JSGeneratorObject::kReturn:
+        result =
+            assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
+                                   value, assembler->BooleanConstant(true));
+        break;
+      case JSGeneratorObject::kThrow:
+        result = assembler->CallRuntime(Runtime::kThrow, context, value);
+        break;
+    }
+    assembler->Return(result);
+  }
+
+  assembler->Bind(&if_receiverisrunning);
+  {
+    Node* result =
+        assembler->CallRuntime(Runtime::kThrowGeneratorRunning, context);
+    assembler->Return(result);  // Never reached.
+  }
+}
+
+}  // anonymous namespace
+
+// ES6 section 25.3.1.2 Generator.prototype.next ( value )
+void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
+                                    "[Generator].prototype.next");
+}
+
+// ES6 section 25.3.1.3 Generator.prototype.return ( value )
+void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
+                                    "[Generator].prototype.return");
+}
+
+// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
+void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
+                                    "[Generator].prototype.throw");
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-global.cc b/src/builtins/builtins-global.cc
new file mode 100644
index 0000000..d99a553
--- /dev/null
+++ b/src/builtins/builtins-global.cc
@@ -0,0 +1,103 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/compiler.h"
+#include "src/uri.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 18.2.6.2 decodeURI (encodedURI)
+BUILTIN(GlobalDecodeURI) {
+  HandleScope scope(isolate);
+  Handle<String> encoded_uri;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, encoded_uri,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::DecodeUri(isolate, encoded_uri));
+}
+
+// ES6 section 18.2.6.3 decodeURIComponent (encodedURIComponent)
+BUILTIN(GlobalDecodeURIComponent) {
+  HandleScope scope(isolate);
+  Handle<String> encoded_uri_component;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, encoded_uri_component,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Uri::DecodeUriComponent(isolate, encoded_uri_component));
+}
+
+// ES6 section 18.2.6.4 encodeURI (uri)
+BUILTIN(GlobalEncodeURI) {
+  HandleScope scope(isolate);
+  Handle<String> uri;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, uri, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::EncodeUri(isolate, uri));
+}
+
+// ES6 section 18.2.6.5 encodeURIComponent (uriComponent)
+BUILTIN(GlobalEncodeURIComponent) {
+  HandleScope scope(isolate);
+  Handle<String> uri_component;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, uri_component,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           Uri::EncodeUriComponent(isolate, uri_component));
+}
+
+// ES6 section B.2.1.1 escape (string)
+BUILTIN(GlobalEscape) {
+  HandleScope scope(isolate);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, string,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::Escape(isolate, string));
+}
+
+// ES6 section B.2.1.2 unescape (string)
+BUILTIN(GlobalUnescape) {
+  HandleScope scope(isolate);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, string,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  RETURN_RESULT_OR_FAILURE(isolate, Uri::Unescape(isolate, string));
+}
+
+// ES6 section 18.2.1 eval (x)
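+// Note that a non-string argument is simply returned unchanged, e.g.
+// (illustrative JS): eval(42) === 42, and eval(f) === f for a function f.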
+BUILTIN(GlobalEval) {
+  HandleScope scope(isolate);
+  Handle<Object> x = args.atOrUndefined(isolate, 1);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+  if (!x->IsString()) return *x;
+  if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
+    isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
+    return isolate->heap()->undefined_value();
+  }
+  Handle<JSFunction> function;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, function, Compiler::GetFunctionFromString(
+                             handle(target->native_context(), isolate),
+                             Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+  RETURN_RESULT_OR_FAILURE(
+      isolate,
+      Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-handler.cc b/src/builtins/builtins-handler.cc
new file mode 100644
index 0000000..8b3df79
--- /dev/null
+++ b/src/builtins/builtins-handler.cc
@@ -0,0 +1,148 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateMegamorphic(masm);
+}
+
+void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateMiss(masm);
+}
+void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
+}
+
+void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMiss(masm);
+}
+
+void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+  ElementHandlerCompiler::GenerateStoreSlow(masm);
+}
+
+void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
+                             vector);
+}
+
+void Builtins::Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
+                             vector);
+}
+
+void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+  NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
+}
+
+void Builtins::Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+                             slot, vector);
+}
+
+void Builtins::Generate_LoadIC_Normal(MacroAssembler* masm) {
+  LoadIC::GenerateNormal(masm);
+}
+
+void Builtins::Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* slot = assembler->Parameter(Descriptor::kSlot);
+  Node* vector = assembler->Parameter(Descriptor::kVector);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, receiver, name,
+                             value, slot, vector);
+}
+
+void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
+  StoreIC::GenerateNormal(masm);
+}
+
+void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+  NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
+}
+
+namespace {
+void Generate_StoreIC_Slow(CodeStubAssembler* assembler,
+                           LanguageMode language_mode) {
+  typedef compiler::Node Node;
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+  Node* name = assembler->Parameter(Descriptor::kName);
+  Node* value = assembler->Parameter(Descriptor::kValue);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* lang_mode = assembler->SmiConstant(Smi::FromInt(language_mode));
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  assembler->TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
+                             value, lang_mode);
+}
+}  // anonymous namespace
+
+void Builtins::Generate_StoreIC_SlowSloppy(CodeStubAssembler* assembler) {
+  Generate_StoreIC_Slow(assembler, SLOPPY);
+}
+
+void Builtins::Generate_StoreIC_SlowStrict(CodeStubAssembler* assembler) {
+  Generate_StoreIC_Slow(assembler, STRICT);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-internal.cc b/src/builtins/builtins-internal.cc
new file mode 100644
index 0000000..87c5dd5
--- /dev/null
+++ b/src/builtins/builtins-internal.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/interface-descriptors.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(Illegal) {
+  UNREACHABLE();
+  return isolate->heap()->undefined_value();  // Make compiler happy.
+}
+
+BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
+
+BUILTIN(UnsupportedThrower) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                 NewError(MessageTemplate::kUnsupported));
+}
+
+// -----------------------------------------------------------------------------
+// Throwers for restricted function properties and strict arguments object
+// properties
+
+BUILTIN(RestrictedFunctionPropertiesThrower) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
+}
+
+BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
+}
+
+// -----------------------------------------------------------------------------
+// Interrupt and stack checks.
+
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+  masm->TailCallRuntime(Runtime::kInterrupt);
+}
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+  masm->TailCallRuntime(Runtime::kStackGuard);
+}
+
+// -----------------------------------------------------------------------------
+// TurboFan support builtins.
+
+void Builtins::Generate_CopyFastSmiOrObjectElements(
+    CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CopyFastSmiOrObjectElementsDescriptor Descriptor;
+
+  Node* object = assembler->Parameter(Descriptor::kObject);
+
+  // Load the {object}'s elements.
+  Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
+
+  CodeStubAssembler::ParameterMode mode =
+      assembler->Is64() ? CodeStubAssembler::INTEGER_PARAMETERS
+                        : CodeStubAssembler::SMI_PARAMETERS;
+  Node* length = (mode == CodeStubAssembler::INTEGER_PARAMETERS)
+                     ? assembler->LoadAndUntagFixedArrayBaseLength(source)
+                     : assembler->LoadFixedArrayBaseLength(source);
+
+  // Check if we can allocate in new space.
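+  // A target in new space can be filled with SKIP_WRITE_BARRIER, while a
+  // pretenured (old-space) target needs UPDATE_WRITE_BARRIER so that
+  // old-to-new pointers are recorded.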
+  ElementsKind kind = FAST_ELEMENTS;
+  int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
+  Label if_newspace(assembler), if_oldspace(assembler);
+  assembler->Branch(
+      assembler->UintPtrLessThan(
+          length, assembler->IntPtrOrSmiConstant(max_elements, mode)),
+      &if_newspace, &if_oldspace);
+
+  assembler->Bind(&if_newspace);
+  {
+    Node* target = assembler->AllocateFixedArray(kind, length, mode);
+    assembler->CopyFixedArrayElements(kind, source, target, length,
+                                      SKIP_WRITE_BARRIER, mode);
+    assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
+    assembler->Return(target);
+  }
+
+  assembler->Bind(&if_oldspace);
+  {
+    Node* target = assembler->AllocateFixedArray(
+        kind, length, mode, CodeStubAssembler::kPretenured);
+    assembler->CopyFixedArrayElements(kind, source, target, length,
+                                      UPDATE_WRITE_BARRIER, mode);
+    assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
+    assembler->Return(target);
+  }
+}
+
+void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef GrowArrayElementsDescriptor Descriptor;
+
+  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* key = assembler->Parameter(Descriptor::kKey);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
+  Node* elements = assembler->LoadElements(object);
+  elements = assembler->CheckAndGrowElementsCapacity(
+      context, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
+  assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+  assembler->Return(elements);
+
+  assembler->Bind(&runtime);
+  assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+void Builtins::Generate_GrowFastSmiOrObjectElements(
+    CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef GrowArrayElementsDescriptor Descriptor;
+
+  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* key = assembler->Parameter(Descriptor::kKey);
+  Node* context = assembler->Parameter(Descriptor::kContext);
+
+  Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
+  Node* elements = assembler->LoadElements(object);
+  elements = assembler->CheckAndGrowElementsCapacity(
+      context, elements, FAST_ELEMENTS, key, &runtime);
+  assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+  assembler->Return(elements);
+
+  assembler->Bind(&runtime);
+  assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-interpreter.cc b/src/builtins/builtins-interpreter.cc
new file mode 100644
index 0000000..900172f
--- /dev/null
+++ b/src/builtins/builtins-interpreter.cc
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
+                                                  CallableType function_type) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      if (function_type == CallableType::kJSFunction) {
+        return InterpreterPushArgsAndCallFunction();
+      } else {
+        return InterpreterPushArgsAndCall();
+      }
+    case TailCallMode::kAllow:
+      if (function_type == CallableType::kJSFunction) {
+        return InterpreterPushArgsAndTailCallFunction();
+      } else {
+        return InterpreterPushArgsAndTailCall();
+      }
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
+                                                 CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCallFunction(
+    MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
+                                                 CallableType::kJSFunction);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
+                                                 CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndTailCallFunction(
+    MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
+                                                 CallableType::kJSFunction);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-json.cc b/src/builtins/builtins-json.cc
new file mode 100644
index 0000000..4a8c7c5
--- /dev/null
+++ b/src/builtins/builtins-json.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/json-parser.h"
+#include "src/json-stringifier.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 24.3.1 JSON.parse.
+BUILTIN(JsonParse) {
+  HandleScope scope(isolate);
+  Handle<Object> source = args.atOrUndefined(isolate, 1);
+  Handle<Object> reviver = args.atOrUndefined(isolate, 2);
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+                                     Object::ToString(isolate, source));
+  string = String::Flatten(string);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, string->IsSeqOneByteString()
+                   ? JsonParser<true>::Parse(isolate, string, reviver)
+                   : JsonParser<false>::Parse(isolate, string, reviver));
+}
+
+// ES6 section 24.3.2 JSON.stringify.
+BUILTIN(JsonStringify) {
+  HandleScope scope(isolate);
+  JsonStringifier stringifier(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<Object> replacer = args.atOrUndefined(isolate, 2);
+  Handle<Object> indent = args.atOrUndefined(isolate, 3);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           stringifier.Stringify(object, replacer, indent));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-math.cc b/src/builtins/builtins-math.cc
new file mode 100644
index 0000000..e8d429e
--- /dev/null
+++ b/src/builtins/builtins-math.cc
@@ -0,0 +1,561 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.2.2 Function Properties of the Math Object
+
+// ES6 section - 20.2.2.1 Math.abs ( x )
+void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
+  using compiler::Node;
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Abs(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.2 Math.acos ( x )
+void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Acos(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Acosh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Asin(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Asinh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atan(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atanh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* y = assembler->Parameter(1);
+  Node* x = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Atan2(y_value, x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+namespace {
+
+void Generate_MathRoundingOperation(
+    CodeStubAssembler* assembler,
+    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_x(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_x);
+  var_x.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {x} value.
+    Node* x = var_x.value();
+
+    // Check if {x} is a Smi or a HeapObject.
+    Label if_xissmi(assembler), if_xisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+    assembler->Bind(&if_xissmi);
+    {
+      // Nothing to do when {x} is a Smi.
+      assembler->Return(x);
+    }
+
+    assembler->Bind(&if_xisnotsmi);
+    {
+      // Check if {x} is a HeapNumber.
+      Label if_xisheapnumber(assembler),
+          if_xisnotheapnumber(assembler, Label::kDeferred);
+      assembler->Branch(
+          assembler->WordEqual(assembler->LoadMap(x),
+                               assembler->HeapNumberMapConstant()),
+          &if_xisheapnumber, &if_xisnotheapnumber);
+
+      assembler->Bind(&if_xisheapnumber);
+      {
+        Node* x_value = assembler->LoadHeapNumberValue(x);
+        Node* value = (assembler->*float64op)(x_value);
+        Node* result = assembler->ChangeFloat64ToTagged(value);
+        assembler->Return(result);
+      }
+
+      assembler->Bind(&if_xisnotheapnumber);
+      {
+        // Need to convert {x} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_x.Bind(assembler->CallStub(callable, context, x));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+}
+
+}  // namespace
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
+}
+
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Cbrt(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* context = assembler->Parameter(4);
+
+  // Shared entry point for the clz32 operation.
+  Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
+  Label do_clz32(assembler);
+
+  // We might need to loop once for ToNumber conversion.
+  Variable var_x(assembler, MachineRepresentation::kTagged);
+  Label loop(assembler, &var_x);
+  var_x.Bind(assembler->Parameter(1));
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {x} value.
+    Node* x = var_x.value();
+
+    // Check if {x} is a Smi or a HeapObject.
+    Label if_xissmi(assembler), if_xisnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+    assembler->Bind(&if_xissmi);
+    {
+      var_clz32_x.Bind(assembler->SmiToWord32(x));
+      assembler->Goto(&do_clz32);
+    }
+
+    assembler->Bind(&if_xisnotsmi);
+    {
+      // Check if {x} is a HeapNumber.
+      Label if_xisheapnumber(assembler),
+          if_xisnotheapnumber(assembler, Label::kDeferred);
+      assembler->Branch(
+          assembler->WordEqual(assembler->LoadMap(x),
+                               assembler->HeapNumberMapConstant()),
+          &if_xisheapnumber, &if_xisnotheapnumber);
+
+      assembler->Bind(&if_xisheapnumber);
+      {
+        var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
+        assembler->Goto(&do_clz32);
+      }
+
+      assembler->Bind(&if_xisnotheapnumber);
+      {
+        // Need to convert {x} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_x.Bind(assembler->CallStub(callable, context, x));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_clz32);
+  {
+    Node* x_value = var_clz32_x.value();
+    Node* value = assembler->Word32Clz(x_value);
+    Node* result = assembler->ChangeInt32ToTagged(value);
+    assembler->Return(result);
+  }
+}
+
+// ES6 section 20.2.2.12 Math.cos ( x )
+void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Cos(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.13 Math.cosh ( x )
+void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Cosh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Exp(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
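+// Rounds the input to the nearest float32 value, e.g. (illustrative JS):
+// Math.fround(5.5) === 5.5 exactly, while Math.fround(0.1) yields
+// 0.10000000149011612.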
+void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
+  Node* value = assembler->ChangeFloat32ToFloat64(value32);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
+BUILTIN(MathHypot) {
+  HandleScope scope(isolate);
+  int const length = args.length() - 1;
+  if (length == 0) return Smi::FromInt(0);
+  DCHECK_LT(0, length);
+  double max = 0;
+  bool one_arg_is_nan = false;
+  List<double> abs_values(length);
+  for (int i = 0; i < length; i++) {
+    Handle<Object> x = args.at<Object>(i + 1);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+    double abs_value = std::abs(x->Number());
+
+    if (std::isnan(abs_value)) {
+      one_arg_is_nan = true;
+    } else {
+      abs_values.Add(abs_value);
+      if (max < abs_value) {
+        max = abs_value;
+      }
+    }
+  }
+
+  if (max == V8_INFINITY) {
+    return *isolate->factory()->NewNumber(V8_INFINITY);
+  }
+
+  if (one_arg_is_nan) {
+    return *isolate->factory()->nan_value();
+  }
+
+  if (max == 0) {
+    return Smi::FromInt(0);
+  }
+  DCHECK_GT(max, 0);
+
+  // Kahan summation to avoid rounding errors.
+  // Normalize the numbers to the largest one to avoid overflow.
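+  // Illustrative: a naive sum of squares for Math.hypot(1e300, 1e300) would
+  // overflow to Infinity, whereas dividing by max first computes
+  // 1e300 * sqrt(2). The compensation term re-adds the low-order bits that
+  // each (sum + summand) rounds away.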
+  double sum = 0;
+  double compensation = 0;
+  for (int i = 0; i < length; i++) {
+    double n = abs_values.at(i) / max;
+    double summand = n * n - compensation;
+    double preliminary = sum + summand;
+    compensation = (preliminary - sum) - summand;
+    sum = preliminary;
+  }
+
+  return *isolate->factory()->NewNumber(std::sqrt(sum) * max);
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
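+// The operands are truncated to 32-bit integers and multiplied with int32
+// wrap-around, e.g. (illustrative JS): Math.imul(0xffffffff, 5) === -5.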
+void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* y = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+  Node* x_value = assembler->TruncateTaggedToWord32(context, x);
+  Node* y_value = assembler->TruncateTaggedToWord32(context, y);
+  Node* value = assembler->Int32Mul(x_value, y_value);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.20 Math.log ( x )
+void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log1p(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log10(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Log2(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Expm1(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.26 Math.pow ( x, y )
+void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* y = assembler->Parameter(2);
+  Node* context = assembler->Parameter(5);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+  Node* value = assembler->Float64Pow(x_value, y_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.28 Math.round ( x )
+void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
+}
+
+// ES6 section 20.2.2.29 Math.sign ( x )
+void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  using compiler::Node;
+
+  // Convert the {x} value to a Number.
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+
+  // Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
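+  // Returning {x} itself in the remaining case preserves NaN as well as the
+  // sign of zero, e.g. (illustrative JS): Object.is(Math.sign(-0), -0).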
+  Label if_xisnegative(assembler), if_xispositive(assembler);
+  assembler->GotoIf(
+      assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
+      &if_xisnegative);
+  assembler->GotoIf(
+      assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
+      &if_xispositive);
+  assembler->Return(assembler->ChangeFloat64ToTagged(x_value));
+
+  assembler->Bind(&if_xisnegative);
+  assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));
+
+  assembler->Bind(&if_xispositive);
+  assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
+}
+
+// ES6 section 20.2.2.30 Math.sin ( x )
+void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Sin(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.31 Math.sinh ( x )
+void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Sinh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Sqrt(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.33 Math.tan ( x )
+void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Tan(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.34 Math.tanh ( x )
+void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* x = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* value = assembler->Float64Tanh(x_value);
+  Node* result = assembler->ChangeFloat64ToTagged(value);
+  assembler->Return(result);
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+}
+
+void Builtins::Generate_MathMax(MacroAssembler* masm) {
+  Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
+}
+
+void Builtins::Generate_MathMin(MacroAssembler* masm) {
+  Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-number.cc b/src/builtins/builtins-number.cc
new file mode 100644
index 0000000..c2af0fd
--- /dev/null
+++ b/src/builtins/builtins-number.cc
@@ -0,0 +1,235 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.1 Number Objects
+
+// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
+BUILTIN(NumberPrototypeToExponential) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
+
+  // Unwrap the receiver {value}.
+  if (value->IsJSValue()) {
+    value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+  }
+  if (!value->IsNumber()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Number.prototype.toExponential")));
+  }
+  double const value_number = value->Number();
+
+  // Convert the {fraction_digits} to an integer first.
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
+  double const fraction_digits_number = fraction_digits->Number();
+
+  if (std::isnan(value_number)) return isolate->heap()->nan_string();
+  if (std::isinf(value_number)) {
+    return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+                                : isolate->heap()->infinity_string();
+  }
+  if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kNumberFormatRange,
+                               isolate->factory()->NewStringFromAsciiChecked(
+                                   "toExponential()")));
+  }
+  int const f = args.atOrUndefined(isolate, 1)->IsUndefined(isolate)
+                    ? -1
+                    : static_cast<int>(fraction_digits_number);
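+  // A negative {f} lets DoubleToExponentialCString pick as many digits as
+  // needed, e.g. (123.456).toExponential() is "1.23456e+2", while
+  // (123.456).toExponential(2) is "1.23e+2".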
+  char* const str = DoubleToExponentialCString(value_number, f);
+  Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+  DeleteArray(str);
+  return *result;
+}
+
+// ES6 section 20.1.3.3 Number.prototype.toFixed ( fractionDigits )
+BUILTIN(NumberPrototypeToFixed) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
+
+  // Unwrap the receiver {value}.
+  if (value->IsJSValue()) {
+    value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+  }
+  if (!value->IsNumber()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Number.prototype.toFixed")));
+  }
+  double const value_number = value->Number();
+
+  // Convert the {fraction_digits} to an integer first.
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
+  double const fraction_digits_number = fraction_digits->Number();
+
+  // Check if the {fraction_digits} are in the supported range.
+  if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kNumberFormatRange,
+                               isolate->factory()->NewStringFromAsciiChecked(
+                                   "toFixed() digits")));
+  }
+
+  if (std::isnan(value_number)) return isolate->heap()->nan_string();
+  if (std::isinf(value_number)) {
+    return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+                                : isolate->heap()->infinity_string();
+  }
+  char* const str = DoubleToFixedCString(
+      value_number, static_cast<int>(fraction_digits_number));
+  Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+  DeleteArray(str);
+  return *result;
+}
+
+// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
+BUILTIN(NumberPrototypeToLocaleString) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at<Object>(0);
+
+  // Unwrap the receiver {value}.
+  if (value->IsJSValue()) {
+    value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+  }
+  if (!value->IsNumber()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Number.prototype.toLocaleString")));
+  }
+
+  // Turn the {value} into a String.
+  return *isolate->factory()->NumberToString(value);
+}
+
+// ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
+BUILTIN(NumberPrototypeToPrecision) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> precision = args.atOrUndefined(isolate, 1);
+
+  // Unwrap the receiver {value}.
+  if (value->IsJSValue()) {
+    value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+  }
+  if (!value->IsNumber()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Number.prototype.toPrecision")));
+  }
+  double const value_number = value->Number();
+
+  // If no {precision} was specified, just return ToString of {value}.
+  if (precision->IsUndefined(isolate)) {
+    return *isolate->factory()->NumberToString(value);
+  }
+
+  // Convert the {precision} to an integer first.
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, precision,
+                                     Object::ToInteger(isolate, precision));
+  double const precision_number = precision->Number();
+
+  if (std::isnan(value_number)) return isolate->heap()->nan_string();
+  if (std::isinf(value_number)) {
+    return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+                                : isolate->heap()->infinity_string();
+  }
+  if (precision_number < 1.0 || precision_number > 21.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kToPrecisionFormatRange));
+  }
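+  // e.g. (123.456).toPrecision(4) is "123.5" and (0.000123).toPrecision(2)
+  // is "0.00012".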
+  char* const str = DoubleToPrecisionCString(
+      value_number, static_cast<int>(precision_number));
+  Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+  DeleteArray(str);
+  return *result;
+}
+
+// ES6 section 20.1.3.6 Number.prototype.toString ( [ radix ] )
+BUILTIN(NumberPrototypeToString) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> radix = args.atOrUndefined(isolate, 1);
+
+  // Unwrap the receiver {value}.
+  if (value->IsJSValue()) {
+    value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+  }
+  if (!value->IsNumber()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Number.prototype.toString")));
+  }
+  double const value_number = value->Number();
+
+  // If no {radix} was specified, just return ToString of {value}.
+  if (radix->IsUndefined(isolate)) {
+    return *isolate->factory()->NumberToString(value);
+  }
+
+  // Convert the {radix} to an integer first.
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix,
+                                     Object::ToInteger(isolate, radix));
+  double const radix_number = radix->Number();
+
+  // If {radix} is 10, just return ToString of {value}.
+  if (radix_number == 10.0) return *isolate->factory()->NumberToString(value);
+
+  // Make sure the {radix} is within the valid range.
+  if (radix_number < 2.0 || radix_number > 36.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kToRadixFormatRange));
+  }
+
+  // Fast case where the result is a one character string.
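+  // e.g. (35).toString(36) is "z": any integral value below the radix maps
+  // to a single digit in kCharTable.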
+  if (IsUint32Double(value_number) && value_number < radix_number) {
+    // Character array used for conversion.
+    static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+    return *isolate->factory()->LookupSingleCharacterStringFromCode(
+        kCharTable[static_cast<uint32_t>(value_number)]);
+  }
+
+  // Slow case.
+  if (std::isnan(value_number)) return isolate->heap()->nan_string();
+  if (std::isinf(value_number)) {
+    return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+                                : isolate->heap()->infinity_string();
+  }
+  char* const str =
+      DoubleToRadixCString(value_number, static_cast<int>(radix_number));
+  Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+  DeleteArray(str);
+  return *result;
+}
+
+// ES6 section 20.1.3.7 Number.prototype.valueOf ( )
+void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* result = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf");
+  assembler->Return(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-object.cc b/src/builtins/builtins-object.cc
new file mode 100644
index 0000000..c422145
--- /dev/null
+++ b/src/builtins/builtins-object.cc
@@ -0,0 +1,914 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.1 Object Objects
+
+void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* object = assembler->Parameter(0);
+  Node* key = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  Label call_runtime(assembler), return_true(assembler),
+      return_false(assembler);
+
+  // Smi receivers do not have own properties.
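+  // e.g. Object.prototype.hasOwnProperty.call(5, "toString") is false.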
+  Label if_objectisnotsmi(assembler);
+  assembler->Branch(assembler->WordIsSmi(object), &return_false,
+                    &if_objectisnotsmi);
+  assembler->Bind(&if_objectisnotsmi);
+
+  Node* map = assembler->LoadMap(object);
+  Node* instance_type = assembler->LoadMapInstanceType(map);
+
+  Variable var_index(assembler, MachineRepresentation::kWord32);
+
+  Label keyisindex(assembler), if_iskeyunique(assembler);
+  assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
+                       &call_runtime);
+
+  assembler->Bind(&if_iskeyunique);
+  assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
+                               &return_false, &call_runtime);
+
+  assembler->Bind(&keyisindex);
+  assembler->TryLookupElement(object, map, instance_type, var_index.value(),
+                              &return_true, &return_false, &call_runtime);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
+                                           context, object, key));
+}
+
+namespace {
+
+MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
+                                       Handle<Object> next_source) {
+  // Non-empty strings are the only non-JSReceivers that need to be handled
+  // explicitly by Object.assign.
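+  // e.g. Object.assign({}, "ab") must copy the indexed properties "0" and
+  // "1", so only empty strings can be skipped here.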
+  if (!next_source->IsJSReceiver()) {
+    return Just(!next_source->IsString() ||
+                String::cast(*next_source)->length() == 0);
+  }
+
+  // If the target is deprecated, the object will be updated on first store. If
+  // the source for that store equals the target, this will invalidate the
+  // cached representation of the source. Preventively upgrade the target.
+  // Do this on each iteration since any property load could cause deprecation.
+  if (to->map()->is_deprecated()) {
+    JSObject::MigrateInstance(Handle<JSObject>::cast(to));
+  }
+
+  Isolate* isolate = to->GetIsolate();
+  Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+
+  if (!map->IsJSObjectMap()) return Just(false);
+  if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+  Handle<JSObject> from = Handle<JSObject>::cast(next_source);
+  if (from->elements() != isolate->heap()->empty_fixed_array()) {
+    return Just(false);
+  }
+
+  Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+  int length = map->NumberOfOwnDescriptors();
+
+  bool stable = true;
+
+  for (int i = 0; i < length; i++) {
+    Handle<Name> next_key(descriptors->GetKey(i), isolate);
+    Handle<Object> prop_value;
+    // Directly decode from the descriptor array if |from| did not change shape.
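+    // (Accessor getters can run arbitrary JS and transition |from| to a new
+    // map, which is why |stable| is re-checked after such calls.)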
+    if (stable) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (!details.IsEnumerable()) continue;
+      if (details.kind() == kData) {
+        if (details.location() == kDescriptor) {
+          prop_value = handle(descriptors->GetValue(i), isolate);
+        } else {
+          Representation representation = details.representation();
+          FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+          prop_value = JSObject::FastPropertyAt(from, representation, index);
+        }
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+            Nothing<bool>());
+        stable = from->map() == *map;
+      }
+    } else {
+      // If the map did change, do a slower lookup. We are still guaranteed that
+      // the object has a simple shape, and that the key is a name.
+      LookupIterator it(from, next_key, from,
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      if (!it.IsFound()) continue;
+      DCHECK(it.state() == LookupIterator::DATA ||
+             it.state() == LookupIterator::ACCESSOR);
+      if (!it.IsEnumerable()) continue;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+    }
+    LookupIterator it(to, next_key, to);
+    bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+    Maybe<bool> result = Object::SetProperty(
+        &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+    if (result.IsNothing()) return result;
+    if (stable && call_to_js) stable = from->map() == *map;
+  }
+
+  return Just(true);
+}
+
+}  // namespace
+
+// ES6 19.1.2.1 Object.assign
+BUILTIN(ObjectAssign) {
+  HandleScope scope(isolate);
+  Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+  // 1. Let to be ? ToObject(target).
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
+                                     Object::ToObject(isolate, target));
+  Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
+  // 2. If only one argument was passed, return to.
+  if (args.length() == 2) return *to;
+  // 3. Let sources be the List of argument values starting with the
+  //    second argument.
+  // 4. For each element nextSource of sources, in ascending index order,
+  for (int i = 2; i < args.length(); ++i) {
+    Handle<Object> next_source = args.at<Object>(i);
+    Maybe<bool> fast_assign = FastAssign(to, next_source);
+    if (fast_assign.IsNothing()) return isolate->heap()->exception();
+    if (fast_assign.FromJust()) continue;
+    // 4a. If nextSource is undefined or null, let keys be an empty List.
+    // 4b. Else,
+    // 4b i. Let from be ToObject(nextSource).
+    // Only non-empty strings and JSReceivers have enumerable properties.
+    Handle<JSReceiver> from =
+        Object::ToObject(isolate, next_source).ToHandleChecked();
+    // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
+    Handle<FixedArray> keys;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, keys, KeyAccumulator::GetKeys(
+                           from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                           GetKeysConversion::kKeepNumbers));
+    // 4c. Repeat for each element nextKey of keys in List order,
+    for (int j = 0; j < keys->length(); ++j) {
+      Handle<Object> next_key(keys->get(j), isolate);
+      // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+      PropertyDescriptor desc;
+      Maybe<bool> found =
+          JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+      if (found.IsNothing()) return isolate->heap()->exception();
+      // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+      if (found.FromJust() && desc.enumerable()) {
+        // 4c ii 1. Let propValue be ? Get(from, nextKey).
+        Handle<Object> prop_value;
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+            isolate, prop_value,
+            Runtime::GetObjectProperty(isolate, from, next_key));
+        // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+        Handle<Object> status;
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+            isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
+                                                        prop_value, STRICT));
+      }
+    }
+  }
+  // 5. Return to.
+  return *to;
+}
+
+// ES6 section 19.1.3.4 Object.prototype.propertyIsEnumerable ( V )
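+// e.g. ({ a: 1 }).propertyIsEnumerable("a") is true, while inherited and
+// non-enumerable own properties report false.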
+BUILTIN(ObjectPrototypePropertyIsEnumerable) {
+  HandleScope scope(isolate);
+  Handle<JSReceiver> object;
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, name, Object::ToName(isolate, args.atOrUndefined(isolate, 1)));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, object, JSReceiver::ToObject(isolate, args.receiver()));
+  Maybe<PropertyAttributes> maybe =
+      JSReceiver::GetOwnPropertyAttributes(object, name);
+  if (!maybe.IsJust()) return isolate->heap()->exception();
+  if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
+  return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
+}
+
+namespace {  // anonymous namespace for ObjectProtoToString()
+
+void IsString(CodeStubAssembler* assembler, compiler::Node* object,
+              CodeStubAssembler::Label* if_string,
+              CodeStubAssembler::Label* if_notstring) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+
+  Label if_notsmi(assembler);
+  assembler->Branch(assembler->WordIsSmi(object), if_notstring, &if_notsmi);
+
+  assembler->Bind(&if_notsmi);
+  {
+    Node* instance_type = assembler->LoadInstanceType(object);
+
+    assembler->Branch(
+        assembler->Int32LessThan(
+            instance_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+        if_string, if_notstring);
+  }
+}
+
+void ReturnToStringFormat(CodeStubAssembler* assembler, compiler::Node* context,
+                          compiler::Node* string) {
+  typedef compiler::Node Node;
+
+  Node* lhs = assembler->HeapConstant(
+      assembler->factory()->NewStringFromStaticChars("[object "));
+  Node* rhs = assembler->HeapConstant(
+      assembler->factory()->NewStringFromStaticChars("]"));
+
+  Callable callable = CodeFactory::StringAdd(
+      assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+
+  assembler->Return(assembler->CallStub(
+      callable, context, assembler->CallStub(callable, context, lhs, string),
+      rhs));
+}
+
+void ReturnIfPrimitive(CodeStubAssembler* assembler,
+                       compiler::Node* instance_type,
+                       CodeStubAssembler::Label* return_string,
+                       CodeStubAssembler::Label* return_boolean,
+                       CodeStubAssembler::Label* return_number) {
+  assembler->GotoIf(
+      assembler->Int32LessThan(instance_type,
+                               assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+      return_string);
+
+  assembler->GotoIf(assembler->Word32Equal(
+                        instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
+                    return_boolean);
+
+  assembler->GotoIf(
+      assembler->Word32Equal(instance_type,
+                             assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+      return_number);
+}
+
+}  // namespace
+
+// ES6 section 19.1.3.6 Object.prototype.toString
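+// e.g. ({ [Symbol.toStringTag]: "Foo" }).toString() is "[object Foo]"; a
+// non-string tag falls through to the builtin tag dispatch below.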
+void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Label return_undefined(assembler, Label::kDeferred),
+      return_null(assembler, Label::kDeferred),
+      return_arguments(assembler, Label::kDeferred), return_array(assembler),
+      return_api(assembler, Label::kDeferred), return_object(assembler),
+      return_regexp(assembler), return_function(assembler),
+      return_error(assembler), return_date(assembler), return_string(assembler),
+      return_boolean(assembler), return_jsvalue(assembler),
+      return_jsproxy(assembler, Label::kDeferred), return_number(assembler);
+
+  Label if_isproxy(assembler, Label::kDeferred);
+
+  Label checkstringtag(assembler);
+  Label if_tostringtag(assembler), if_notostringtag(assembler);
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  assembler->GotoIf(
+      assembler->WordEqual(receiver, assembler->UndefinedConstant()),
+      &return_undefined);
+
+  assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
+                    &return_null);
+
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &return_number);
+
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
+                    &return_boolean, &return_number);
+
+  // For proxies, check IsArray before getting @@toStringTag.
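+  // (IsArray on a proxy consults the proxy's target and throws for a revoked
+  // proxy, so it must run before the @@toStringTag load below, which can also
+  // execute user code.)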
+  Variable var_proxy_is_array(assembler, MachineRepresentation::kTagged);
+  var_proxy_is_array.Bind(assembler->BooleanConstant(false));
+
+  assembler->Branch(
+      assembler->Word32Equal(receiver_instance_type,
+                             assembler->Int32Constant(JS_PROXY_TYPE)),
+      &if_isproxy, &checkstringtag);
+
+  assembler->Bind(&if_isproxy);
+  {
+    // This runtime call can throw, e.g. for a revoked proxy.
+    var_proxy_is_array.Bind(
+        assembler->CallRuntime(Runtime::kArrayIsArray, context, receiver));
+    assembler->Goto(&checkstringtag);
+  }
+
+  assembler->Bind(&checkstringtag);
+  {
+    Node* to_string_tag_symbol = assembler->HeapConstant(
+        assembler->isolate()->factory()->to_string_tag_symbol());
+
+    GetPropertyStub stub(assembler->isolate());
+    Callable get_property =
+        Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+    Node* to_string_tag_value = assembler->CallStub(
+        get_property, context, receiver, to_string_tag_symbol);
+
+    IsString(assembler, to_string_tag_value, &if_tostringtag,
+             &if_notostringtag);
+
+    assembler->Bind(&if_tostringtag);
+    ReturnToStringFormat(assembler, context, to_string_tag_value);
+  }
+  assembler->Bind(&if_notostringtag);
+  {
+    size_t const kNumCases = 11;
+    Label* case_labels[kNumCases];
+    int32_t case_values[kNumCases];
+    case_labels[0] = &return_api;
+    case_values[0] = JS_API_OBJECT_TYPE;
+    case_labels[1] = &return_api;
+    case_values[1] = JS_SPECIAL_API_OBJECT_TYPE;
+    case_labels[2] = &return_arguments;
+    case_values[2] = JS_ARGUMENTS_TYPE;
+    case_labels[3] = &return_array;
+    case_values[3] = JS_ARRAY_TYPE;
+    case_labels[4] = &return_function;
+    case_values[4] = JS_BOUND_FUNCTION_TYPE;
+    case_labels[5] = &return_function;
+    case_values[5] = JS_FUNCTION_TYPE;
+    case_labels[6] = &return_error;
+    case_values[6] = JS_ERROR_TYPE;
+    case_labels[7] = &return_date;
+    case_values[7] = JS_DATE_TYPE;
+    case_labels[8] = &return_regexp;
+    case_values[8] = JS_REGEXP_TYPE;
+    case_labels[9] = &return_jsvalue;
+    case_values[9] = JS_VALUE_TYPE;
+    case_labels[10] = &return_jsproxy;
+    case_values[10] = JS_PROXY_TYPE;
+
+    assembler->Switch(receiver_instance_type, &return_object, case_values,
+                      case_labels, arraysize(case_values));
+
+    assembler->Bind(&return_undefined);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->undefined_to_string()));
+
+    assembler->Bind(&return_null);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->null_to_string()));
+
+    assembler->Bind(&return_number);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->number_to_string()));
+
+    assembler->Bind(&return_string);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->string_to_string()));
+
+    assembler->Bind(&return_boolean);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->boolean_to_string()));
+
+    assembler->Bind(&return_arguments);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->arguments_to_string()));
+
+    assembler->Bind(&return_array);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->array_to_string()));
+
+    assembler->Bind(&return_function);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->function_to_string()));
+
+    assembler->Bind(&return_error);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->error_to_string()));
+
+    assembler->Bind(&return_date);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->date_to_string()));
+
+    assembler->Bind(&return_regexp);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->regexp_to_string()));
+
+    assembler->Bind(&return_api);
+    {
+      Node* class_name =
+          assembler->CallRuntime(Runtime::kClassOf, context, receiver);
+      ReturnToStringFormat(assembler, context, class_name);
+    }
+
+    assembler->Bind(&return_jsvalue);
+    {
+      Node* value = assembler->LoadJSValueValue(receiver);
+      assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+
+      ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
+                        &return_string, &return_boolean, &return_number);
+      assembler->Goto(&return_object);
+    }
+
+    assembler->Bind(&return_jsproxy);
+    {
+      assembler->GotoIf(assembler->WordEqual(var_proxy_is_array.value(),
+                                             assembler->BooleanConstant(true)),
+                        &return_array);
+
+      Node* map = assembler->LoadMap(receiver);
+
+      // Return object if the proxy {receiver} is not callable.
+      assembler->Branch(
+          assembler->Word32Equal(
+              assembler->Word32And(
+                  assembler->LoadMapBitField(map),
+                  assembler->Int32Constant(1 << Map::kIsCallable)),
+              assembler->Int32Constant(0)),
+          &return_object, &return_function);
+    }
+
+    // Default
+    assembler->Bind(&return_object);
+    assembler->Return(assembler->HeapConstant(
+        assembler->isolate()->factory()->object_to_string()));
+  }
+}
+
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+// TODO(verwaest): Support the common cases with precached map directly in
+// an Object.create stub.
+BUILTIN(ObjectCreate) {
+  HandleScope scope(isolate);
+  Handle<Object> prototype = args.atOrUndefined(isolate, 1);
+  if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+  }
+
+  // Generate the map with the specified {prototype} based on the Object
+  // function's initial map from the current native context.
+  // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+  // slack tracking for Object.create.
+  Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+                  isolate);
+  if (map->prototype() != *prototype) {
+    if (prototype->IsNull(isolate)) {
+      map = isolate->object_with_null_prototype_map();
+    } else if (prototype->IsJSObject()) {
+      Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+      if (!js_prototype->map()->is_prototype_map()) {
+        JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+      }
+      Handle<PrototypeInfo> info =
+          Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+      // TODO(verwaest): Use inobject slack tracking for this map.
+      if (info->HasObjectCreateMap()) {
+        map = handle(info->ObjectCreateMap(), isolate);
+      } else {
+        map = Map::CopyInitialMap(map);
+        Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+        PrototypeInfo::SetObjectCreateMap(info, map);
+      }
+    } else {
+      map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+    }
+  }
+
+  // Actually allocate the object.
+  Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+
+  // Define the properties if {properties} was specified and is not undefined.
+  Handle<Object> properties = args.atOrUndefined(isolate, 2);
+  if (!properties->IsUndefined(isolate)) {
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, JSReceiver::DefineProperties(isolate, object, properties));
+  }
+
+  return *object;
+}
+
+// ES6 section 19.1.2.3 Object.defineProperties
+BUILTIN(ObjectDefineProperties) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> properties = args.at<Object>(2);
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, JSReceiver::DefineProperties(isolate, target, properties));
+}
+
+// ES6 section 19.1.2.4 Object.defineProperty
+BUILTIN(ObjectDefineProperty) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(4, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> attributes = args.at<Object>(3);
+
+  return JSReceiver::DefineProperty(isolate, target, key, attributes);
+}
+
+namespace {
+
+template <AccessorComponent which_accessor>
+Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
+                             Handle<Object> name, Handle<Object> accessor) {
+  // 1. Let O be ? ToObject(this value).
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, object));
+  // 2. If IsCallable(accessor) is false, throw a TypeError exception.
+  if (!accessor->IsCallable()) {
+    MessageTemplate::Template message =
+        which_accessor == ACCESSOR_GETTER
+            ? MessageTemplate::kObjectGetterExpectingFunction
+            : MessageTemplate::kObjectSetterExpectingFunction;
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message));
+  }
+  // 3. Let desc be PropertyDescriptor{[[Get]] or [[Set]]: accessor,
+  //                                   [[Enumerable]]: true,
+  //                                   [[Configurable]]: true}.
+  PropertyDescriptor desc;
+  if (which_accessor == ACCESSOR_GETTER) {
+    desc.set_get(accessor);
+  } else {
+    DCHECK(which_accessor == ACCESSOR_SETTER);
+    desc.set_set(accessor);
+  }
+  desc.set_enumerable(true);
+  desc.set_configurable(true);
+  // 4. Let key be ? ToPropertyKey(P).
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToPropertyKey(isolate, name));
+  // 5. Perform ? DefinePropertyOrThrow(O, key, desc).
+  // To preserve legacy behavior, we ignore errors silently rather than
+  // throwing an exception.
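+  // e.g. with this builtin, Object.freeze(o); o.__defineGetter__("x", f)
+  // returns undefined where Object.defineProperty would throw a TypeError.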
+  Maybe<bool> success = JSReceiver::DefineOwnProperty(
+      isolate, receiver, name, &desc, Object::DONT_THROW);
+  MAYBE_RETURN(success, isolate->heap()->exception());
+  if (!success.FromJust()) {
+    isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
+  }
+  // 6. Return undefined.
+  return isolate->heap()->undefined_value();
+}
+
+Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
+                             Handle<Object> key, AccessorComponent component) {
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+                                     Object::ConvertReceiver(isolate, object));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                     Object::ToPropertyKey(isolate, key));
+  bool success = false;
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, object, key, &success,
+      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  DCHECK(success);
+
+  for (; it.IsFound(); it.Next()) {
+    switch (it.state()) {
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+
+      case LookupIterator::ACCESS_CHECK:
+        if (it.HasAccess()) continue;
+        isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+        RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+        return isolate->heap()->undefined_value();
+
+      case LookupIterator::JSPROXY:
+        return isolate->heap()->undefined_value();
+
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        return isolate->heap()->undefined_value();
+      case LookupIterator::DATA:
+        continue;
+      case LookupIterator::ACCESSOR: {
+        Handle<Object> maybe_pair = it.GetAccessors();
+        if (maybe_pair->IsAccessorPair()) {
+          return *AccessorPair::GetComponent(
+              Handle<AccessorPair>::cast(maybe_pair), component);
+        }
+      }
+    }
+  }
+
+  return isolate->heap()->undefined_value();
+}
+
+}  // namespace
+
+// ES6 B.2.2.2 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
+BUILTIN(ObjectDefineGetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);  // Receiver.
+  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> getter = args.at<Object>(2);
+  return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
+}
+
+// ES6 B.2.2.3 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
+BUILTIN(ObjectDefineSetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);  // Receiver.
+  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> setter = args.at<Object>(2);
+  return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
+}
+
+// ES6 B.2.2.4 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
+BUILTIN(ObjectLookupGetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> name = args.at<Object>(1);
+  return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
+}
+
+// ES6 B.2.2.5 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
+BUILTIN(ObjectLookupSetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> name = args.at<Object>(1);
+  return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
+}
+
+// ES6 section 19.1.2.5 Object.freeze ( O )
+BUILTIN(ObjectFreeze) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  if (object->IsJSReceiver()) {
+    MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+                                               FROZEN, Object::THROW_ON_ERROR),
+                 isolate->heap()->exception());
+  }
+  return *object;
+}
+
+// ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
+BUILTIN(ObjectGetPrototypeOf) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
+BUILTIN(ObjectGetOwnPropertyDescriptor) {
+  HandleScope scope(isolate);
+  // 1. Let obj be ? ToObject(O).
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  // 2. Let key be ? ToPropertyKey(P).
+  Handle<Object> property = args.atOrUndefined(isolate, 2);
+  Handle<Name> key;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                     Object::ToName(isolate, property));
+  // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+  PropertyDescriptor desc;
+  Maybe<bool> found =
+      JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
+  MAYBE_RETURN(found, isolate->heap()->exception());
+  // 4. Return FromPropertyDescriptor(desc).
+  if (!found.FromJust()) return isolate->heap()->undefined_value();
+  return *desc.ToObject(isolate);
+}
+
+namespace {
+
+Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
+                           PropertyFilter filter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, keys,
+      KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+                              GetKeysConversion::kConvertToString));
+  return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+}  // namespace
+
+// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
+BUILTIN(ObjectGetOwnPropertyNames) {
+  return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
+}
+
+// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
+BUILTIN(ObjectGetOwnPropertySymbols) {
+  return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
+}
+
+// ES6 section 19.1.2.10 Object.is ( value1, value2 )
+BUILTIN(ObjectIs) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> value1 = args.at<Object>(1);
+  Handle<Object> value2 = args.at<Object>(2);
+  return isolate->heap()->ToBoolean(value1->SameValue(*value2));
+}
+
+// ES6 section 19.1.2.11 Object.isExtensible ( O )
+BUILTIN(ObjectIsExtensible) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Maybe<bool> result =
+      object->IsJSReceiver()
+          ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+          : Just(false);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.12 Object.isFrozen ( O )
+BUILTIN(ObjectIsFrozen) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Maybe<bool> result = object->IsJSReceiver()
+                           ? JSReceiver::TestIntegrityLevel(
+                                 Handle<JSReceiver>::cast(object), FROZEN)
+                           : Just(true);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.13 Object.isSealed ( O )
+BUILTIN(ObjectIsSealed) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Maybe<bool> result = object->IsJSReceiver()
+                           ? JSReceiver::TestIntegrityLevel(
+                                 Handle<JSReceiver>::cast(object), SEALED)
+                           : Just(true);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.14 Object.keys ( O )
+BUILTIN(ObjectKeys) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+
+  Handle<FixedArray> keys;
+  int enum_length = receiver->map()->EnumLength();
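+  // Fast path: reuse the enum cache built by for-in iteration. It is only
+  // valid for fast-mode JSObjects without elements, interceptors, access
+  // checks or hidden prototypes, as asserted below.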
+  if (enum_length != kInvalidEnumCacheSentinel &&
+      JSObject::cast(*receiver)->elements() ==
+          isolate->heap()->empty_fixed_array()) {
+    DCHECK(receiver->IsJSObject());
+    DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
+    DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
+    DCHECK(!receiver->map()->has_hidden_prototype());
+    DCHECK(JSObject::cast(*receiver)->HasFastProperties());
+    if (enum_length == 0) {
+      keys = isolate->factory()->empty_fixed_array();
+    } else {
+      Handle<FixedArray> cache(
+          receiver->map()->instance_descriptors()->GetEnumCache());
+      keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
+    }
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, keys,
+        KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+                                ENUMERABLE_STRINGS,
+                                GetKeysConversion::kConvertToString));
+  }
+  return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
+}
+
+BUILTIN(ObjectValues) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> values;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
+  return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+BUILTIN(ObjectEntries) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> entries;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, entries,
+      JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
+  return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+BUILTIN(ObjectGetOwnPropertyDescriptors) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, keys, KeyAccumulator::GetKeys(
+                         receiver, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                         GetKeysConversion::kConvertToString));
+
+  Handle<JSObject> descriptors =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  for (int i = 0; i < keys->length(); ++i) {
+    Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
+    PropertyDescriptor descriptor;
+    Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+        isolate, receiver, key, &descriptor);
+    MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
+
+    if (!did_get_descriptor.FromJust()) continue;
+    Handle<Object> from_descriptor = descriptor.ToObject(isolate);
+
+    LookupIterator it = LookupIterator::PropertyOrElement(
+        isolate, descriptors, key, descriptors, LookupIterator::OWN);
+    Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
+                                                         Object::DONT_THROW);
+    CHECK(success.FromJust());
+  }
+
+  return *descriptors;
+}
+
+// ES6 section 19.1.2.15 Object.preventExtensions ( O )
+BUILTIN(ObjectPreventExtensions) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  if (object->IsJSReceiver()) {
+    MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+                                               Object::THROW_ON_ERROR),
+                 isolate->heap()->exception());
+  }
+  return *object;
+}
+
+// ES6 section 19.1.2.17 Object.seal ( O )
+BUILTIN(ObjectSeal) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  if (object->IsJSReceiver()) {
+    MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+                                               SEALED, Object::THROW_ON_ERROR),
+                 isolate->heap()->exception());
+  }
+  return *object;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-proxy.cc b/src/builtins/builtins-proxy.cc
new file mode 100644
index 0000000..05ba304
--- /dev/null
+++ b/src/builtins/builtins-proxy.cc
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+BUILTIN(ProxyConstructor) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError(MessageTemplate::kConstructorNotFunction,
+                   isolate->factory()->NewStringFromAsciiChecked("Proxy")));
+}
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+BUILTIN(ProxyConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  DCHECK(isolate->proxy_function()->IsConstructor());
+  Handle<Object> target = args.atOrUndefined(isolate, 1);
+  Handle<Object> handler = args.atOrUndefined(isolate, 2);
+  RETURN_RESULT_OR_FAILURE(isolate, JSProxy::New(isolate, target, handler));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-reflect.cc b/src/builtins/builtins-reflect.cc
new file mode 100644
index 0000000..b4d16c4
--- /dev/null
+++ b/src/builtins/builtins-reflect.cc
@@ -0,0 +1,274 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 26.1 The Reflect Object
+
+// ES6 section 26.1.3 Reflect.defineProperty
+BUILTIN(ReflectDefineProperty) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(4, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> attributes = args.at<Object>(3);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.defineProperty")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  PropertyDescriptor desc;
+  if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+    return isolate->heap()->exception();
+  }
+
+  Maybe<bool> result =
+      JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
+                                    name, &desc, Object::DONT_THROW);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.4 Reflect.deleteProperty
+BUILTIN(ReflectDeleteProperty) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.deleteProperty")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
+      Handle<JSReceiver>::cast(target), name, SLOPPY);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.6 Reflect.get
+BUILTIN(ReflectGet) {
+  HandleScope scope(isolate);
+  Handle<Object> target = args.atOrUndefined(isolate, 1);
+  Handle<Object> key = args.atOrUndefined(isolate, 2);
+  Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
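+  // e.g. Reflect.get({ get x() { return this.y; } }, "x", { y: 42 }) is 42:
+  // the optional receiver becomes the |this| value for accessor properties.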
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.get")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, Object::GetPropertyOrElement(receiver, name,
+                                            Handle<JSReceiver>::cast(target)));
+}
+
+// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
+BUILTIN(ReflectGetOwnPropertyDescriptor) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.getOwnPropertyDescriptor")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  PropertyDescriptor desc;
+  Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+      isolate, Handle<JSReceiver>::cast(target), name, &desc);
+  MAYBE_RETURN(found, isolate->heap()->exception());
+  if (!found.FromJust()) return isolate->heap()->undefined_value();
+  return *desc.ToObject(isolate);
+}
+
+// ES6 section 26.1.8 Reflect.getPrototypeOf
+BUILTIN(ReflectGetPrototypeOf) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> target = args.at<Object>(1);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.getPrototypeOf")));
+  }
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section 26.1.9 Reflect.has
+BUILTIN(ReflectHas) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.has")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  Maybe<bool> result =
+      JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
+  return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
+                         : isolate->heap()->exception();
+}
+
+// ES6 section 26.1.10 Reflect.isExtensible
+BUILTIN(ReflectIsExtensible) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> target = args.at<Object>(1);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.isExtensible")));
+  }
+
+  Maybe<bool> result =
+      JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.11 Reflect.ownKeys
+BUILTIN(ReflectOwnKeys) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> target = args.at<Object>(1);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.ownKeys")));
+  }
+
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, keys,
+      KeyAccumulator::GetKeys(Handle<JSReceiver>::cast(target),
+                              KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                              GetKeysConversion::kConvertToString));
+  return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+// ES6 section 26.1.12 Reflect.preventExtensions
+BUILTIN(ReflectPreventExtensions) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> target = args.at<Object>(1);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.preventExtensions")));
+  }
+
+  Maybe<bool> result = JSReceiver::PreventExtensions(
+      Handle<JSReceiver>::cast(target), Object::DONT_THROW);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.13 Reflect.set
+BUILTIN(ReflectSet) {
+  HandleScope scope(isolate);
+  Handle<Object> target = args.atOrUndefined(isolate, 1);
+  Handle<Object> key = args.atOrUndefined(isolate, 2);
+  Handle<Object> value = args.atOrUndefined(isolate, 3);
+  Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
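+  // As with Reflect.get, the optional receiver is the object the store is
+  // actually performed on, e.g. it becomes |this| inside accessor setters.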
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.set")));
+  }
+
+  Handle<Name> name;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToName(isolate, key));
+
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, receiver, name, Handle<JSReceiver>::cast(target));
+  Maybe<bool> result = Object::SetSuperProperty(
+      &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.14 Reflect.setPrototypeOf
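+// Illustrative JS semantics (informal examples):
+//   Reflect.setPrototypeOf({}, null)  // true
+//   Reflect.setPrototypeOf({}, 42)    // throws TypeError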
+BUILTIN(ReflectSetPrototypeOf) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> proto = args.at<Object>(2);
+
+  if (!target->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Reflect.setPrototypeOf")));
+  }
+
+  if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+  }
+
+  Maybe<bool> result = JSReceiver::SetPrototype(
+      Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc
new file mode 100644
index 0000000..23d4f43
--- /dev/null
+++ b/src/builtins/builtins-sharedarraybuffer.cc
@@ -0,0 +1,266 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength
+BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSArrayBuffer, array_buffer,
+                 "get SharedArrayBuffer.prototype.byteLength");
+  if (!array_buffer->is_shared()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "get SharedArrayBuffer.prototype.byteLength"),
+                              args.receiver()));
+  }
+  return array_buffer->byte_length();
+}
+
+namespace {
+
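+// Throws (via the runtime) if {tagged} is not a JSTypedArray with an integer
+// element type whose buffer is a SharedArrayBuffer; otherwise returns the
+// elements' instance type in {out_instance_type} and the effective address
+// of the backing store (base plus byte offset) in {out_backing_store}.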
+void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
+                              compiler::Node* context,
+                              compiler::Node** out_instance_type,
+                              compiler::Node** out_backing_store) {
+  using namespace compiler;
+  CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
+      not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
+      not_float_or_clamped(a), invalid(a);
+
+  // Fail if it is not a heap object.
+  a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
+  a->Bind(&is_smi);
+  a->Goto(&invalid);
+
+  // Fail if the array's instance type is not JSTypedArray.
+  a->Bind(&not_smi);
+  a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
+                         a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+            &is_typed_array, &not_typed_array);
+  a->Bind(&not_typed_array);
+  a->Goto(&invalid);
+
+  // Fail if the array's JSArrayBuffer is not shared.
+  a->Bind(&is_typed_array);
+  Node* array_buffer = a->LoadObjectField(tagged, JSTypedArray::kBufferOffset);
+  Node* is_buffer_shared = a->BitFieldDecode<JSArrayBuffer::IsShared>(
+      a->LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldSlot));
+  a->Branch(is_buffer_shared, &is_shared, &not_shared);
+  a->Bind(&not_shared);
+  a->Goto(&invalid);
+
+  // Fail if the array's element type is float32, float64 or clamped.
+  a->Bind(&is_shared);
+  Node* elements_instance_type = a->LoadInstanceType(
+      a->LoadObjectField(tagged, JSObject::kElementsOffset));
+  STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  a->Branch(a->Int32LessThan(elements_instance_type,
+                             a->Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
+            &not_float_or_clamped, &is_float_or_clamped);
+  a->Bind(&is_float_or_clamped);
+  a->Goto(&invalid);
+
+  a->Bind(&invalid);
+  a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
+                 tagged);
+  a->Return(a->UndefinedConstant());
+
+  a->Bind(&not_float_or_clamped);
+  *out_instance_type = elements_instance_type;
+
+  Node* backing_store =
+      a->LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset);
+  Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
+      context,
+      a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
+  *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
+}
+
+// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
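+// Converts {tagged} to a number and returns the resulting index as a word32;
+// throws if the value is not exactly representable as an int32.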
+compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
+                                                 compiler::Node* tagged,
+                                                 compiler::Node* context) {
+  using namespace compiler;
+  CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
+
+  Callable to_number = CodeFactory::ToNumber(a->isolate());
+  Node* number_index = a->CallStub(to_number, context, tagged);
+  CodeStubAssembler::Label done(a, &var_result);
+
+  CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
+  a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
+
+  a->Bind(&if_numberissmi);
+  {
+    var_result.Bind(a->SmiToWord32(number_index));
+    a->Goto(&done);
+  }
+
+  a->Bind(&if_numberisnotsmi);
+  {
+    Node* number_index_value = a->LoadHeapNumberValue(number_index);
+    Node* access_index = a->TruncateFloat64ToWord32(number_index_value);
+    Node* test_index = a->ChangeInt32ToFloat64(access_index);
+
+    CodeStubAssembler::Label if_indexesareequal(a), if_indexesarenotequal(a);
+    a->Branch(a->Float64Equal(number_index_value, test_index),
+              &if_indexesareequal, &if_indexesarenotequal);
+
+    a->Bind(&if_indexesareequal);
+    {
+      var_result.Bind(access_index);
+      a->Goto(&done);
+    }
+
+    a->Bind(&if_indexesarenotequal);
+    a->Return(
+        a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+  }
+
+  a->Bind(&done);
+  return var_result.value();
+}
+
+void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
+                         compiler::Node* array_length_word,
+                         compiler::Node* context) {
+  using namespace compiler;
+  // Check if the index is in bounds. If not, throw RangeError.
+  CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
+  a->Branch(
+      a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
+                a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+      &if_notinbounds, &if_inbounds);
+  a->Bind(&if_notinbounds);
+  a->Return(
+      a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+  a->Bind(&if_inbounds);
+}
+
+}  // anonymous namespace
+
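+// Illustrative JS semantics (informal examples):
+//   const i32 = new Int32Array(new SharedArrayBuffer(8));
+//   Atomics.load(i32, 0);   // 0 (SharedArrayBuffer memory is zero-filled)
+//   Atomics.load(i32, 99);  // throws, index out of bounds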
+void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
+  using namespace compiler;
+  Node* array = a->Parameter(1);
+  Node* index = a->Parameter(2);
+  Node* context = a->Parameter(3 + 2);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+  Node* array_length_word32 = a->TruncateTaggedToWord32(
+      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+  Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+  CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
+      other(a);
+  int32_t case_values[] = {
+      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+  };
+  CodeStubAssembler::Label* case_labels[] = {
+      &i8, &u8, &i16, &u16, &i32, &u32,
+  };
+  a->Switch(instance_type, &other, case_values, case_labels,
+            arraysize(case_labels));
+
+  a->Bind(&i8);
+  a->Return(
+      a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+
+  a->Bind(&u8);
+  a->Return(a->SmiTag(
+      a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+
+  a->Bind(&i16);
+  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
+                                    a->WordShl(index_word, 1))));
+
+  a->Bind(&u16);
+  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
+                                    a->WordShl(index_word, 1))));
+
+  a->Bind(&i32);
+  a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
+      MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+
+  a->Bind(&u32);
+  a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
+      MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+
+  // This should never happen; the elements type was validated above.
+  a->Bind(&other);
+  a->Return(a->Int32Constant(0));
+}
+
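+// Illustrative JS semantics (informal examples):
+//   const i32 = new Int32Array(new SharedArrayBuffer(8));
+//   Atomics.store(i32, 0, 42);   // returns 42
+//   Atomics.store(i32, 0, "7");  // returns 7 (value converted via ToInteger)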
+void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
+  using namespace compiler;
+  Node* array = a->Parameter(1);
+  Node* index = a->Parameter(2);
+  Node* value = a->Parameter(3);
+  Node* context = a->Parameter(4 + 2);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+  Node* array_length_word32 = a->TruncateTaggedToWord32(
+      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+  Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+  Callable to_integer = CodeFactory::ToInteger(a->isolate());
+  Node* value_integer = a->CallStub(to_integer, context, value);
+  Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+
+  CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+  int32_t case_values[] = {
+      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+  };
+  CodeStubAssembler::Label* case_labels[] = {
+      &u8, &u8, &u16, &u16, &u32, &u32,
+  };
+  a->Switch(instance_type, &other, case_values, case_labels,
+            arraysize(case_labels));
+
+  a->Bind(&u8);
+  a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+                 value_word32);
+  a->Return(value_integer);
+
+  a->Bind(&u16);
+  a->AtomicStore(MachineRepresentation::kWord16, backing_store,
+                 a->WordShl(index_word, 1), value_word32);
+  a->Return(value_integer);
+
+  a->Bind(&u32);
+  a->AtomicStore(MachineRepresentation::kWord32, backing_store,
+                 a->WordShl(index_word, 2), value_word32);
+  a->Return(value_integer);
+
+  // This should never happen; the elements type was validated above.
+  a->Bind(&other);
+  a->Return(a->Int32Constant(0));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-string.cc b/src/builtins/builtins-string.cc
new file mode 100644
index 0000000..d38f6b0
--- /dev/null
+++ b/src/builtins/builtins-string.cc
@@ -0,0 +1,526 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 21.1 String Objects
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
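+// Illustrative JS semantics (informal examples):
+//   String.fromCharCode(65, 66, 67)  // "ABC"
+//   String.fromCharCode(0x1F600)     // "\uF600" (code units are truncated
+//                                    // to 16 bits)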
+void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* code = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check if we have exactly one argument (plus the implicit receiver), i.e.
+  // if the parent frame is not an arguments adaptor frame.
+  Label if_oneargument(assembler), if_notoneargument(assembler);
+  Node* parent_frame_pointer = assembler->LoadParentFramePointer();
+  Node* parent_frame_type =
+      assembler->Load(MachineType::Pointer(), parent_frame_pointer,
+                      assembler->IntPtrConstant(
+                          CommonFrameConstants::kContextOrFrameTypeOffset));
+  assembler->Branch(
+      assembler->WordEqual(
+          parent_frame_type,
+          assembler->SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
+      &if_notoneargument, &if_oneargument);
+
+  assembler->Bind(&if_oneargument);
+  {
+    // Single argument case: perform a fast single-character string cache
+    // lookup for one-byte code units, or fall back to creating the
+    // single-character string on the fly otherwise.
+    Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+    Node* code16 = assembler->Word32And(
+        code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+    Node* result = assembler->StringFromCharCode(code16);
+    assembler->Return(result);
+  }
+
+  assembler->Bind(&if_notoneargument);
+  {
+    // Determine the resulting string length.
+    Node* length = assembler->LoadAndUntagSmi(
+        parent_frame_pointer, ArgumentsAdaptorFrameConstants::kLengthOffset);
+
+    // Assume that the resulting string contains only one-byte characters.
+    Node* result = assembler->AllocateSeqOneByteString(context, length);
+
+    // Truncate all input parameters and append them to the resulting string.
+    Variable var_offset(assembler, MachineType::PointerRepresentation());
+    Label loop(assembler, &var_offset), done_loop(assembler);
+    var_offset.Bind(assembler->IntPtrConstant(0));
+    assembler->Goto(&loop);
+    assembler->Bind(&loop);
+    {
+      // Load the current {offset}.
+      Node* offset = var_offset.value();
+
+      // Check if we're done with the string.
+      assembler->GotoIf(assembler->WordEqual(offset, length), &done_loop);
+
+      // Load the next argument and truncate it to a 16-bit code unit.
+      Node* code = assembler->Load(
+          MachineType::AnyTagged(), parent_frame_pointer,
+          assembler->IntPtrAdd(
+              assembler->WordShl(assembler->IntPtrSub(length, offset),
+                                 assembler->IntPtrConstant(kPointerSizeLog2)),
+              assembler->IntPtrConstant(
+                  CommonFrameConstants::kFixedFrameSizeAboveFp -
+                  kPointerSize)));
+      Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+      Node* code16 = assembler->Word32And(
+          code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+      // Check if {code16} fits into a one-byte string.
+      Label if_codeisonebyte(assembler), if_codeistwobyte(assembler);
+      assembler->Branch(
+          assembler->Int32LessThanOrEqual(
+              code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+          &if_codeisonebyte, &if_codeistwobyte);
+
+      assembler->Bind(&if_codeisonebyte);
+      {
+        // The {code16} fits into the SeqOneByteString {result}.
+        assembler->StoreNoWriteBarrier(
+            MachineRepresentation::kWord8, result,
+            assembler->IntPtrAdd(
+                assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                          kHeapObjectTag),
+                offset),
+            code16);
+        var_offset.Bind(
+            assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+        assembler->Goto(&loop);
+      }
+
+      assembler->Bind(&if_codeistwobyte);
+      {
+        // Allocate a SeqTwoByteString to hold the resulting string.
+        Node* cresult = assembler->AllocateSeqTwoByteString(context, length);
+
+        // Copy all characters that were previously written to the
+        // SeqOneByteString in {result} over to the new {cresult}.
+        Variable var_coffset(assembler, MachineType::PointerRepresentation());
+        Label cloop(assembler, &var_coffset), done_cloop(assembler);
+        var_coffset.Bind(assembler->IntPtrConstant(0));
+        assembler->Goto(&cloop);
+        assembler->Bind(&cloop);
+        {
+          Node* coffset = var_coffset.value();
+          assembler->GotoIf(assembler->WordEqual(coffset, offset), &done_cloop);
+          Node* ccode = assembler->Load(
+              MachineType::Uint8(), result,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  coffset));
+          assembler->StoreNoWriteBarrier(
+              MachineRepresentation::kWord16, cresult,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  assembler->WordShl(coffset, 1)),
+              ccode);
+          var_coffset.Bind(
+              assembler->IntPtrAdd(coffset, assembler->IntPtrConstant(1)));
+          assembler->Goto(&cloop);
+        }
+
+        // Write the pending {code16} to {offset}.
+        assembler->Bind(&done_cloop);
+        assembler->StoreNoWriteBarrier(
+            MachineRepresentation::kWord16, cresult,
+            assembler->IntPtrAdd(
+                assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                          kHeapObjectTag),
+                assembler->WordShl(offset, 1)),
+            code16);
+
+        // Copy the remaining parameters to the SeqTwoByteString {cresult}.
+        Label floop(assembler, &var_offset), done_floop(assembler);
+        assembler->Goto(&floop);
+        assembler->Bind(&floop);
+        {
+          // Compute the next {offset}.
+          Node* offset = assembler->IntPtrAdd(var_offset.value(),
+                                              assembler->IntPtrConstant(1));
+
+          // Check if we're done with the string.
+          assembler->GotoIf(assembler->WordEqual(offset, length), &done_floop);
+
+          // Load the next argument and truncate it to a 16-bit code unit.
+          Node* code = assembler->Load(
+              MachineType::AnyTagged(), parent_frame_pointer,
+              assembler->IntPtrAdd(
+                  assembler->WordShl(
+                      assembler->IntPtrSub(length, offset),
+                      assembler->IntPtrConstant(kPointerSizeLog2)),
+                  assembler->IntPtrConstant(
+                      CommonFrameConstants::kFixedFrameSizeAboveFp -
+                      kPointerSize)));
+          Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+          Node* code16 = assembler->Word32And(
+              code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+          // Store the truncated code unit {code16} at the next offset.
+          assembler->StoreNoWriteBarrier(
+              MachineRepresentation::kWord16, cresult,
+              assembler->IntPtrAdd(
+                  assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                            kHeapObjectTag),
+                  assembler->WordShl(offset, 1)),
+              code16);
+          var_offset.Bind(offset);
+          assembler->Goto(&floop);
+        }
+
+        // Return the SeqTwoByteString.
+        assembler->Bind(&done_floop);
+        assembler->Return(cresult);
+      }
+    }
+
+    assembler->Bind(&done_loop);
+    assembler->Return(result);
+  }
+}
+
+namespace {  // for String.fromCodePoint
+
+bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
+  if (!value->IsNumber() && !Object::ToNumber(value).ToHandle(&value)) {
+    return false;
+  }
+
+  if (Object::ToInteger(isolate, value).ToHandleChecked()->Number() !=
+      value->Number()) {
+    return false;
+  }
+
+  if (value->Number() < 0 || value->Number() > 0x10FFFF) {
+    return false;
+  }
+
+  return true;
+}
+
+uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
+  Handle<Object> value = args.at<Object>(1 + index);
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
+  if (!IsValidCodePoint(isolate, value)) {
+    isolate->Throw(*isolate->factory()->NewRangeError(
+        MessageTemplate::kInvalidCodePoint, value));
+    return -1;
+  }
+  return DoubleToUint32(value->Number());
+}
+
+}  // namespace
+
+// ES6 section 21.1.2.2 String.fromCodePoint ( ...codePoints )
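+// Illustrative JS semantics (informal examples):
+//   String.fromCodePoint(65, 0x1F600)  // "A\uD83D\uDE00" (surrogate pair)
+//   String.fromCodePoint(0x110000)     // throws RangeError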
+BUILTIN(StringFromCodePoint) {
+  HandleScope scope(isolate);
+  int const length = args.length() - 1;
+  if (length == 0) return isolate->heap()->empty_string();
+  DCHECK_LT(0, length);
+
+  // Optimistically assume that the resulting String contains only one-byte
+  // characters.
+  List<uint8_t> one_byte_buffer(length);
+  uc32 code = 0;
+  int index;
+  for (index = 0; index < length; index++) {
+    code = NextCodePoint(isolate, args, index);
+    if (code < 0) {
+      return isolate->heap()->exception();
+    }
+    if (code > String::kMaxOneByteCharCode) {
+      break;
+    }
+    one_byte_buffer.Add(code);
+  }
+
+  if (index == length) {
+    RETURN_RESULT_OR_FAILURE(isolate, isolate->factory()->NewStringFromOneByte(
+                                          one_byte_buffer.ToConstVector()));
+  }
+
+  List<uc16> two_byte_buffer(length - index);
+
+  while (true) {
+    if (code <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      two_byte_buffer.Add(code);
+    } else {
+      two_byte_buffer.Add(unibrow::Utf16::LeadSurrogate(code));
+      two_byte_buffer.Add(unibrow::Utf16::TrailSurrogate(code));
+    }
+
+    if (++index == length) {
+      break;
+    }
+    code = NextCodePoint(isolate, args, index);
+    if (code < 0) {
+      return isolate->heap()->exception();
+    }
+  }
+
+  Handle<SeqTwoByteString> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewRawTwoByteString(one_byte_buffer.length() +
+                                              two_byte_buffer.length()));
+
+  CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
+            one_byte_buffer.length());
+  CopyChars(result->GetChars() + one_byte_buffer.length(),
+            two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+
+  return *result;
+}
+
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
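+// Illustrative JS semantics (informal examples):
+//   "abc".charAt(1)     // "b"
+//   "abc".charAt(5)     // ""
+//   "abc".charAt(-0.0)  // "a" (-0 is treated as 0)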
+void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* position = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  receiver =
+      assembler->ToThisString(context, receiver, "String.prototype.charAt");
+
+  // Convert the {position} to a Smi and check that it's in bounds of the
+  // {receiver}.
+  // TODO(bmeurer): Find an abstraction for this!
+  {
+    // Check if the {position} is already a Smi.
+    Variable var_position(assembler, MachineRepresentation::kTagged);
+    var_position.Bind(position);
+    Label if_positionissmi(assembler),
+        if_positionisnotsmi(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+                      &if_positionisnotsmi);
+    assembler->Bind(&if_positionisnotsmi);
+    {
+      // Convert the {position} to an Integer via the ToIntegerStub.
+      Callable callable = CodeFactory::ToInteger(assembler->isolate());
+      Node* index = assembler->CallStub(callable, context, position);
+
+      // Check if the resulting {index} is now a Smi.
+      Label if_indexissmi(assembler, Label::kDeferred),
+          if_indexisnotsmi(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+                        &if_indexisnotsmi);
+
+      assembler->Bind(&if_indexissmi);
+      {
+        var_position.Bind(index);
+        assembler->Goto(&if_positionissmi);
+      }
+
+      assembler->Bind(&if_indexisnotsmi);
+      {
+        // The ToIntegerStub canonicalizes everything in Smi range to Smi
+        // representation, so any HeapNumber returned is not in Smi range.
+        // The only exception here is -0.0, which we treat as 0.
+        Node* index_value = assembler->LoadHeapNumberValue(index);
+        Label if_indexiszero(assembler, Label::kDeferred),
+            if_indexisnotzero(assembler, Label::kDeferred);
+        assembler->Branch(assembler->Float64Equal(
+                              index_value, assembler->Float64Constant(0.0)),
+                          &if_indexiszero, &if_indexisnotzero);
+
+        assembler->Bind(&if_indexiszero);
+        {
+          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+          assembler->Goto(&if_positionissmi);
+        }
+
+        assembler->Bind(&if_indexisnotzero);
+        {
+          // The {index} is some other integral Number that is definitely
+          // neither -0.0 nor in Smi range.
+          assembler->Return(assembler->EmptyStringConstant());
+        }
+      }
+    }
+    assembler->Bind(&if_positionissmi);
+    position = var_position.value();
+
+    // Determine the actual length of the {receiver} String.
+    Node* receiver_length =
+        assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+    // Return "" if the Smi {position} is outside the bounds of the {receiver}.
+    Label if_positioninbounds(assembler),
+        if_positionnotinbounds(assembler, Label::kDeferred);
+    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+                      &if_positionnotinbounds, &if_positioninbounds);
+    assembler->Bind(&if_positionnotinbounds);
+    assembler->Return(assembler->EmptyStringConstant());
+    assembler->Bind(&if_positioninbounds);
+  }
+
+  // Load the character code at the {position} from the {receiver}.
+  Node* code = assembler->StringCharCodeAt(receiver, position);
+
+  // And return the single character string with only that {code}.
+  Node* result = assembler->StringFromCharCode(code);
+  assembler->Return(result);
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
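+// Illustrative JS semantics (informal examples):
+//   "abc".charCodeAt(1)  // 98
+//   "abc".charCodeAt(5)  // NaN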
+void Builtins::Generate_StringPrototypeCharCodeAt(
+    CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* position = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  receiver =
+      assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+
+  // Convert the {position} to a Smi and check that it's in bounds of the
+  // {receiver}.
+  // TODO(bmeurer): Find an abstraction for this!
+  {
+    // Check if the {position} is already a Smi.
+    Variable var_position(assembler, MachineRepresentation::kTagged);
+    var_position.Bind(position);
+    Label if_positionissmi(assembler),
+        if_positionisnotsmi(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+                      &if_positionisnotsmi);
+    assembler->Bind(&if_positionisnotsmi);
+    {
+      // Convert the {position} to an Integer via the ToIntegerStub.
+      Callable callable = CodeFactory::ToInteger(assembler->isolate());
+      Node* index = assembler->CallStub(callable, context, position);
+
+      // Check if the resulting {index} is now a Smi.
+      Label if_indexissmi(assembler, Label::kDeferred),
+          if_indexisnotsmi(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+                        &if_indexisnotsmi);
+
+      assembler->Bind(&if_indexissmi);
+      {
+        var_position.Bind(index);
+        assembler->Goto(&if_positionissmi);
+      }
+
+      assembler->Bind(&if_indexisnotsmi);
+      {
+        // The ToIntegerStub canonicalizes everything in Smi range to Smi
+        // representation, so any HeapNumber returned is not in Smi range.
+        // The only exception here is -0.0, which we treat as 0.
+        Node* index_value = assembler->LoadHeapNumberValue(index);
+        Label if_indexiszero(assembler, Label::kDeferred),
+            if_indexisnotzero(assembler, Label::kDeferred);
+        assembler->Branch(assembler->Float64Equal(
+                              index_value, assembler->Float64Constant(0.0)),
+                          &if_indexiszero, &if_indexisnotzero);
+
+        assembler->Bind(&if_indexiszero);
+        {
+          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+          assembler->Goto(&if_positionissmi);
+        }
+
+        assembler->Bind(&if_indexisnotzero);
+        {
+          // The {index} is some other integral Number that is definitely
+          // neither -0.0 nor in Smi range.
+          assembler->Return(assembler->NaNConstant());
+        }
+      }
+    }
+    assembler->Bind(&if_positionissmi);
+    position = var_position.value();
+
+    // Determine the actual length of the {receiver} String.
+    Node* receiver_length =
+        assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+    // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
+    Label if_positioninbounds(assembler),
+        if_positionnotinbounds(assembler, Label::kDeferred);
+    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+                      &if_positionnotinbounds, &if_positioninbounds);
+    assembler->Bind(&if_positionnotinbounds);
+    assembler->Return(assembler->NaNConstant());
+    assembler->Bind(&if_positioninbounds);
+  }
+
+  // Load the character at the {position} from the {receiver}.
+  Node* value = assembler->StringCharCodeAt(receiver, position);
+  Node* result = assembler->SmiFromWord32(value);
+  assembler->Return(result);
+}
+
+// ES6 section 21.1.3.25 String.prototype.toString ()
+void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* result = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kString, "String.prototype.toString");
+  assembler->Return(result);
+}
+
+// ES6 section 21.1.3.27 String.prototype.trim ()
+BUILTIN(StringPrototypeTrim) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trim");
+  return *String::Trim(string, String::kTrim);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimLeft) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trimLeft");
+  return *String::Trim(string, String::kTrimLeft);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimRight) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.trimRight");
+  return *String::Trim(string, String::kTrimRight);
+}
+
+// ES6 section 21.1.3.28 String.prototype.valueOf ( )
+void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* result = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kString, "String.prototype.valueOf");
+  assembler->Return(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-symbol.cc b/src/builtins/builtins-symbol.cc
new file mode 100644
index 0000000..8dd8a1f
--- /dev/null
+++ b/src/builtins/builtins-symbol.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.4 Symbol Objects
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
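+// Illustrative JS semantics (informal examples):
+//   Symbol("foo").toString()  // "Symbol(foo)"
+//   typeof Symbol()           // "symbol"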
+BUILTIN(SymbolConstructor) {
+  HandleScope scope(isolate);
+  Handle<Symbol> result = isolate->factory()->NewSymbol();
+  Handle<Object> description = args.atOrUndefined(isolate, 1);
+  if (!description->IsUndefined(isolate)) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
+                                       Object::ToString(isolate, description));
+    result->set_name(*description);
+  }
+  return *result;
+}
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
+BUILTIN(SymbolConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNotConstructor,
+                            isolate->factory()->Symbol_string()));
+}
+
+// ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
+void Builtins::Generate_SymbolPrototypeToPrimitive(
+    CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(4);
+
+  Node* result =
+      assembler->ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                             "Symbol.prototype [ @@toPrimitive ]");
+  assembler->Return(result);
+}
+
+// ES6 section 19.4.3.2 Symbol.prototype.toString ( )
+void Builtins::Generate_SymbolPrototypeToString(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* value = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString");
+  Node* result =
+      assembler->CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+  assembler->Return(result);
+}
+
+// ES6 section 19.4.3.3 Symbol.prototype.valueOf ( )
+void Builtins::Generate_SymbolPrototypeValueOf(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  Node* result = assembler->ToThisValue(
+      context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf");
+  assembler->Return(result);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-typedarray.cc b/src/builtins/builtins-typedarray.cc
new file mode 100644
index 0000000..ede04f2
--- /dev/null
+++ b/src/builtins/builtins-typedarray.cc
@@ -0,0 +1,101 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 22.2 TypedArray Objects
+
+// ES6 section 22.2.3.1 get %TypedArray%.prototype.buffer
+BUILTIN(TypedArrayPrototypeBuffer) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSTypedArray, typed_array, "get TypedArray.prototype.buffer");
+  return *typed_array->GetBuffer();
+}
+
+namespace {
+
+void Generate_TypedArrayPrototypeGetter(CodeStubAssembler* assembler,
+                                        const char* method_name,
+                                        int object_offset) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* context = assembler->Parameter(3);
+
+  // Check if the {receiver} is actually a JSTypedArray.
+  Label if_receiverisincompatible(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  assembler->GotoUnless(
+      assembler->Word32Equal(receiver_instance_type,
+                             assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+      &if_receiverisincompatible);
+
+  // Check if the {receiver}'s JSArrayBuffer was neutered.
+  Node* receiver_buffer =
+      assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+  Node* receiver_buffer_bit_field = assembler->LoadObjectField(
+      receiver_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+  Label if_receiverisneutered(assembler, Label::kDeferred);
+  assembler->GotoUnless(
+      assembler->Word32Equal(
+          assembler->Word32And(
+              receiver_buffer_bit_field,
+              assembler->Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+          assembler->Int32Constant(0)),
+      &if_receiverisneutered);
+  assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+
+  assembler->Bind(&if_receiverisneutered);
+  {
+    // The {receiver}'s buffer was neutered; default to zero.
+    assembler->Return(assembler->SmiConstant(0));
+  }
+
+  assembler->Bind(&if_receiverisincompatible);
+  {
+    // The {receiver} is not a valid JSTypedArray.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            method_name, TENURED)),
+        receiver);
+    assembler->Return(result);  // Never reached.
+  }
+}
+
+}  // namespace
+
+// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
+void Builtins::Generate_TypedArrayPrototypeByteLength(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeGetter(assembler,
+                                     "get TypedArray.prototype.byteLength",
+                                     JSTypedArray::kByteLengthOffset);
+}
+
+// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
+void Builtins::Generate_TypedArrayPrototypeByteOffset(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeGetter(assembler,
+                                     "get TypedArray.prototype.byteOffset",
+                                     JSTypedArray::kByteOffsetOffset);
+}
+
+// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
+void Builtins::Generate_TypedArrayPrototypeLength(
+    CodeStubAssembler* assembler) {
+  Generate_TypedArrayPrototypeGetter(assembler,
+                                     "get TypedArray.prototype.length",
+                                     JSTypedArray::kLengthOffset);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-utils.h b/src/builtins/builtins-utils.h
new file mode 100644
index 0000000..90b58c7
--- /dev/null
+++ b/src/builtins/builtins-utils.h
@@ -0,0 +1,137 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_UTILS_H_
+#define V8_BUILTINS_BUILTINS_UTILS_H_
+
+#include "src/arguments.h"
+#include "src/base/logging.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Arguments object passed to C++ builtins.
+class BuiltinArguments : public Arguments {
+ public:
+  BuiltinArguments(int length, Object** arguments)
+      : Arguments(length, arguments) {
+    // Check we have at least the receiver.
+    DCHECK_LE(1, this->length());
+  }
+
+  Object*& operator[](int index) {
+    DCHECK_LT(index, length());
+    return Arguments::operator[](index);
+  }
+
+  template <class S>
+  Handle<S> at(int index) {
+    DCHECK_LT(index, length());
+    return Arguments::at<S>(index);
+  }
+
+  Handle<Object> atOrUndefined(Isolate* isolate, int index) {
+    if (index >= length()) {
+      return isolate->factory()->undefined_value();
+    }
+    return at<Object>(index);
+  }
+
+  Handle<Object> receiver() { return Arguments::at<Object>(0); }
+
+  static const int kNewTargetOffset = 0;
+  static const int kTargetOffset = 1;
+  static const int kArgcOffset = 2;
+  static const int kNumExtraArgs = 3;
+  static const int kNumExtraArgsWithReceiver = 4;
+
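+  // The extra arguments (argc, target and new.target) are appended after the
+  // JS arguments, so target() and new_target() index them from the end of
+  // the underlying Arguments object.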
+  template <class S>
+  Handle<S> target() {
+    return Arguments::at<S>(Arguments::length() - 1 - kTargetOffset);
+  }
+  Handle<HeapObject> new_target() {
+    return Arguments::at<HeapObject>(Arguments::length() - 1 -
+                                     kNewTargetOffset);
+  }
+
+  // Gets the total number of arguments including the receiver (but
+  // excluding extra arguments).
+  int length() const { return Arguments::length() - kNumExtraArgs; }
+};
+
+// ----------------------------------------------------------------------------
+// Support macro for defining builtins in C++.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+//   BUILTIN(name) {
+//     ...
+//   }
+//
+// In the body of the builtin function the arguments can be accessed
+// through the BuiltinArguments object args.
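+//
+// For example (an illustrative sketch; "MyExample" is a hypothetical name):
+//
+//   BUILTIN(MyExample) {
+//     HandleScope scope(isolate);
+//     return *args.receiver();
+//   }
+//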
+// TODO(cbruni): add global flag to check whether any tracing events have been
+// enabled.
+// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
+#define BUILTIN(name)                                                        \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
+                                                     Isolate* isolate);      \
+                                                                             \
+  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                      \
+      int args_length, Object** args_object, Isolate* isolate) {             \
+    BuiltinArguments args(args_length, args_object);                         \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+    TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(                           \
+        isolate, &tracing::TraceEventStatsTable::Builtin_##name);            \
+    return Builtin_Impl_##name(args, isolate);                               \
+  }                                                                          \
+                                                                             \
+  MUST_USE_RESULT Object* Builtin_##name(                                    \
+      int args_length, Object** args_object, Isolate* isolate) {             \
+    CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+    if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||      \
+                    FLAG_runtime_call_stats)) {                              \
+      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);   \
+    }                                                                        \
+    BuiltinArguments args(args_length, args_object);                         \
+    return Builtin_Impl_##name(args, isolate);                               \
+  }                                                                          \
+                                                                             \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args,  \
+                                                     Isolate* isolate)
+
+// ----------------------------------------------------------------------------
+
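+// Throws a TypeError for {method} if the receiver is not an instance of
+// {Type}; otherwise casts it and binds it to a new Handle<Type> variable
+// named {name}.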
+#define CHECK_RECEIVER(Type, name, method)                                  \
+  if (!args.receiver()->Is##Type()) {                                       \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                         \
+        isolate,                                                            \
+        NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,          \
+                     isolate->factory()->NewStringFromAsciiChecked(method), \
+                     args.receiver()));                                     \
+  }                                                                         \
+  Handle<Type> name = Handle<Type>::cast(args.receiver())
+
+// Throws a TypeError for {method} if the receiver is not coercible to Object,
+// or converts the receiver to a String otherwise and assigns it to a new var
+// with the given {name}.
+#define TO_THIS_STRING(name, method)                                          \
+  if (args.receiver()->IsNull(isolate) ||                                     \
+      args.receiver()->IsUndefined(isolate)) {                                \
+    THROW_NEW_ERROR_RETURN_FAILURE(                                           \
+        isolate,                                                              \
+        NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,               \
+                     isolate->factory()->NewStringFromAsciiChecked(method))); \
+  }                                                                           \
+  Handle<String> name;                                                        \
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                         \
+      isolate, name, Object::ToString(isolate, args.receiver()))
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_UTILS_H_
diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc
new file mode 100644
index 0000000..d5a0e17
--- /dev/null
+++ b/src/builtins/builtins.cc
@@ -0,0 +1,295 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/code-events.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/ic-state.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations for C++ builtins.
+#define FORWARD_DECLARE(Name) \
+  Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+BUILTIN_LIST_C(FORWARD_DECLARE)
+
+Builtins::Builtins() : initialized_(false) {
+  memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
+}
+
+Builtins::~Builtins() {}
+
+namespace {
+void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
+                                const char* name) {
+  PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
+                                   AbstractCode::cast(code), name));
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_builtin_code) {
+    CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+    OFStream os(trace_scope.file());
+    os << "Builtin: " << name << "\n";
+    code->Disassemble(name, os);
+    os << "\n";
+  }
+#endif
+}
+
+typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
+typedef void (*CodeAssemblerGenerator)(CodeStubAssembler*);
+
+Code* BuildWithMacroAssembler(Isolate* isolate,
+                              MacroAssemblerGenerator generator,
+                              Code::Flags flags, const char* s_name) {
+  HandleScope scope(isolate);
+  const size_t buffer_size = 32 * KB;
+  byte buffer[buffer_size];  // NOLINT(runtime/arrays)
+  MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+  DCHECK(!masm.has_frame());
+  generator(&masm);
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Handle<Code> code =
+      isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+  PostBuildProfileAndTracing(isolate, *code, s_name);
+  return *code;
+}
+
+Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
+                   Builtins::ExitFrameType exit_frame_type, Code::Flags flags,
+                   const char* name) {
+  HandleScope scope(isolate);
+  const size_t buffer_size = 32 * KB;
+  byte buffer[buffer_size];  // NOLINT(runtime/arrays)
+  MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+  DCHECK(!masm.has_frame());
+  Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Handle<Code> code =
+      isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+  PostBuildProfileAndTracing(isolate, *code, name);
+  return *code;
+}
+
+// Builder for builtins implemented in TurboFan with JS linkage.
+Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
+                                   CodeAssemblerGenerator generator, int argc,
+                                   Code::Flags flags, const char* name) {
+  HandleScope scope(isolate);
+  Zone zone(isolate->allocator());
+  CodeStubAssembler assembler(isolate, &zone, argc, flags, name);
+  generator(&assembler);
+  Handle<Code> code = assembler.GenerateCode();
+  PostBuildProfileAndTracing(isolate, *code, name);
+  return *code;
+}
+
+// Builder for builtins implemented in TurboFan with CallStub linkage.
+Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
+                                   CodeAssemblerGenerator generator,
+                                   CallDescriptors::Key interface_descriptor,
+                                   Code::Flags flags, const char* name) {
+  HandleScope scope(isolate);
+  Zone zone(isolate->allocator());
+  // The interface descriptor with the given key must already be initialized
+  // at this point; this constructor merely queries the details from the
+  // descriptors table.
+  CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
+  // Ensure descriptor is already initialized.
+  DCHECK_LE(0, descriptor.GetRegisterParameterCount());
+  CodeStubAssembler assembler(isolate, &zone, descriptor, flags, name);
+  generator(&assembler);
+  Handle<Code> code = assembler.GenerateCode();
+  PostBuildProfileAndTracing(isolate, *code, name);
+  return *code;
+}
+}  // anonymous namespace
+
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
+  DCHECK(!initialized_);
+
+  // Create a scope for the handles in the builtins.
+  HandleScope scope(isolate);
+
+  if (create_heap_objects) {
+    int index = 0;
+    const Code::Flags kBuiltinFlags = Code::ComputeFlags(Code::BUILTIN);
+    Code* code;
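+    // Each BUILD_* macro below creates one Code object per BUILTIN_LIST
+    // entry, in list order (DBG builtins reuse BUILD_ASM), so positions in
+    // builtins_ line up with the indices used by name() and CppEntryOf().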
+#define BUILD_CPP(Name)                                                     \
+  code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), BUILTIN_EXIT, \
+                      kBuiltinFlags, #Name);                                \
+  builtins_[index++] = code;
+#define BUILD_API(Name)                                             \
+  code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), EXIT, \
+                      kBuiltinFlags, #Name);                        \
+  builtins_[index++] = code;
+#define BUILD_TFJ(Name, Argc)                                          \
+  code = BuildWithCodeStubAssemblerJS(isolate, &Generate_##Name, Argc, \
+                                      kBuiltinFlags, #Name);           \
+  builtins_[index++] = code;
+#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor)              \
+  { InterfaceDescriptor##Descriptor descriptor(isolate); }             \
+  code = BuildWithCodeStubAssemblerCS(                                 \
+      isolate, &Generate_##Name, CallDescriptors::InterfaceDescriptor, \
+      Code::ComputeFlags(Code::Kind, Extra), #Name);                   \
+  builtins_[index++] = code;
+#define BUILD_ASM(Name)                                                        \
+  code =                                                                       \
+      BuildWithMacroAssembler(isolate, Generate_##Name, kBuiltinFlags, #Name); \
+  builtins_[index++] = code;
+#define BUILD_ASH(Name, Kind, Extra)                                           \
+  code = BuildWithMacroAssembler(                                              \
+      isolate, Generate_##Name, Code::ComputeFlags(Code::Kind, Extra), #Name); \
+  builtins_[index++] = code;
+
+    BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFS, BUILD_ASM,
+                 BUILD_ASH, BUILD_ASM);
+
+#undef BUILD_CPP
+#undef BUILD_API
+#undef BUILD_TFJ
+#undef BUILD_TFS
+#undef BUILD_ASM
+#undef BUILD_ASH
+    CHECK_EQ(builtin_count, index);
+    for (int i = 0; i < builtin_count; i++) {
+      Code::cast(builtins_[i])->set_builtin_index(i);
+    }
+  }
+
+  // Mark as initialized.
+  initialized_ = true;
+}
+
+void Builtins::TearDown() { initialized_ = false; }
+
+void Builtins::IterateBuiltins(ObjectVisitor* v) {
+  v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
+}
+
+const char* Builtins::Lookup(byte* pc) {
+  // may be called during initialization (disassembler!)
+  if (initialized_) {
+    for (int i = 0; i < builtin_count; i++) {
+      Code* entry = Code::cast(builtins_[i]);
+      if (entry->contains(pc)) return name(i);
+    }
+  }
+  return NULL;
+}
+
+// static
+const char* Builtins::name(int index) {
+  switch (index) {
+#define CASE(Name, ...) \
+  case k##Name:         \
+    return #Name;
+    BUILTIN_LIST_ALL(CASE)
+#undef CASE
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return "";
+}
+
+// static
+Address Builtins::CppEntryOf(int index) {
+  DCHECK(0 <= index && index < builtin_count);
+  switch (index) {
+#define CASE(Name, ...) \
+  case k##Name:         \
+    return FUNCTION_ADDR(Builtin_##Name);
+    BUILTIN_LIST_C(CASE)
+#undef CASE
+    default:
+      return nullptr;
+  }
+  UNREACHABLE();
+}
+
+// static
+bool Builtins::IsCpp(int index) {
+  DCHECK(0 <= index && index < builtin_count);
+  switch (index) {
+#define CASE(Name, ...) \
+  case k##Name:         \
+    return true;
+#define BUILTIN_LIST_CPP(V)                                       \
+  BUILTIN_LIST(V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+               IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+    BUILTIN_LIST_CPP(CASE)
+#undef BUILTIN_LIST_CPP
+#undef CASE
+    default:
+      return false;
+  }
+  UNREACHABLE();
+}
+
+// static
+bool Builtins::IsApi(int index) {
+  DCHECK(0 <= index && index < builtin_count);
+  switch (index) {
+#define CASE(Name, ...) \
+  case k##Name:         \
+    return true;
+#define BUILTIN_LIST_API(V)                                       \
+  BUILTIN_LIST(IGNORE_BUILTIN, V, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+               IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+    BUILTIN_LIST_API(CASE);
+#undef BUILTIN_LIST_API
+#undef CASE
+    default:
+      return false;
+  }
+  UNREACHABLE();
+}
+
+// static
+bool Builtins::HasCppImplementation(int index) {
+  DCHECK(0 <= index && index < builtin_count);
+  switch (index) {
+#define CASE(Name, ...) \
+  case k##Name:         \
+    return true;
+    BUILTIN_LIST_C(CASE)
+#undef CASE
+    default:
+      return false;
+  }
+  UNREACHABLE();
+}
+
+#define DEFINE_BUILTIN_ACCESSOR(Name, ...)                                    \
+  Handle<Code> Builtins::Name() {                                             \
+    Code** code_address = reinterpret_cast<Code**>(builtin_address(k##Name)); \
+    return Handle<Code>(code_address);                                        \
+  }
+BUILTIN_LIST_ALL(DEFINE_BUILTIN_ACCESSOR)
+#undef DEFINE_BUILTIN_ACCESSOR
+
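+// Returns whether dynamic function creation (e.g. through the Function
+// constructor) is allowed: true if the context responsible for the call may
+// access the target function's global proxy.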
+// static
+bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
+                                    Handle<JSObject> target_global_proxy) {
+  if (FLAG_allow_unsafe_function_constructor) return true;
+  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+  Handle<Context> responsible_context =
+      impl->MicrotaskContextIsLastEnteredContext() ? impl->MicrotaskContext()
+                                                   : impl->LastEnteredContext();
+  // TODO(jochen): Remove this.
+  if (responsible_context.is_null()) {
+    return true;
+  }
+  if (*responsible_context == target->context()) return true;
+  return isolate->MayAccess(responsible_context, target_global_proxy);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h
new file mode 100644
index 0000000..f8ce2e6
--- /dev/null
+++ b/src/builtins/builtins.h
@@ -0,0 +1,677 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_H_
+#define V8_BUILTINS_BUILTINS_H_
+
+#include "src/base/flags.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+#define CODE_AGE_LIST_WITH_ARG(V, A) \
+  V(Quadragenarian, A)               \
+  V(Quinquagenarian, A)              \
+  V(Sexagenarian, A)                 \
+  V(Septuagenarian, A)               \
+  V(Octogenarian, A)
+
+#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
+
+#define CODE_AGE_LIST(V) CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define CODE_AGE_LIST_COMPLETE(V) \
+  V(ToBeExecutedOnce)             \
+  V(NotExecuted)                  \
+  V(ExecutedOnce)                 \
+  V(NoAge)                        \
+  CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define DECLARE_CODE_AGE_BUILTIN(C, V) \
+  V(Make##C##CodeYoungAgainOddMarking) \
+  V(Make##C##CodeYoungAgainEvenMarking)
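+// Illustrative expansion: DECLARE_CODE_AGE_BUILTIN(Quadragenarian, V) yields
+//   V(MakeQuadragenarianCodeYoungAgainOddMarking)
+//   V(MakeQuadragenarianCodeYoungAgainEvenMarking)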
+
+// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
+//      Args: name
+// API: Builtin in C++ for API callbacks. Entered via EXIT frame.
+//      Args: name
+// TFJ: Builtin in TurboFan, with JS linkage (callable as a JavaScript
+//      function).
+//      Args: name, arguments count
+// TFS: Builtin in TurboFan, with CodeStub linkage.
+//      Args: name, code kind, extra IC state, interface descriptor
+// ASM: Builtin in platform-dependent assembly.
+//      Args: name
+// ASH: Handlers implemented in platform-dependent assembly.
+//      Args: name, code kind, extra IC state
+// DBG: Builtin in platform-dependent assembly, used by the debugger.
+//      Args: name
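+//
+// Illustrative sample entries (all taken from the list below):
+//   CPP(ObjectFreeze)                - C++ builtin, name only.
+//   TFJ(MathAbs, 2)                  - TurboFan JS builtin; the argument
+//                                      count includes the receiver.
+//   TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion)
+//   ASM(StackCheck)                  - hand-written per-architecture assembly.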
+#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG)                       \
+  ASM(Abort)                                                                  \
+  /* Handlers */                                                              \
+  ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState)                \
+  ASM(KeyedLoadIC_Miss)                                                       \
+  ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC)                         \
+  ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)              \
+  ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                        \
+      StoreICState::kStrictModeState)                                         \
+  ASM(KeyedStoreIC_Miss)                                                      \
+  ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC)                       \
+  TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
+  TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+  ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState)                       \
+  TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector)                  \
+  ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC)                                  \
+  TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector)                    \
+  TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector)                \
+  ASH(StoreIC_Normal, HANDLER, Code::STORE_IC)                                \
+  ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState)      \
+  TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector)           \
+  TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector)           \
+                                                                              \
+  /* Code aging */                                                            \
+  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM)                       \
+                                                                              \
+  /* Calls */                                                                 \
+  ASM(ArgumentsAdaptorTrampoline)                                             \
+  /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */             \
+  ASM(CallFunction_ReceiverIsNullOrUndefined)                                 \
+  ASM(CallFunction_ReceiverIsNotNullOrUndefined)                              \
+  ASM(CallFunction_ReceiverIsAny)                                             \
+  ASM(TailCallFunction_ReceiverIsNullOrUndefined)                             \
+  ASM(TailCallFunction_ReceiverIsNotNullOrUndefined)                          \
+  ASM(TailCallFunction_ReceiverIsAny)                                         \
+  /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */           \
+  ASM(CallBoundFunction)                                                      \
+  ASM(TailCallBoundFunction)                                                  \
+  /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */                        \
+  ASM(Call_ReceiverIsNullOrUndefined)                                         \
+  ASM(Call_ReceiverIsNotNullOrUndefined)                                      \
+  ASM(Call_ReceiverIsAny)                                                     \
+  ASM(TailCall_ReceiverIsNullOrUndefined)                                     \
+  ASM(TailCall_ReceiverIsNotNullOrUndefined)                                  \
+  ASM(TailCall_ReceiverIsAny)                                                 \
+                                                                              \
+  /* Construct */                                                             \
+  /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */           \
+  ASM(ConstructFunction)                                                      \
+  /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */          \
+  ASM(ConstructBoundFunction)                                                 \
+  ASM(ConstructedNonConstructable)                                            \
+  /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */          \
+  ASM(ConstructProxy)                                                         \
+  /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */        \
+  ASM(Construct)                                                              \
+  ASM(JSConstructStubApi)                                                     \
+  ASM(JSConstructStubGeneric)                                                 \
+  ASM(JSBuiltinsConstructStub)                                                \
+  ASM(JSBuiltinsConstructStubForDerived)                                      \
+                                                                              \
+  /* Apply and entries */                                                     \
+  ASM(Apply)                                                                  \
+  ASM(JSEntryTrampoline)                                                      \
+  ASM(JSConstructEntryTrampoline)                                             \
+  ASM(ResumeGeneratorTrampoline)                                              \
+                                                                              \
+  /* Stack and interrupt check */                                             \
+  ASM(InterruptCheck)                                                         \
+  ASM(StackCheck)                                                             \
+                                                                              \
+  /* Interpreter */                                                           \
+  ASM(InterpreterEntryTrampoline)                                             \
+  ASM(InterpreterMarkBaselineOnReturn)                                        \
+  ASM(InterpreterPushArgsAndCall)                                             \
+  ASM(InterpreterPushArgsAndCallFunction)                                     \
+  ASM(InterpreterPushArgsAndConstruct)                                        \
+  ASM(InterpreterPushArgsAndTailCall)                                         \
+  ASM(InterpreterPushArgsAndTailCallFunction)                                 \
+  ASM(InterpreterEnterBytecodeDispatch)                                       \
+  ASM(InterpreterOnStackReplacement)                                          \
+                                                                              \
+  /* Code life-cycle */                                                       \
+  ASM(CompileLazy)                                                            \
+  ASM(CompileBaseline)                                                        \
+  ASM(CompileOptimized)                                                       \
+  ASM(CompileOptimizedConcurrent)                                             \
+  ASM(InOptimizationQueue)                                                    \
+  ASM(InstantiateAsmJs)                                                       \
+  ASM(MarkCodeAsToBeExecutedOnce)                                             \
+  ASM(MarkCodeAsExecutedOnce)                                                 \
+  ASM(MarkCodeAsExecutedTwice)                                                \
+  ASM(NotifyDeoptimized)                                                      \
+  ASM(NotifySoftDeoptimized)                                                  \
+  ASM(NotifyLazyDeoptimized)                                                  \
+  ASM(NotifyStubFailure)                                                      \
+  ASM(NotifyStubFailureSaveDoubles)                                           \
+  ASM(OnStackReplacement)                                                     \
+                                                                              \
+  /* API callback handling */                                                 \
+  API(HandleApiCall)                                                          \
+  API(HandleApiCallAsFunction)                                                \
+  API(HandleApiCallAsConstructor)                                             \
+  ASM(HandleFastApiCall)                                                      \
+                                                                              \
+  /* Adapters for TurboFan into runtime */                                    \
+  ASM(AllocateInNewSpace)                                                     \
+  ASM(AllocateInOldSpace)                                                     \
+                                                                              \
+  /* TurboFan support builtins */                                             \
+  TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
+      CopyFastSmiOrObjectElements)                                            \
+  TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements)    \
+  TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
+      GrowArrayElements)                                                      \
+                                                                              \
+  /* Debugger */                                                              \
+  DBG(FrameDropper_LiveEdit)                                                  \
+  DBG(Return_DebugBreak)                                                      \
+  DBG(Slot_DebugBreak)                                                        \
+                                                                              \
+  /* Type conversions */                                                      \
+  TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion)                    \
+  TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion)   \
+  TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion)   \
+  TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState,              \
+      TypeConversion)                                                         \
+  TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState,               \
+      TypeConversion)                                                         \
+  TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState,               \
+      TypeConversion)                                                         \
+  TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion)               \
+  TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion)            \
+  ASM(ToNumber)                                                               \
+                                                                              \
+  /* Built-in functions for JavaScript */                                     \
+  /* Special internal builtins */                                             \
+  CPP(EmptyFunction)                                                          \
+  CPP(Illegal)                                                                \
+  CPP(RestrictedFunctionPropertiesThrower)                                    \
+  CPP(RestrictedStrictArgumentsPropertiesThrower)                             \
+  CPP(UnsupportedThrower)                                                     \
+                                                                              \
+  /* Array */                                                                 \
+  ASM(ArrayCode)                                                              \
+  ASM(InternalArrayCode)                                                      \
+  CPP(ArrayConcat)                                                            \
+  /* ES6 section 22.1.2.2 Array.isArray */                                    \
+  TFJ(ArrayIsArray, 2)                                                        \
+  /* ES7 #sec-array.prototype.includes */                                     \
+  TFJ(ArrayIncludes, 3)                                                       \
+  TFJ(ArrayIndexOf, 3)                                                        \
+  CPP(ArrayPop)                                                               \
+  CPP(ArrayPush)                                                              \
+  CPP(ArrayShift)                                                             \
+  CPP(ArraySlice)                                                             \
+  CPP(ArraySplice)                                                            \
+  CPP(ArrayUnshift)                                                           \
+                                                                              \
+  /* ArrayBuffer */                                                           \
+  CPP(ArrayBufferConstructor)                                                 \
+  CPP(ArrayBufferConstructor_ConstructStub)                                   \
+  CPP(ArrayBufferPrototypeGetByteLength)                                      \
+  CPP(ArrayBufferIsView)                                                      \
+                                                                              \
+  /* Boolean */                                                               \
+  CPP(BooleanConstructor)                                                     \
+  CPP(BooleanConstructor_ConstructStub)                                       \
+  /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */                   \
+  TFJ(BooleanPrototypeToString, 1)                                            \
+  /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */                    \
+  TFJ(BooleanPrototypeValueOf, 1)                                             \
+                                                                              \
+  /* CallSite */                                                              \
+  CPP(CallSitePrototypeGetColumnNumber)                                       \
+  CPP(CallSitePrototypeGetEvalOrigin)                                         \
+  CPP(CallSitePrototypeGetFileName)                                           \
+  CPP(CallSitePrototypeGetFunction)                                           \
+  CPP(CallSitePrototypeGetFunctionName)                                       \
+  CPP(CallSitePrototypeGetLineNumber)                                         \
+  CPP(CallSitePrototypeGetMethodName)                                         \
+  CPP(CallSitePrototypeGetPosition)                                           \
+  CPP(CallSitePrototypeGetScriptNameOrSourceURL)                              \
+  CPP(CallSitePrototypeGetThis)                                               \
+  CPP(CallSitePrototypeGetTypeName)                                           \
+  CPP(CallSitePrototypeIsConstructor)                                         \
+  CPP(CallSitePrototypeIsEval)                                                \
+  CPP(CallSitePrototypeIsNative)                                              \
+  CPP(CallSitePrototypeIsToplevel)                                            \
+  CPP(CallSitePrototypeToString)                                              \
+                                                                              \
+  /* DataView */                                                              \
+  CPP(DataViewConstructor)                                                    \
+  CPP(DataViewConstructor_ConstructStub)                                      \
+  CPP(DataViewPrototypeGetBuffer)                                             \
+  CPP(DataViewPrototypeGetByteLength)                                         \
+  CPP(DataViewPrototypeGetByteOffset)                                         \
+                                                                              \
+  /* Date */                                                                  \
+  CPP(DateConstructor)                                                        \
+  CPP(DateConstructor_ConstructStub)                                          \
+  /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */                       \
+  ASM(DatePrototypeGetDate)                                                   \
+  /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */                        \
+  ASM(DatePrototypeGetDay)                                                    \
+  /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */                   \
+  ASM(DatePrototypeGetFullYear)                                               \
+  /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */                      \
+  ASM(DatePrototypeGetHours)                                                  \
+  /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */               \
+  ASM(DatePrototypeGetMilliseconds)                                           \
+  /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */                    \
+  ASM(DatePrototypeGetMinutes)                                                \
+  /* ES6 section 20.3.4.8 Date.prototype.getMonth */                          \
+  ASM(DatePrototypeGetMonth)                                                  \
+  /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */                    \
+  ASM(DatePrototypeGetSeconds)                                                \
+  /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */                      \
+  ASM(DatePrototypeGetTime)                                                   \
+  /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */            \
+  ASM(DatePrototypeGetTimezoneOffset)                                         \
+  /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */                   \
+  ASM(DatePrototypeGetUTCDate)                                                \
+  /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */                    \
+  ASM(DatePrototypeGetUTCDay)                                                 \
+  /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */               \
+  ASM(DatePrototypeGetUTCFullYear)                                            \
+  /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */                  \
+  ASM(DatePrototypeGetUTCHours)                                               \
+  /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */           \
+  ASM(DatePrototypeGetUTCMilliseconds)                                        \
+  /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */                \
+  ASM(DatePrototypeGetUTCMinutes)                                             \
+  /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */                  \
+  ASM(DatePrototypeGetUTCMonth)                                               \
+  /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */                \
+  ASM(DatePrototypeGetUTCSeconds)                                             \
+  CPP(DatePrototypeGetYear)                                                   \
+  CPP(DatePrototypeSetYear)                                                   \
+  CPP(DateNow)                                                                \
+  CPP(DateParse)                                                              \
+  CPP(DatePrototypeSetDate)                                                   \
+  CPP(DatePrototypeSetFullYear)                                               \
+  CPP(DatePrototypeSetHours)                                                  \
+  CPP(DatePrototypeSetMilliseconds)                                           \
+  CPP(DatePrototypeSetMinutes)                                                \
+  CPP(DatePrototypeSetMonth)                                                  \
+  CPP(DatePrototypeSetSeconds)                                                \
+  CPP(DatePrototypeSetTime)                                                   \
+  CPP(DatePrototypeSetUTCDate)                                                \
+  CPP(DatePrototypeSetUTCFullYear)                                            \
+  CPP(DatePrototypeSetUTCHours)                                               \
+  CPP(DatePrototypeSetUTCMilliseconds)                                        \
+  CPP(DatePrototypeSetUTCMinutes)                                             \
+  CPP(DatePrototypeSetUTCMonth)                                               \
+  CPP(DatePrototypeSetUTCSeconds)                                             \
+  CPP(DatePrototypeToDateString)                                              \
+  CPP(DatePrototypeToISOString)                                               \
+  CPP(DatePrototypeToPrimitive)                                               \
+  CPP(DatePrototypeToUTCString)                                               \
+  CPP(DatePrototypeToString)                                                  \
+  CPP(DatePrototypeToTimeString)                                              \
+  CPP(DatePrototypeValueOf)                                                   \
+  CPP(DatePrototypeToJson)                                                    \
+  CPP(DateUTC)                                                                \
+                                                                              \
+  /* Error */                                                                 \
+  CPP(ErrorConstructor)                                                       \
+  CPP(ErrorCaptureStackTrace)                                                 \
+  CPP(ErrorPrototypeToString)                                                 \
+  CPP(MakeError)                                                              \
+  CPP(MakeRangeError)                                                         \
+  CPP(MakeSyntaxError)                                                        \
+  CPP(MakeTypeError)                                                          \
+  CPP(MakeURIError)                                                           \
+                                                                              \
+  /* Function */                                                              \
+  CPP(FunctionConstructor)                                                    \
+  ASM(FunctionPrototypeApply)                                                 \
+  CPP(FunctionPrototypeBind)                                                  \
+  ASM(FunctionPrototypeCall)                                                  \
+  /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */       \
+  TFJ(FunctionPrototypeHasInstance, 2)                                        \
+  CPP(FunctionPrototypeToString)                                              \
+                                                                              \
+  /* Generator and Async */                                                   \
+  CPP(GeneratorFunctionConstructor)                                           \
+  /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */               \
+  TFJ(GeneratorPrototypeNext, 2)                                              \
+  /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */             \
+  TFJ(GeneratorPrototypeReturn, 2)                                            \
+  /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */          \
+  TFJ(GeneratorPrototypeThrow, 2)                                             \
+  CPP(AsyncFunctionConstructor)                                               \
+                                                                              \
+  /* Encode and decode */                                                     \
+  CPP(GlobalDecodeURI)                                                        \
+  CPP(GlobalDecodeURIComponent)                                               \
+  CPP(GlobalEncodeURI)                                                        \
+  CPP(GlobalEncodeURIComponent)                                               \
+  CPP(GlobalEscape)                                                           \
+  CPP(GlobalUnescape)                                                         \
+                                                                              \
+  /* Eval */                                                                  \
+  CPP(GlobalEval)                                                             \
+                                                                              \
+  /* JSON */                                                                  \
+  CPP(JsonParse)                                                              \
+  CPP(JsonStringify)                                                          \
+                                                                              \
+  /* Math */                                                                  \
+  /* ES6 section 20.2.2.1 Math.abs ( x ) */                                   \
+  TFJ(MathAbs, 2)                                                             \
+  /* ES6 section 20.2.2.2 Math.acos ( x ) */                                  \
+  TFJ(MathAcos, 2)                                                            \
+  /* ES6 section 20.2.2.3 Math.acosh ( x ) */                                 \
+  TFJ(MathAcosh, 2)                                                           \
+  /* ES6 section 20.2.2.4 Math.asin ( x ) */                                  \
+  TFJ(MathAsin, 2)                                                            \
+  /* ES6 section 20.2.2.5 Math.asinh ( x ) */                                 \
+  TFJ(MathAsinh, 2)                                                           \
+  /* ES6 section 20.2.2.6 Math.atan ( x ) */                                  \
+  TFJ(MathAtan, 2)                                                            \
+  /* ES6 section 20.2.2.7 Math.atanh ( x ) */                                 \
+  TFJ(MathAtanh, 2)                                                           \
+  /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */                              \
+  TFJ(MathAtan2, 3)                                                           \
+  /* ES6 section 20.2.2.9 Math.cbrt ( x ) */                                  \
+  TFJ(MathCbrt, 2)                                                            \
+  /* ES6 section 20.2.2.10 Math.ceil ( x ) */                                 \
+  TFJ(MathCeil, 2)                                                            \
+  /* ES6 section 20.2.2.11 Math.clz32 ( x ) */                                \
+  TFJ(MathClz32, 2)                                                           \
+  /* ES6 section 20.2.2.12 Math.cos ( x ) */                                  \
+  TFJ(MathCos, 2)                                                             \
+  /* ES6 section 20.2.2.13 Math.cosh ( x ) */                                 \
+  TFJ(MathCosh, 2)                                                            \
+  /* ES6 section 20.2.2.14 Math.exp ( x ) */                                  \
+  TFJ(MathExp, 2)                                                             \
+  /* ES6 section 20.2.2.15 Math.expm1 ( x ) */                                \
+  TFJ(MathExpm1, 2)                                                           \
+  /* ES6 section 20.2.2.16 Math.floor ( x ) */                                \
+  TFJ(MathFloor, 2)                                                           \
+  /* ES6 section 20.2.2.17 Math.fround ( x ) */                               \
+  TFJ(MathFround, 2)                                                          \
+  /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */        \
+  CPP(MathHypot)                                                              \
+  /* ES6 section 20.2.2.19 Math.imul ( x, y ) */                              \
+  TFJ(MathImul, 3)                                                            \
+  /* ES6 section 20.2.2.20 Math.log ( x ) */                                  \
+  TFJ(MathLog, 2)                                                             \
+  /* ES6 section 20.2.2.21 Math.log1p ( x ) */                                \
+  TFJ(MathLog1p, 2)                                                           \
+  /* ES6 section 20.2.2.22 Math.log10 ( x ) */                                \
+  TFJ(MathLog10, 2)                                                           \
+  /* ES6 section 20.2.2.23 Math.log2 ( x ) */                                 \
+  TFJ(MathLog2, 2)                                                            \
+  /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */         \
+  ASM(MathMax)                                                                \
+  /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */         \
+  ASM(MathMin)                                                                \
+  /* ES6 section 20.2.2.26 Math.pow ( x, y ) */                               \
+  TFJ(MathPow, 3)                                                             \
+  /* ES6 section 20.2.2.28 Math.round ( x ) */                                \
+  TFJ(MathRound, 2)                                                           \
+  /* ES6 section 20.2.2.29 Math.sign ( x ) */                                 \
+  TFJ(MathSign, 2)                                                            \
+  /* ES6 section 20.2.2.30 Math.sin ( x ) */                                  \
+  TFJ(MathSin, 2)                                                             \
+  /* ES6 section 20.2.2.31 Math.sinh ( x ) */                                 \
+  TFJ(MathSinh, 2)                                                            \
+  /* ES6 section 20.2.2.32 Math.sqrt ( x ) */                                 \
+  TFJ(MathSqrt, 2)                                                            \
+  /* ES6 section 20.2.2.33 Math.tan ( x ) */                                  \
+  TFJ(MathTan, 2)                                                             \
+  /* ES6 section 20.2.2.34 Math.tanh ( x ) */                                 \
+  TFJ(MathTanh, 2)                                                            \
+  /* ES6 section 20.2.2.35 Math.trunc ( x ) */                                \
+  TFJ(MathTrunc, 2)                                                           \
+                                                                              \
+  /* Number */                                                                \
+  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */       \
+  ASM(NumberConstructor)                                                      \
+  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */  \
+  ASM(NumberConstructor_ConstructStub)                                        \
+  CPP(NumberPrototypeToExponential)                                           \
+  CPP(NumberPrototypeToFixed)                                                 \
+  CPP(NumberPrototypeToLocaleString)                                          \
+  CPP(NumberPrototypeToPrecision)                                             \
+  CPP(NumberPrototypeToString)                                                \
+  /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */                     \
+  TFJ(NumberPrototypeValueOf, 1)                                              \
+                                                                              \
+  /* Object */                                                                \
+  CPP(ObjectAssign)                                                           \
+  CPP(ObjectCreate)                                                           \
+  CPP(ObjectDefineGetter)                                                     \
+  CPP(ObjectDefineProperties)                                                 \
+  CPP(ObjectDefineProperty)                                                   \
+  CPP(ObjectDefineSetter)                                                     \
+  CPP(ObjectEntries)                                                          \
+  CPP(ObjectFreeze)                                                           \
+  CPP(ObjectGetOwnPropertyDescriptor)                                         \
+  CPP(ObjectGetOwnPropertyDescriptors)                                        \
+  CPP(ObjectGetOwnPropertyNames)                                              \
+  CPP(ObjectGetOwnPropertySymbols)                                            \
+  CPP(ObjectGetPrototypeOf)                                                   \
+  /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */                  \
+  TFJ(ObjectHasOwnProperty, 2)                                                \
+  CPP(ObjectIs)                                                               \
+  CPP(ObjectIsExtensible)                                                     \
+  CPP(ObjectIsFrozen)                                                         \
+  CPP(ObjectIsSealed)                                                         \
+  CPP(ObjectKeys)                                                             \
+  CPP(ObjectLookupGetter)                                                     \
+  CPP(ObjectLookupSetter)                                                     \
+  CPP(ObjectPreventExtensions)                                                \
+  /* ES6 section 19.1.3.6 Object.prototype.toString () */                     \
+  TFJ(ObjectProtoToString, 1)                                                 \
+  CPP(ObjectPrototypePropertyIsEnumerable)                                    \
+  CPP(ObjectSeal)                                                             \
+  CPP(ObjectValues)                                                           \
+                                                                              \
+  /* Proxy */                                                                 \
+  CPP(ProxyConstructor)                                                       \
+  CPP(ProxyConstructor_ConstructStub)                                         \
+                                                                              \
+  /* Reflect */                                                               \
+  ASM(ReflectApply)                                                           \
+  ASM(ReflectConstruct)                                                       \
+  CPP(ReflectDefineProperty)                                                  \
+  CPP(ReflectDeleteProperty)                                                  \
+  CPP(ReflectGet)                                                             \
+  CPP(ReflectGetOwnPropertyDescriptor)                                        \
+  CPP(ReflectGetPrototypeOf)                                                  \
+  CPP(ReflectHas)                                                             \
+  CPP(ReflectIsExtensible)                                                    \
+  CPP(ReflectOwnKeys)                                                         \
+  CPP(ReflectPreventExtensions)                                               \
+  CPP(ReflectSet)                                                             \
+  CPP(ReflectSetPrototypeOf)                                                  \
+                                                                              \
+  /* SharedArrayBuffer */                                                     \
+  CPP(SharedArrayBufferPrototypeGetByteLength)                                \
+  TFJ(AtomicsLoad, 3)                                                         \
+  TFJ(AtomicsStore, 4)                                                        \
+                                                                              \
+  /* String */                                                                \
+  ASM(StringConstructor)                                                      \
+  ASM(StringConstructor_ConstructStub)                                        \
+  CPP(StringFromCodePoint)                                                    \
+  /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */             \
+  TFJ(StringFromCharCode, 2)                                                  \
+  /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */                  \
+  TFJ(StringPrototypeCharAt, 2)                                               \
+  /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */              \
+  TFJ(StringPrototypeCharCodeAt, 2)                                           \
+  /* ES6 section 21.1.3.25 String.prototype.toString () */                    \
+  TFJ(StringPrototypeToString, 1)                                             \
+  CPP(StringPrototypeTrim)                                                    \
+  CPP(StringPrototypeTrimLeft)                                                \
+  CPP(StringPrototypeTrimRight)                                               \
+  /* ES6 section 21.1.3.28 String.prototype.valueOf () */                     \
+  TFJ(StringPrototypeValueOf, 1)                                              \
+                                                                              \
+  /* Symbol */                                                                \
+  CPP(SymbolConstructor)                                                      \
+  CPP(SymbolConstructor_ConstructStub)                                        \
+  /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */      \
+  TFJ(SymbolPrototypeToPrimitive, 2)                                          \
+  /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */                    \
+  TFJ(SymbolPrototypeToString, 1)                                             \
+  /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */                     \
+  TFJ(SymbolPrototypeValueOf, 1)                                              \
+                                                                              \
+  /* TypedArray */                                                            \
+  CPP(TypedArrayPrototypeBuffer)                                              \
+  /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */            \
+  TFJ(TypedArrayPrototypeByteLength, 1)                                       \
+  /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */            \
+  TFJ(TypedArrayPrototypeByteOffset, 1)                                       \
+  /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */               \
+  TFJ(TypedArrayPrototypeLength, 1)
+
+#define IGNORE_BUILTIN(...)
+
+#define BUILTIN_LIST_ALL(V) BUILTIN_LIST(V, V, V, V, V, V, V)
+
+#define BUILTIN_LIST_C(V)                                            \
+  BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+               IGNORE_BUILTIN, IGNORE_BUILTIN)
+
+#define BUILTIN_LIST_A(V)                                                      \
+  BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+               V, V, V)
+
+#define BUILTIN_LIST_DBG(V)                                                    \
+  BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+               IGNORE_BUILTIN, IGNORE_BUILTIN, V)
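+
+// For example, BUILTIN_LIST_C(CASE) applies CASE only to the CPP and API
+// entries of BUILTIN_LIST; every other entry is routed through
+// IGNORE_BUILTIN, which swallows its arguments and produces nothing.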
+
+// Forward declarations.
+class CodeStubAssembler;
+class ObjectVisitor;
+
+class Builtins {
+ public:
+  ~Builtins();
+
+  // Generate all builtin code objects. Should be called once during
+  // isolate initialization.
+  void SetUp(Isolate* isolate, bool create_heap_objects);
+  void TearDown();
+
+  // Garbage collection support.
+  void IterateBuiltins(ObjectVisitor* v);
+
+  // Disassembler support.
+  const char* Lookup(byte* pc);
+
+  enum Name {
+#define DEF_ENUM(Name, ...) k##Name,
+    BUILTIN_LIST_ALL(DEF_ENUM)
+#undef DEF_ENUM
+        builtin_count
+  };
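+
+  // Illustrative values: the enum follows list order, so kAbort is the first
+  // value and builtin_count is the total number of builtins.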
+
+#define DECLARE_BUILTIN_ACCESSOR(Name, ...) Handle<Code> Name();
+  BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
+#undef DECLARE_BUILTIN_ACCESSOR
+
+  // Convenience wrappers.
+  Handle<Code> CallFunction(
+      ConvertReceiverMode = ConvertReceiverMode::kAny,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
+                    TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+  Handle<Code> NonPrimitiveToPrimitive(
+      ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+  Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+  Handle<Code> InterpreterPushArgsAndCall(
+      TailCallMode tail_call_mode,
+      CallableType function_type = CallableType::kAny);
+
+  Code* builtin(Name name) {
+    // Code::cast cannot be used here since we access builtins
+    // during the marking phase of mark-sweep. See IC::Clear.
+    return reinterpret_cast<Code*>(builtins_[name]);
+  }
+
+  Address builtin_address(Name name) {
+    return reinterpret_cast<Address>(&builtins_[name]);
+  }
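+
+  // Usage sketch (illustrative): the isolate owns the Builtins table, e.g.
+  //   Code* code = isolate->builtins()->builtin(Builtins::kStackCheck);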
+
+  static const char* name(int index);
+
+  // Returns the C++ entry point for builtins implemented in C++, and the null
+  // Address otherwise.
+  static Address CppEntryOf(int index);
+
+  static bool IsCpp(int index);
+  static bool IsApi(int index);
+  static bool HasCppImplementation(int index);
+
+  bool is_initialized() const { return initialized_; }
+
+  MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
+      Isolate* isolate, bool is_construct, Handle<HeapObject> function,
+      Handle<Object> receiver, int argc, Handle<Object> args[],
+      Handle<HeapObject> new_target);
+
+  enum ExitFrameType { EXIT, BUILTIN_EXIT };
+
+  static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address,
+                               ExitFrameType exit_frame_type);
+
+  static bool AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
+                                   Handle<JSObject> target_global_proxy);
+
+ private:
+  Builtins();
+
+  static void Generate_CallFunction(MacroAssembler* masm,
+                                    ConvertReceiverMode mode,
+                                    TailCallMode tail_call_mode);
+
+  static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                             TailCallMode tail_call_mode);
+
+  static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                            TailCallMode tail_call_mode);
+
+  static void Generate_InterpreterPushArgsAndCallImpl(
+      MacroAssembler* masm, TailCallMode tail_call_mode,
+      CallableType function_type);
+
+  static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                              int field_index);
+
+  enum class MathMaxMinKind { kMax, kMin };
+  static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
+
+#define DECLARE_ASM(Name, ...) \
+  static void Generate_##Name(MacroAssembler* masm);
+#define DECLARE_TF(Name, ...) \
+  static void Generate_##Name(CodeStubAssembler* csasm);
+
+  BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
+               DECLARE_ASM, DECLARE_ASM, DECLARE_ASM)
+
+#undef DECLARE_ASM
+#undef DECLARE_TF
+
+  // Note: These are always Code objects, but to conform with
+  // IterateBuiltins() above, which assumes Object**'s for the callback
+  // function f, we use an Object* array here.
+  Object* builtins_[builtin_count];
+  bool initialized_;
+
+  friend class Isolate;
+
+  DISALLOW_COPY_AND_ASSIGN(Builtins);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_H_
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
new file mode 100644
index 0000000..f31ba6f
--- /dev/null
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -0,0 +1,3039 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/code-factory.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- eax                : number of arguments excluding receiver
+  //  -- edi                : target
+  //  -- edx                : new.target
+  //  -- esp[0]             : return address
+  //  -- esp[4]             : last argument
+  //  -- ...
+  //  -- esp[4 * argc]      : first argument
+  //  -- esp[4 * (argc + 1)]: receiver
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects eax to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ add(eax, Immediate(num_extra_args + 1));
+
+  // Insert extra arguments.
+  __ PopReturnAddressTo(ecx);
+  __ SmiTag(eax);
+  __ Push(eax);
+  __ SmiUntag(eax);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
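+  // Stack layout at this point (top of stack first), for illustration:
+  //   return address, new.target (edx), target (edi), smi-tagged argc (eax),
+  //   original arguments..., receiver.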
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+    // Function is also the parameter to the runtime call.
+    __ push(edi);
+
+    __ CallRuntime(function_id, 1);
+    __ mov(ebx, eax);
+
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // the stack limit as a cue for an interrupt signal.
+  Label ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm->isolate());
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, Label::kNear);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- esi: context
+  //  -- edi: constructor function
+  //  -- ebx: allocation site or undefined
+  //  -- edx: new target
+  // -----------------------------------
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(ebx);
+    __ push(esi);
+    __ push(ebx);
+    __ SmiTag(eax);
+    __ push(eax);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(edi);
+      __ Push(edx);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
+
+      // ----------- S t a t e -------------
+      //  -- edi: constructor function
+      //  -- ebx: newly allocated object
+      //  -- edx: new target
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ mov(eax, Operand(esp, 0));
+    }
+
+    __ SmiUntag(eax);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ push(ebx);
+      __ push(ebx);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ mov(ecx, eax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(ebx, ecx, times_4, 0));
+    __ bind(&entry);
+    __ dec(ecx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      __ JumpIfSmi(eax, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+      __ j(above_equal, &exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ mov(eax, Operand(esp, 0));
+
+      // Restore the arguments count and leave the construct frame. The
+      // arguments count is stored below the receiver.
+      __ bind(&exit);
+      __ mov(ebx, Operand(esp, 1 * kPointerSize));
+    } else {
+      __ mov(ebx, Operand(esp, 0));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(eax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  // Remove caller arguments from the stack and return.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+  }
+  __ ret(0);
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ push(edi);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
+
+// Clobbers ecx, edx, edi; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+                                        IsTagged eax_is_tagged) {
+  // eax   : the number of items to be pushed to the stack
+  //
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already be overflowed
+  // here which will cause ecx to become negative.
+  __ mov(ecx, esp);
+  __ sub(ecx, edi);
+  // Make edx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edx, eax);
+  int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
+  __ shl(edx, kPointerSizeLog2 - smi_tag);
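+  // A smi-tagged count already carries a factor of two (value << 1), so
+  // shifting by one bit less still yields count * kPointerSize bytes.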
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, edx);
+  __ j(greater, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(esi, Operand::StaticVariable(context_address));
+
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
+
+    // Push the function and the receiver onto the stack.
+    __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+    // Load the number of arguments and set up the pointer to the arguments.
+    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+    // Check if we have enough stack space to push all arguments.
+    // Expects argument count in eax. Clobbers ecx, edx, edi.
+    Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop.
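+    // In C terms: for (ecx = 0; ecx != eax; ecx++) push(*argv[ecx]); each
+    // argv slot holds a handle (an Object**), hence the extra dereference.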
+    Label loop, entry;
+    __ Move(ecx, Immediate(0));
+    __ jmp(&entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+    __ push(Operand(edx, 0));                    // dereference handle
+    __ inc(ecx);
+    __ bind(&entry);
+    __ cmp(ecx, eax);
+    __ j(not_equal, &loop);
+
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
+
+    // Get the new.target and function from the frame.
+    __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
+    __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code invocation.
+  }
+  __ ret(kPointerSize);  // Remove receiver.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : the value to pass to the generator
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+  // Load suspended function and context.
+  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Pop return address.
+  __ PopReturnAddressTo(eax);
+
+  // Push receiver.
+  __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : return address
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- edi    : generator function
+  //  -- esi    : generator context
+  //  -- esp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
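+  // The parameter count is a smi; subtracting Smi::FromInt(1) until the
+  // subtraction borrows pushes exactly that many holes.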
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(ecx, Immediate(Smi::FromInt(1)));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ PushReturnAddressFrom(eax);
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mov(edx, ebx);
+    __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(eax);  // Return address.
+    __ Push(ebp);                   // Caller's frame pointer.
+    __ Move(ebp, esp);
+    __ Push(esi);  // Callee's context.
+    __ Push(edi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+    {
+      Label done_loop, loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ j(equal, &done_loop, Label::kNear);
+      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+           Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+    // Resume the generator function at the continuation.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(ecx);
+    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ mov(eax, ebx);  // Continuation expects generator object in eax.
+    __ jmp(edx);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+  __ mov(args_count,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(args_count,
+         FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
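+  // The parameter size is in bytes and includes the receiver, so it can be
+  // added to esp directly when dropping the arguments below.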
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ pop(return_pc);
+  __ add(esp, args_count);
+  __ push(return_pc);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o edi: the JS function object being called
+//   o edx: the new target
+//   o esi: our context
+//   o ebp: the caller's frame pointer
+//   o esp: stack pointer (pointing to return address)
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+  __ push(edx);  // Callee's new target.
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+         Immediate(DebugInfo::uninitialized()));
+  __ j(not_equal, &load_debug_bytecode_array);
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ Move(ecx, masm->CodeObject());  // Self-reference to this code.
+  __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+  __ j(not_equal, &switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     eax);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Push bytecode array.
+  __ push(kInterpreterBytecodeArrayRegister);
+  // Push Smi tagged initial bytecode array offset.
+  __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+                             BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ mov(ecx, esp);
+    __ sub(ecx, ebx);
+    ExternalReference stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ cmp(ecx, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+    __ jmp(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(eax);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ sub(ebx, Immediate(kPointerSize));
+    __ j(greater_equal, &loop_header);
+  }
+
+  // Load accumulator, bytecode offset and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
+  __ call(ebx);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
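+  // The pc recorded here is used by Generate_InterpreterEnterBytecodeDispatch
+  // to reconstruct the return address back into this trampoline.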
+
+  // The return value is in eax.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+  __ ret(0);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ jmp(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ pop(edx);  // Callee's new target.
+  __ pop(edi);  // Callee's JS function.
+  __ pop(esi);  // Callee's context.
+  __ leave();   // Leave the frame so we can tail call.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+  __ RecordWriteCodeEntryField(edi, ecx, ebx);
+  __ jmp(ecx);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+  __ mov(kContextRegister,
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(eax);
+
+    // Push function as argument and compile for baseline.
+    __ push(edi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(eax);
+  }
+  __ ret(0);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register array_limit) {
+  // ----------- S t a t e -------------
+  //  -- ebx : Pointer to the last argument in the args array.
+  //  -- array_limit : Pointer to one before the first argument in the
+  //                   args array.
+  // -----------------------------------
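+  // In effect: for (p = ebx; p > array_limit; p -= kPointerSize) Push(*p);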
+  Label loop_header, loop_check;
+  __ jmp(&loop_check);
+  __ bind(&loop_header);
+  __ Push(Operand(ebx, 0));
+  __ sub(ebx, Immediate(kPointerSize));
+  __ bind(&loop_check);
+  __ cmp(ebx, array_limit);
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(edx);
+
+  // Find the address of the last argument.
+  __ mov(ecx, eax);
+  __ add(ecx, Immediate(1));  // Add one for receiver.
+  __ shl(ecx, kPointerSizeLog2);
+  __ neg(ecx);
+  __ add(ecx, ebx);
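+  // i.e. ecx = ebx - (eax + 1) * kPointerSize; the loop below pushes the
+  // eax + 1 values (receiver included) stored in (ecx, ebx].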
+
+  Generate_InterpreterPushArgs(masm, ecx);
+
+  // Call the target.
+  __ Push(edx);  // Re-push return address.
+
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target
+  //  -- edi : the constructor
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(ecx);
+
+  // Push edi into the slot meant for the receiver. We need an extra
+  // register, so edi is stored on the stack temporarily.
+  __ Push(edi);
+
+  // Find the address of the last argument.
+  __ mov(edi, eax);
+  __ neg(edi);
+  __ shl(edi, kPointerSizeLog2);
+  __ add(edi, ebx);
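+  // i.e. edi = ebx - eax * kPointerSize; the loop below pushes the eax
+  // arguments stored in (edi, ebx].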
+
+  Generate_InterpreterPushArgs(masm, edi);
+
+  // Restore the constructor from the stack; it was pushed into the slot
+  // meant for the receiver.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+
+  // Re-push return address.
+  __ Push(ecx);
+
+  // Call the constructor with unmodified eax, edx, edi values.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadHeapObject(ebx,
+                    masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+                        Code::kHeaderSize - kHeapObjectTag));
+  __ push(ebx);
+
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ mov(kInterpreterBytecodeArrayRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     ebx);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
+  __ jmp(ebx);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = edi;
+  Register new_target = edx;
+  Register argument_count = eax;
+
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = ebx;
+  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Immediate(Smi::FromInt(2)));
+  __ j(less, &gotta_call_runtime);
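+  // The optimized code map is a FixedArray of kEntryLength-wide entries;
+  // slot 0 is the context-free kSharedCodeIndex entry checked further below,
+  // so a length below 2 means there are no context-specific entries to walk.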
+
+  // Find literals.
+  // edx : native context
+  // ebx : length / index
+  // eax : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = edx;
+  __ mov(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  Register temp = edi;
+
+  // Does the native context match?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousContext));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ mov(ecx, Operand(esp, 0));
+  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+  __ push(index);
+  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = ecx;
+  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, eax);
+
+  // Link the closure into the optimized function list.
+  // ecx : code entry
+  // edx : native context
+  // edi : closure
+  __ mov(ebx,
+         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+         closure);
+  // Save closure before the write barrier.
+  __ mov(ebx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+                            kDontSaveFPRegs);
+  __ mov(closure, ebx);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
+  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Immediate(Smi::FromInt(1)));
+  __ j(greater, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility: check the context-free optimized code map entry.
+  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                      SharedFunctionInfo::kSharedCodeIndex));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+  __ and_(ebx, Code::KindField::kMask);
+  __ shr(ebx, Code::KindField::kShift);
+  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, ebx);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve the argument count for the later comparison.
+    __ mov(ecx, eax);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+
+    // The function.
+    __ push(edi);
+    // Copy arguments from caller (stdlib, foreign, heap).
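+    // At most three arguments are expected; each unrolled case below pushes
+    // the j arguments the caller actually passed and pads the remainder with
+    // undefined, so the runtime always sees exactly three.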
+    Label args_done;
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ cmp(ecx, Immediate(j));
+        __ j(not_equal, &over, Label::kNear);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ Push(Operand(
+            ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done, Label::kNear);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(eax, &failed, Label::kNear);
+
+    __ Drop(2);
+    __ Pop(ecx);
+    __ SmiUntag(ecx);
+    scope.GenerateLeaveFrame();
+
+    __ PopReturnAddressTo(ebx);
+    __ inc(ecx);
+    __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
+    __ PushReturnAddressFrom(ebx);
+    __ ret(0);
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection, which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. To keep the code faster we
+  // also don't build an internal frame, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Re-execute the code that was patched back to the young age when
+  // the stub returns.
+  __ sub(Operand(esp, 0), Immediate(5));
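+  // (5 matches the length of the ia32 call instruction that invoked this
+  // stub; backing the return address up by that much makes the ret below
+  // re-run the restored, now-young sequence.)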
+  __ pushad();
+  __ mov(eax, Operand(esp, 8 * kPointerSize));
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ mov(Operand(esp, 0), eax);
+    __ CallCFunction(
+        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  }
+  __ popad();
+  __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection, which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+  __ pushad();
+  __ mov(eax, Operand(esp, 8 * kPointerSize));
+  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ mov(Operand(esp, 0), eax);
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        2);
+  }
+  __ popad();
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ pop(eax);   // Pop return address into scratch register.
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+  __ push(eax);  // Push return address after frame prologue.
+
+  // Jump to point after the code-age stub.
+  __ ret(0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ pushad();
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(esp, 0));  // Ignore state offset
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass deoptimization type to the runtime system.
+    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+    // Tear down internal frame.
+  }
+
+  // Get the full codegen state from the stack and untag it.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ SmiUntag(ecx);
+
+  // Switch on the state.
+  Label not_no_registers, not_tos_eax;
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
+  __ j(not_equal, &not_no_registers, Label::kNear);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
+  __ j(not_equal, &not_tos_eax, Label::kNear);
+  __ ret(2 * kPointerSize);  // Remove state, eax.
+
+  __ bind(&not_tos_eax);
+  __ Abort(kNoCasesLeft);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- eax    : number of arguments
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into eax and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ mov(eax, Operand(esp, kPointerSize));
+    __ JumpIfSmi(eax, &receiver_not_date);
+    __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+    __ j(not_equal, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
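+      // The per-object cache stamp is compared against the isolate-wide date
+      // cache stamp; a match means the cached fields are still valid (the
+      // global stamp is bumped whenever the date cache is reset).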
+      Label stamp_mismatch;
+      __ mov(edx, Operand::StaticVariable(
+                      ExternalReference::date_cache_stamp(masm->isolate())));
+      __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+      __ j(not_equal, &stamp_mismatch, Label::kNear);
+      __ mov(eax, FieldOperand(
+                      eax, JSDate::kValueOffset + field_index * kPointerSize));
+      __ ret(1 * kPointerSize);
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 0), eax);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ ret(1 * kPointerSize);
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Move(ebx, Immediate(0));
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argArray
+  //  -- esp[8]  : thisArg
+  //  -- esp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into edi, argArray into eax (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
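+  // For example, with argc == 2 the receiver is at esp[3 * 4], thisArg at
+  // esp[2 * 4] and argArray at esp[1 * 4], matching the loads below.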
+  {
+    Label no_arg_array, no_this_arg;
+    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ mov(ebx, edx);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ test(eax, eax);
+    __ j(zero, &no_this_arg, Label::kNear);
+    {
+      __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+      __ cmp(eax, Immediate(1));
+      __ j(equal, &no_arg_array, Label::kNear);
+      __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+      __ bind(&no_arg_array);
+    }
+    __ bind(&no_this_arg);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argArray
+  //  -- edi    : receiver
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &receiver_not_callable, Label::kNear);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+  __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+                Label::kNear);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ Set(eax, 0);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // esp[0]           : Return address
+  // esp[8]           : Argument n
+  // esp[16]          : Argument n-1
+  //  ...
+  // esp[8 * n]       : Argument 1
+  // esp[8 * (n + 1)] : Receiver (callable to call)
+  //
+  // eax contains the number of arguments, n, not counting the receiver.
+  //
+  // 1. Make sure we have at least one argument.
+  {
+    Label done;
+    __ test(eax, eax);
+    __ j(not_zero, &done, Label::kNear);
+    __ PopReturnAddressTo(ebx);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ebx);
+    __ inc(eax);
+    __ bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
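+  //    In C terms: for (i = argc; i >= 0; --i) esp[i + 1] = esp[i]; the
+  //    duplicated return address left at esp[0] is then popped off.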
+  {
+    Label loop;
+    __ mov(ecx, eax);
+    __ bind(&loop);
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+    __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
+    __ dec(ecx);
+    __ j(not_sign, &loop);  // While non-negative (to copy return address).
+    __ pop(ebx);            // Discard copy of return address.
+    __ dec(eax);  // One fewer argument (first argument is new receiver).
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argumentsList
+  //  -- esp[8]  : thisArgument
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ j(equal, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &target_not_callable, Label::kNear);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : new.target (optional)
+  //  -- esp[8]  : argumentsList
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // new.target into edx (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined as
+  // the receiver instead.
+  {
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ mov(edx, edi);
+    __ j(equal, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edx    : new.target
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &target_not_constructor, Label::kNear);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &new_target_not_constructor, Label::kNear);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edx);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi-tag test below catches both a NULL pointer and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // tail call a stub
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+  __ mov(edx, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi-tag test below catches both a NULL pointer and a Smi.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // tail call a stub
+  __ mov(ebx, masm->isolate()->factory()->undefined_value());
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in edx and the double value in xmm0.
+  __ LoadRoot(edx, root_index);
+  __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ Move(ecx, eax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters have been processed.
+    __ test(ecx, ecx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into ebx.
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into xmm1, converting the
+    // parameter to a number first with the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(ebx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(eax);
+      __ SmiTag(ecx);
+      __ EnterBuiltinFrame(esi, edi, eax);
+      __ Push(ecx);
+      __ Push(edx);
+      __ mov(eax, ebx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(ecx);
+      __ LeaveBuiltinFrame(esi, edi, eax);
+      __ SmiUntag(ecx);
+      __ SmiUntag(eax);
+      {
+        // Restore the double accumulator value (xmm0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+        __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiUntag(edx);
+        __ Cvtsi2sd(xmm0, edx);
+        __ SmiTag(edx);
+        __ bind(&done_restore);
+      }
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ movsd(xmm1, FieldOperand(ebx, HeapNumber::kValueOffset));
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiUntag(ebx);
+    __ Cvtsi2sd(xmm1, ebx);
+    __ SmiTag(ebx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (xmm0) and the next parameter value on the right hand side (xmm1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+    __ ucomisd(xmm0, xmm1);
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ movaps(xmm0, xmm1);
+    __ mov(edx, ebx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(edx, Heap::kNanValueRootIndex);
+    __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
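+    // movmskpd extracts the sign bits of the double lanes; bit 0 set means
+    // the checked value (xmm1 for kMin, xmm0 for kMax) has its sign bit set,
+    // which for the zero case identifies -0. Swapping then makes Min prefer
+    // -0 and Max prefer +0 (swapping equal values is otherwise harmless).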
+    __ bind(&compare_equal);
+    __ Push(edi);  // Preserve function in edi.
+    __ movmskpd(edi, reg);
+    __ test(edi, Immediate(1));
+    __ Pop(edi);
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    __ dec(ecx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(ecx);
+  __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(ecx);
+  __ mov(eax, edx);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into ebx.
+  Label no_arguments;
+  {
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(eax);
+    __ EnterBuiltinFrame(esi, edi, eax);
+    __ mov(eax, ebx);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(esi, edi, ebx);  // Argc popped to ebx.
+    __ SmiUntag(ebx);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+
+  // 2b. No arguments, return +0 (already in eax).
+  __ bind(&no_arguments);
+  __ ret(1 * kPointerSize);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Store the smi-tagged argc in ecx.
+  __ mov(ecx, eax);
+  __ SmiTag(ecx);
+
+  // 2. Load the first argument into ebx.
+  {
+    Label no_arguments, done;
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ Move(ebx, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure ebx is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(ebx, &done_convert);
+    __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(equal, &done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterBuiltinFrame(esi, edi, ecx);
+      __ Push(edx);
+      __ Move(eax, ebx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(ebx, eax);
+      __ Pop(edx);
+      __ LeaveBuiltinFrame(esi, edi, ecx);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, done_alloc, new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
+  __ jmp(&drop_frame_and_ret);
+
+  __ bind(&done_alloc);
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));  // Restore esi.
+
+  // 6. Fall back to the runtime to create the new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ EnterBuiltinFrame(esi, edi, ecx);
+    __ Push(ebx);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(esi, edi, ecx);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(esi);
+    __ SmiUntag(ecx);
+    __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(esi);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
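+  // Roughly, in JS terms:
+  //   String()       -> ""
+  //   String(symbol) -> the symbol's descriptive string
+  //   String(value)  -> ToString(value)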
+  // 1. Load the first argument into eax.
+  Label no_arguments;
+  {
+    __ mov(ebx, eax);  // Store argc in ebx.
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
+  }
+
+  // 2a. At least one argument, return eax if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(eax, &to_string, Label::kNear);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+    __ j(above, &to_string, Label::kNear);
+    __ j(equal, &symbol_descriptive_string, Label::kNear);
+    __ jmp(&drop_frame_and_ret, Label::kNear);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+    __ ret(1 * kPointerSize);
+  }
+
+  // 3a. Convert eax to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(ebx);
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(esi, edi, ebx);
+    __ SmiUntag(ebx);
+  }
+  __ jmp(&drop_frame_and_ret, Label::kNear);
+
+  // 3b. Convert symbol in eax to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ Push(eax);
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
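+  // Roughly, in JS terms: new String(value) yields a JSValue wrapper whose
+  // [[StringData]] is ToString(value), or the empty string when no argument
+  // is given.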
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  __ mov(ebx, eax);
+
+  // 2. Load the first argument into eax.
+  {
+    Label no_arguments, done;
+    __ test(ebx, ebx);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure eax is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(eax, &convert, Label::kNear);
+    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+    __ j(below, &done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(ebx);
+      __ EnterBuiltinFrame(esi, edi, ebx);
+      __ Push(edx);
+      __ CallStub(&stub);
+      __ Pop(edx);
+      __ LeaveBuiltinFrame(esi, edi, ebx);
+      __ SmiUntag(ebx);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, done_alloc, new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
+  // as needed after the call.
+  __ mov(esi, eax);
+  __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
+  __ jmp(&drop_frame_and_ret);
+
+  __ bind(&done_alloc);
+  {
+    // Restore eax to the first argument and esi to the context.
+    __ mov(eax, esi);
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  }
+
+  // 6. Fall back to the runtime to create the new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(ebx);
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ Push(eax);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(esi, edi, ebx);
+    __ SmiUntag(ebx);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+}
+
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+                                       Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already be overflowed
+  // here which will cause ecx to become negative.
+  __ mov(ecx, esp);
+  __ sub(ecx, edi);
+  // Make edi the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edi, ebx);
+  __ shl(edi, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, edi);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(ebp);
+  __ mov(ebp, esp);
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Push the function on the stack.
+  __ push(edi);
+
+  // Store the number of arguments on the stack. eax, ebx and ecx must stay
+  // intact because these registers are used when copying the arguments and
+  // the receiver.
+  STATIC_ASSERT(kSmiTagSize == 1);
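+  // With a one-bit Smi tag, lea computes edi = eax + eax + kSmiTag, i.e. the
+  // Smi-tagged argument count, without clobbering eax, ebx or ecx.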
+  __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
+  __ push(edi);
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack.
+  __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ leave();
+
+  // Remove caller arguments from the stack.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
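+  // ebx holds the Smi-tagged count (value << 1), so scaling it by times_2
+  // yields count * kPointerSize on this 32-bit target.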
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
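+  // Shared tail of Function.prototype.apply, Reflect.apply and
+  // Reflect.construct: unpack argumentsList onto the stack, then dispatch to
+  // Call or Construct depending on new.target.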
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(eax, &create_runtime);
+
+    // Load the map of argumentsList into ecx.
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+    // Load native context into ebx.
+    __ mov(ebx, NativeContextOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+    __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+    __ j(equal, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edi);
+      __ Push(edx);
+      __ Push(eax);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(edx);
+      __ Pop(edi);
+      __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+    }
+    __ jmp(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
+    __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+    __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+    __ j(not_equal, &create_runtime);
+    __ SmiUntag(ebx);
+    __ mov(eax, ecx);
+    __ jmp(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(ecx);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmp(ecx, Immediate(FAST_ELEMENTS));
+    __ j(above, &create_runtime);
+    __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+    __ j(equal, &create_runtime);
+    __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+    __ SmiUntag(ebx);
+    __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here which will cause ecx to become negative.
+    __ neg(ecx);
+    __ add(ecx, esp);
+    __ sar(ecx, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, ebx);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- edi    : target
+  //  -- eax    : args (a FixedArray built from argumentsList)
+  //  -- ebx    : len (number of elements to push from args)
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
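+    // Stash new.target in xmm0; every general-purpose register is needed
+    // while the arguments are copied onto the stack.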
+    __ movd(xmm0, edx);
+    __ PopReturnAddressTo(edx);
+    __ Move(ecx, Immediate(0));
+    Label done, loop;
+    __ bind(&loop);
+    __ cmp(ecx, ebx);
+    __ j(equal, &done, Label::kNear);
+    __ Push(
+        FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    __ inc(ecx);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(edx);
+    __ movd(edx, xmm0);
+    __ Move(eax, ebx);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ movzx_b(scratch1,
+             Operand::StaticVariable(is_tail_call_elimination_enabled));
+  __ cmp(scratch1, Immediate(0));
+  __ j(equal, &done, Label::kNear);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(ebp, scratch2);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(scratch1,
+         FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(
+      caller_args_count_reg,
+      FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, 0);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+            Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ j(not_zero, &class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
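+  // Strict mode and native functions take the receiver as-is (ES6
+  // OrdinaryCallBindThis), so they skip the conversion below.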
+  Label done_convert;
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
+            Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                      (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ j(not_zero, &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : the shared function info.
+    //  -- edi : the function to call (checked to be a JSFunction)
+    //  -- esi : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(ecx);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+      __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+      __ j(above_equal, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy, Label::kNear);
+        __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+                         Label::kNear);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(ecx);
+        }
+        __ jmp(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(eax);
+        __ Push(eax);
+        __ Push(edi);
+        __ mov(eax, ecx);
+        __ Push(esi);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(esi);
+        __ mov(ecx, eax);
+        __ Pop(edi);
+        __ Pop(eax);
+        __ SmiUntag(eax);
+      }
+      __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the shared function info.
+  //  -- edi : the function to call (checked to be a JSFunction)
+  //  -- esi : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+    // Reload shared function info.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  }
+
+  __ mov(ebx,
+         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(ebx);
+  ParameterCount actual(eax);
+  ParameterCount expected(ebx);
+  __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+  // The function is a "classConstructor"; we need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ push(edi);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : new.target (only in case of [[Construct]])
+  //  -- edi : target (checked to be a JSBoundFunction)
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into ecx and length of that into ebx.
+  Label no_bound_arguments;
+  __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+  __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ SmiUntag(ebx);
+  __ test(ebx, ebx);
+  __ j(zero, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : new.target (only in case of [[Construct]])
+    //  -- edi : target (checked to be a JSBoundFunction)
+    //  -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- ebx : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+      __ sub(esp, ecx);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+      __ j(greater, &done, Label::kNear);  // Signed comparison.
+      // Restore the stack pointer.
+      __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Adjust effective number of arguments to include return address.
+    __ inc(eax);
+
+    // Relocate arguments and return address down the stack.
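+    // esp was lowered by |ebx| slots above; copy the return address and the
+    // existing arguments down into the freed space, using xmm0 as scratch
+    // since no general-purpose register is free.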
+    {
+      Label loop;
+      __ Set(ecx, 0);
+      __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+      __ bind(&loop);
+      __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0));
+      __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0);
+      __ inc(ecx);
+      __ cmp(ecx, eax);
+      __ j(less, &loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+      __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+      __ bind(&loop);
+      __ dec(ebx);
+      __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+      __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0);
+      __ lea(eax, Operand(eax, 1));
+      __ j(greater, &loop);
+    }
+
+    // Adjust effective number of arguments (eax contains the number of
+    // arguments from the call plus return address plus the number of
+    // [[BoundArguments]]), so we need to subtract one for the return address.
+    __ dec(eax);
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(ExternalReference(
+                  Builtins::kCall_ReceiverIsAny, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(edi, &non_callable);
+  __ bind(&non_smi);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+       RelocInfo::CODE_TARGET);
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &non_callable);
+
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ PushReturnAddressFrom(ecx);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ add(eax, Immediate(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // ebx to contain either an AllocationSite or undefined.
+  __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label done;
+    __ cmp(edi, edx);
+    __ j(not_equal, &done, Label::kNear);
+    __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&done);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(
+                  ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the constructor to call (checked to be a JSProxy)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  // Include the pushed new_target, constructor and the receiver.
+  __ add(eax, Immediate(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- edi : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+
+  // Dispatch based on instance type.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Construct]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &non_constructor, Label::kNear);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+       RelocInfo::CODE_TARGET);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : message_id as Smi
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
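+  // Smis and HeapNumbers are already numbers and are returned unchanged;
+  // everything else is handed off to the NonNumberToNumber builtin.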
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : new target (passed through to callee)
+  //  -- edi : function (passed through to callee)
+  // -----------------------------------
+
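+  // Adapts the actual argument count in eax to the expected parameter count
+  // in ebx, filling any missing arguments with undefined.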
+  Label invoke, dont_adapt_arguments, stack_overflow;
+  __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
+
+  Label enough, too_few;
+  __ cmp(eax, ebx);
+  __ j(less, &too_few);
+  __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    __ mov(eax, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(eax);
+    __ push(Operand(edi, 0));
+    __ sub(edi, Immediate(kPointerSize));
+    __ cmp(eax, ebx);
+    __ j(less, &copy);
+    // eax now contains the expected number of arguments.
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Remember expected arguments in ecx.
+    __ mov(ecx, ebx);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    // ebx = expected - actual.
+    __ sub(ebx, eax);
+    // eax = -actual - 1
+    __ neg(eax);
+    __ sub(eax, Immediate(1));
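+    // eax now counts up from -(actual + 1); the loop below copies the
+    // receiver plus all actual arguments and stops once eax reaches zero.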
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(eax);
+    __ push(Operand(edi, 0));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
+    __ j(not_zero, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ bind(&fill);
+    __ inc(eax);
+    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+    __ cmp(eax, ebx);
+    __ j(less, &fill);
+
+    // Restore expected arguments.
+    __ mov(eax, ecx);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  // Restore function pointer.
+  __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  // eax : expected number of arguments
+  // edx : new target (passed through to callee)
+  // edi : function (passed through to callee)
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ call(ecx);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ jmp(ecx);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ int3();
+  }
+}
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Label* receiver_check_failed) {
+  // If there is no signature, return the holder.
+  __ CompareRoot(FieldOperand(function_template_info,
+                              FunctionTemplateInfo::kSignatureOffset),
+                 Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // Walk the prototype chain.
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(scratch0, scratch0, scratch1);
+  __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+  Label next_prototype;
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Get the constructor's signature.
+  __ mov(scratch0,
+         FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(scratch0,
+         FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmp(scratch0, FieldOperand(function_template_info,
+                                FunctionTemplateInfo::kSignatureOffset));
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+  __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Otherwise load the parent function template and iterate.
+  __ mov(scratch0,
+         FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+  __ jmp(&function_template_loop, Label::kNear);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ test(FieldOperand(receiver, Map::kBitField3Offset),
+          Immediate(Map::HasHiddenPrototype::kMask));
+  __ j(zero, receiver_check_failed);
+
+  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ jmp(&prototype_loop_start, Label::kNear);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                : number of arguments (not including the receiver)
+  //  -- edi                : callee
+  //  -- esi                : context
+  //  -- esp[0]             : return address
+  //  -- esp[4]             : last argument
+  //  -- ...
+  //  -- esp[eax * 4]       : first argument
+  //  -- esp[(eax + 1) * 4] : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+  __ Push(eax);
+  CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+  __ Pop(eax);
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+  __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(edx);
+
+  // Compatible receiver check failed: pop return address, arguments and
+  // receiver and throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  __ Pop(eax);
+  __ PopReturnAddressTo(ebx);
+  __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+  __ add(esp, eax);
+  __ PushReturnAddressFrom(ebx);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+  }
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(eax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  Label skip;
+  // If the code object is null, just return to the caller.
+  __ cmp(eax, Immediate(0));
+  __ j(not_equal, &skip, Label::kNear);
+  __ ret(0);
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the
+  // actual JavaScript frame. This is the case when OSR is triggered from
+  // bytecode.
+  if (has_handler_frame) {
+    __ leave();
+  }
+
+  // Load deoptimization data from the code object.
+  __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+                               DeoptimizationInputData::kOsrPcOffsetIndex) -
+                               kHeapObjectTag));
+  __ SmiUntag(ebx);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+  // Overwrite the return address on the stack.
+  __ mov(Operand(esp, 0), eax);
+
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/builtins/mips/OWNERS b/src/builtins/mips/OWNERS
new file mode 100644
index 0000000..89455a4
--- /dev/null
+++ b/src/builtins/mips/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
new file mode 100644
index 0000000..003eeb2
--- /dev/null
+++ b/src/builtins/mips/builtins-mips.cc
@@ -0,0 +1,3014 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : target
+  //  -- a3                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects a0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ Addu(a0, a0, num_extra_args + 1);
+
+  // Insert extra arguments.
+  __ SmiTag(a0);
+  __ Push(a0, a1, a3);
+  __ SmiUntag(a0);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             PROTECT, exit_frame_type == BUILTIN_EXIT);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, t0);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, t0,
+              Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, t0,
+              Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, t0);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, t0,
+              Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, t0,
+              Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // Tail call a stub.
+  __ mov(a3, a1);
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
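+  // (+Infinity is the identity element for Min, -Infinity the one for Max.)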
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in t2 and the double value in f0.
+  __ LoadRoot(t2, root_index);
+  __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+  Label done_loop, loop;
+  __ mov(a3, a0);
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ Subu(a3, a3, Operand(1));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+
+    // Load the next parameter tagged value into a2.
+    __ Lsa(at, sp, a3, kPointerSizeLog2);
+    __ lw(a2, MemOperand(at));
+
+    // Load the double value of the parameter into f2, converting the
+    // parameter to a number first via the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(a2, &convert_smi);
+    __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(a0);
+      __ SmiTag(a3);
+      __ EnterBuiltinFrame(cp, a1, a0);
+      __ Push(t2, a3);
+      __ mov(a0, a2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mov(a2, v0);
+      __ Pop(t2, a3);
+      __ LeaveBuiltinFrame(cp, a1, a0);
+      __ SmiUntag(a3);
+      __ SmiUntag(a0);
+      {
+        // Restore the double accumulator value (f0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(t2, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
+        __ jmp(&done_restore);
+        __ bind(&restore_smi);
+        __ SmiToDoubleFPURegister(t2, f0, t0);
+        __ bind(&done_restore);
+      }
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+    __ jmp(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDoubleFPURegister(a2, f2, t0);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison using the Min/Max macro instructions,
+    // with the accumulator value on the left hand side (f0) and the next
+    // parameter value on the right hand side (f2). Afterwards we need to
+    // work out which HeapNumber (or smi) the result came from.
+    Label compare_nan, set_value;
+    __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+    __ Move(t0, t1, f0);
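+    // Remember the raw bits of the old accumulator; if the Min/Max result
+    // differs bitwise, the new parameter produced it and its tagged value
+    // must be captured in t2.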
+    if (kind == MathMaxMinKind::kMin) {
+      __ MinNaNCheck_d(f0, f0, f2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ MaxNaNCheck_d(f0, f0, f2);
+    }
+    __ Move(at, t8, f0);
+    __ Branch(&set_value, ne, t0, Operand(at));
+    __ Branch(&set_value, ne, t1, Operand(t8));
+    __ jmp(&loop);
+    __ bind(&set_value);
+    __ mov(t2, a2);
+    __ jmp(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(t2, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ Addu(a0, a0, Operand(1));
+  __ Lsa(sp, sp, a0, kPointerSizeLog2);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, t2);  // In delay slot.
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into a0.
+  Label no_arguments;
+  {
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(t1, a0, Operand(1));  // In delay slot.
+    __ mov(t0, a0);               // Store argc in t0.
+    __ Lsa(at, sp, t1, kPointerSizeLog2);
+    __ lw(a0, MemOperand(at));
+  }
+
+  // 2a. Convert first argument to number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ Lsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ Move(v0, Smi::FromInt(0));
+  __ DropAndRet(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- a3                     : new target
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0.
+  {
+    Label no_arguments, done;
+    __ mov(t0, a0);  // Store argc in t0.
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(t1, a0, Operand(1));  // In delay slot.
+    __ Lsa(at, sp, t1, kPointerSizeLog2);
+    __ lw(a0, MemOperand(at));
+    __ jmp(&done);
+    __ bind(&no_arguments);
+    __ Move(a0, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure a0 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(a0, &done_convert);
+    __ GetObjectType(a0, a2, a2);
+    __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(t0);
+      __ EnterBuiltinFrame(cp, a1, t0);
+      __ Push(a3);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(a0, v0);
+      __ Pop(a3);
+      __ LeaveBuiltinFrame(cp, a1, t0);
+      __ SmiUntag(t0);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+  __ jmp(&drop_frame_and_ret);
+
+  // 6. Fall back to the runtime to create the new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Push(a0);  // first argument
+    __ CallStub(&stub);
+    __ Pop(a0);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Lsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into a0.
+  Label no_arguments;
+  {
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(t1, a0, Operand(1));  // In delay slot.
+    __ mov(t0, a0);  // Store argc in t0.
+    __ Lsa(at, sp, t1, kPointerSizeLog2);
+    __ lw(a0, MemOperand(at));
+  }
+
+  // 2a. At least one argument, return a0 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(a0, &to_string);
+    __ GetObjectType(a0, t1, t1);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
+    __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
+    __ Branch(&to_string, gt, t1, Operand(zero_reg));
+    __ mov(v0, a0);
+    __ jmp(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+    __ DropAndRet(1);
+  }
+
+  // 3a. Convert a0 to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ jmp(&drop_frame_and_ret);
+
+  // 3b. Convert symbol in a0 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Lsa(sp, sp, t0, kPointerSizeLog2);
+    __ Drop(1);
+    __ Push(a0);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Lsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- a3                     : new target
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0.
+  {
+    Label no_arguments, done;
+    __ mov(t0, a0);  // Store argc in t0.
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(t1, a0, Operand(1));  // In delay slot.
+    __ Lsa(at, sp, t1, kPointerSizeLog2);
+    __ lw(a0, MemOperand(at));
+    __ jmp(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(a0, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure a0 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(a0, &convert);
+    __ GetObjectType(a0, a2, a2);
+    __ And(t1, a2, Operand(kIsNotStringMask));
+    __ Branch(&done_convert, eq, t1, Operand(zero_reg));
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(t0);
+      __ EnterBuiltinFrame(cp, a1, t0);
+      __ Push(a3);
+      __ CallStub(&stub);
+      __ Move(a0, v0);
+      __ Pop(a3);
+      __ LeaveBuiltinFrame(cp, a1, t0);
+      __ SmiUntag(t0);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+  __ jmp(&drop_frame_and_ret);
+
+  // 6. Fall back to the runtime to create the new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Push(a0);  // first argument
+    __ CallStub(&stub);
+    __ Pop(a0);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Lsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
+
+    __ CallRuntime(function_id, 1);
+
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+
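+  // v0 holds the (tagged) Code object returned by the runtime call above;
+  // adding Code::kHeaderSize - kHeapObjectTag yields the address of its first
+  // instruction.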
+  __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(t0));
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- a2     : allocation site or undefined
+  //  -- a3     : new target
+  //  -- cp     : context
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(a2, t0);
+    __ SmiTag(a0);
+    __ Push(cp, a2, a0);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(a1, a3);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(t4, v0);
+      __ Pop(a1, a3);
+
+      // ----------- S t a t e -------------
+      //  -- a1: constructor function
+      //  -- a3: new target
+      //  -- t4: newly allocated object
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ lw(a0, MemOperand(sp));
+    }
+
+    __ SmiUntag(a0);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(t4, t4);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: new target
+    // t4: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ SmiTag(t4, a0);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
+    __ lw(t1, MemOperand(t0));
+    __ push(t1);
+    __ bind(&entry);
+    __ Addu(t4, t4, Operand(-2));
+    __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
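+    // Note: t4 counts down in smi units (subtracting 2 equals one smi-tagged
+    // decrement), and the Lsa shift of kPointerSizeLog2 - kSmiTagSize turns
+    // the smi index directly into a byte offset from a2.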
+
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    // a3: new target
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(v0, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ GetObjectType(v0, a1, a3);
+      __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ lw(v0, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ lw(a1, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi; a Smi result would indicate that the
+  // constructor result from a derived class is neither undefined nor an
+  // Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(v0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
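+  // a1 holds the smi-tagged argument count, so the shift of
+  // kPointerSizeLog2 - 1 already accounts for the smi tag; the extra
+  // kPointerSize drops the receiver.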
+  __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
+  __ Addu(sp, sp, kPointerSize);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ Push(a1);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers a2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already be overflowed
+  // here which will cause a2 to become negative.
+  __ Subu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ sll(t3, argc, kPointerSizeLog2 - kSmiTagSize);
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ sll(t3, argc, kPointerSizeLog2);
+  }
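+  // In C terms the check below is roughly (a sketch):
+  //   intptr_t space_left = sp - real_stack_limit;  // may be negative
+  //   intptr_t needed = argc << kPointerSizeLog2;   // argument bytes
+  //   if (space_left <= needed) ThrowStackOverflow();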
+  // Signed comparison.
+  __ Branch(&okay, gt, a2, Operand(t3));
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody
+
+  // ----------- S t a t e -------------
+  //  -- a0: new.target
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ li(cp, Operand(context_address));
+    __ lw(cp, MemOperand(cp));
+
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers a2.
+    Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+
+    // Remember new.target.
+    __ mov(t1, a0);
+
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to first arg
+    Label loop, entry;
+    __ Lsa(t2, s0, a3, kPointerSizeLog2);
+    __ b(&entry);
+    __ nop();  // Branch delay slot nop.
+    // t2 points past last arg.
+    __ bind(&loop);
+    __ lw(t0, MemOperand(s0));  // Read next parameter.
+    __ addiu(s0, s0, kPointerSize);
+    __ lw(t0, MemOperand(t0));  // Dereference handle.
+    __ push(t0);                // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(t2));
+
+    // Set up new.target and argc.
+    __ mov(a0, a3);
+    __ mov(a3, t1);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, t0);
+    __ mov(s2, t0);
+    __ mov(s3, t0);
+    __ mov(s4, t0);
+    __ mov(s5, t0);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Leave internal frame.
+  }
+
+  __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : the value to pass to the generator
+  //  -- a1 : the JSGeneratorObject to resume
+  //  -- a2 : the resume mode (tagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(a1);
+
+  // Store input value into generator object.
+  __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ li(t1, Operand(last_step_action));
+  __ lb(t1, MemOperand(t1));
+  __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ li(t1, Operand(debug_suspended_generator));
+  __ lw(t1, MemOperand(t1));
+  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
+  __ bind(&stepping_prepared);
+
+  // Push receiver.
+  __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+  __ Push(t1);
+
+  // ----------- S t a t e -------------
+  //  -- a1    : the JSGeneratorObject to resume
+  //  -- a2    : the resume mode (tagged)
+  //  -- t0    : generator function
+  //  -- cp    : generator context
+  //  -- ra    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
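+  // a3 held the smi-tagged formal parameter count; each iteration above
+  // subtracted Smi::FromInt(1), so exactly one hole was pushed per declared
+  // parameter.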
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+  __ GetObjectType(a3, a3, a3);
+  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(a0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(a3, a1);
+    __ Move(a1, t0);
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Jump(a2);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, t0);
+
+    // Restore the operand stack.
+    __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+    __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+    __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a0, Operand(a3));
+      __ lw(t1, MemOperand(a0));
+      __ Push(t1);
+      __ Branch(USE_DELAY_SLOT, &loop);
+      __ addiu(a0, a0, kPointerSize);  // In delay slot.
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+    __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+    __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(a2);
+    __ Addu(a3, a3, Operand(a2));
+    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Move(v0, a1);  // Continuation expects generator object in v0.
+    __ Jump(a3);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, t0);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ lw(args_count,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(args_count,
+        FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Addu(sp, sp, args_count);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o a1: the JS function object being called.
+//   o a3: the new target
+//   o cp: our context
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o ra: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(a1);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(a0));
+  __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&load_debug_bytecode_array, ne, debug_info,
+            Operand(DebugInfo::uninitialized()));
+  __ lw(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ lw(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
+  __ Branch(&switch_to_different_code_kind, ne, a0,
+            Operand(masm->CodeObject()));  // Self-reference to this code.
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+              Operand(zero_reg));
+    __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+              Operand(BYTECODE_ARRAY_TYPE));
+  }
+
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
+  __ Push(a3, kInterpreterBytecodeArrayRegister, t0);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ Subu(t1, sp, Operand(t0));
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    __ Branch(&ok, hs, t1, Operand(a2));
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+    __ Branch(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(t1);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ Subu(t0, t0, Operand(kPointerSize));
+    __ Branch(&loop_header, ge, t0, Operand(zero_reg));
+  }
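+  // t0 held the frame size in bytes, so the loop above pushed exactly
+  // frame_size / kPointerSize undefined values, one per register file slot.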
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ Addu(a0, kInterpreterBytecodeArrayRegister,
+          kInterpreterBytecodeOffsetRegister);
+  __ lbu(a0, MemOperand(a0));
+  __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
+  __ lw(at, MemOperand(at));
+  __ Call(at);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in v0.
+  LeaveInterpreterFrame(masm, t0);
+  __ Jump(ra);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  __ lw(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ Branch(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
+  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(a1, t0, t1);
+  __ Jump(t0);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ lw(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, t0);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(v0);
+
+    // Push function as argument and compile for baseline.
+    __ push(a1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(v0);
+  }
+  __ Jump(ra);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a2 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ Addu(a3, a0, Operand(1));  // Add one for receiver.
+  __ sll(a3, a3, kPointerSizeLog2);
+  __ Subu(a3, a2, Operand(a3));
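+  // For example, with a0 == 2 arguments a3 now points 3 * kPointerSize bytes
+  // below a2: two argument slots plus the receiver slot.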
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ lw(t0, MemOperand(a2));
+  __ Addu(a2, a2, Operand(-kPointerSize));
+  __ push(t0);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, a2, Operand(a3));
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- a0 : argument count (not including receiver)
+  // -- a3 : new target
+  // -- a1 : constructor to call
+  // -- a2 : address of the first argument
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ sll(t0, a0, kPointerSizeLog2);
+  __ Subu(t0, a2, Operand(t0));
+
+  // Push a slot for the receiver.
+  __ push(zero_reg);
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ lw(t1, MemOperand(a2));
+  __ Addu(a2, a2, Operand(-kPointerSize));
+  __ push(t1);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, a2, Operand(t0));
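+  // Unlike the call variant above, the receiver is not part of the copied
+  // range here; a placeholder (zero_reg) was pushed for it instead.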
+
+  // Call the constructor with a0, a1, and a3 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+  __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+                          Code::kHeaderSize - kHeapObjectTag));
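+  // ra now points into the interpreter entry trampoline, just after its
+  // internal dispatch call, so bytecode handlers return as if they had been
+  // reached through the trampoline itself.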
+
+  // Initialize the dispatch table register.
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ lw(kInterpreterBytecodeArrayRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+              Operand(zero_reg));
+    __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+              Operand(BYTECODE_ARRAY_TYPE));
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ lw(kInterpreterBytecodeOffsetRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ Addu(a1, kInterpreterBytecodeArrayRegister,
+          kInterpreterBytecodeOffsetRegister);
+  __ lbu(a1, MemOperand(a1));
+  __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
+  __ lw(a1, MemOperand(a1));
+  __ Jump(a1);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = a0;
+  Register closure = a1;
+  Register new_target = a3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = a0;
+  Register index = a2;
+  __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+  // Find literals.
+  // a3  : native context
+  // a2  : length / index
+  // a0  : optimized code map
+  // stack[0] : closure
+  // stack[4] : new target
+  // stack[8] : argument count
+  Register native_context = a3;
+  __ lw(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = a1;
+  Register array_pointer = t1;
+
+  // Does the native context match?
+  __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(array_pointer, map, Operand(at));
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousContext));
+  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+  // OSR id set to none?
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+  // Literals available?
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ lw(t0, MemOperand(sp, 0));
+  __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = t0;
+  __ lw(entry,
+        FieldMemOperand(array_pointer,
+                        SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, t1);
+
+  // Link the closure into the optimized function list.
+  // t0 : code entry
+  // a3 : native context
+  // a1 : closure
+  __ lw(t1,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ sw(closure,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(t1, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+                            kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, t1);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ Subu(index, index,
+          Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
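+  // Entries in the optimized code map are kEntryLength slots long and are
+  // walked here from the end of the array towards its start; the smi
+  // comparison against 1 stops the walk before the header slots.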
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility: check the context-free optimized code map entry.
+  __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(t1, t1, Operand(Code::KindField::kMask));
+  __ srl(t1, t1, Code::KindField::kShift);
+  __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
+  // Yes, install the full code.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, t1);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : new target (preserved for callee)
+  //  -- a3 : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ Move(t4, a0);
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
+
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ Branch(&over, ne, t4, Operand(j));
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ lw(t4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                     i * kPointerSize));
+        __ push(t4);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(v0, &failed);
+
+    __ Drop(2);
+    __ pop(t4);
+    __ SmiUntag(t4);
+    scope.GenerateLeaveFrame();
+
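+    // Drop the arguments and the receiver: (argc + 1) * kPointerSize bytes.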
+    __ Addu(t4, t4, Operand(1));
+    __ Lsa(sp, sp, t4, kPointerSizeLog2);
+    __ Ret();
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  //   a3 - new target
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(saved_regs);
+  __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  //   a3 - new target
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(saved_regs);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(a1);
+
+  // Jump to point after the code-age stub.
+  __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(a0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification; this is important for compiled
+    // stubs that tail call the runtime on deopts, passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ Addu(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ Jump(ra);                             // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> t2.
+  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(t2);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ Branch(&with_tos_register, ne, t2,
+            Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Addu will emit one instruction.
+  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+
+  __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
+  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, t2,
+            Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
+
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Addu will emit one instruction.
+  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers {t2, t3, t4, t5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Label* receiver_check_failed) {
+  Register signature = t2;
+  Register map = t3;
+  Register constructor = t4;
+  Register scratch = t5;
+
+  // If there is no signature, return the holder.
+  __ lw(signature, FieldMemOperand(function_template_info,
+                                   FunctionTemplateInfo::kSignatureOffset));
+  Label receiver_check_passed;
+  __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+                &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, scratch, scratch);
+  Label next_prototype;
+  __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+  Register type = constructor;
+  __ lw(type,
+        FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+            USE_DELAY_SLOT);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ GetObjectType(type, scratch, scratch);
+  __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+  // Otherwise load the parent function template and iterate.
+  __ lw(type,
+        FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ Branch(&function_template_loop);
+
+  // Load the next prototype and iterate.
+  __ bind(&next_prototype);
+  __ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
+  __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+  __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  __ Branch(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : callee
+  //  -- ra                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ Lsa(t8, sp, a0, kPointerSizeLog2);
+  __ lw(t0, MemOperand(t8));
+  CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ lw(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+  __ lw(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+  __ Addu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t2);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ Addu(t8, t8, Operand(kPointerSize));
+  __ addu(sp, t8, zero_reg);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Look up the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the caller.
+  __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+                               DeoptimizationInputData::kOsrPcOffsetIndex) -
+                               kHeapObjectTag));
+  __ SmiUntag(a1);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ addu(v0, v0, a1);
+  __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- a0    : number of arguments
+  //  -- a1    : function
+  //  -- cp    : context
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(a0);
+    __ JumpIfSmi(a0, &receiver_not_date);
+    __ GetObjectType(a0, t0, t0);
+    __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ Ret(USE_DELAY_SLOT);
+    __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset));  // In delay slot.
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ lw(a1, MemOperand(a1));
+      __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+      __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+      __ Ret(USE_DELAY_SLOT);
+      __ lw(v0, FieldMemOperand(
+                    a0, JSDate::kValueOffset +
+                            field_index * kPointerSize));  // In delay slot.
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, t0);
+    __ li(a1, Operand(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(a0);
+    __ Move(a0, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, a1, a0);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
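+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the cache-stamp fast path above. A cached field is only trusted while
+// the per-isolate date cache stamp matches the stamp recorded on the JSDate
+// object; otherwise the runtime recomputes the cache. Names are stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+#include <cstdint>
+
+struct DateSketch {
+  int64_t cache_stamp;  // cf. JSDate::kCacheStampOffset
+  double fields[10];    // cached field values
+};
+
+double GetDateField(const DateSketch& date, int field_index,
+                    int64_t isolate_stamp,
+                    double (*runtime_get_field)(const DateSketch&, int)) {
+  if (date.cache_stamp == isolate_stamp) {
+    return date.fields[field_index];  // Fast path: cache is still valid.
+  }
+  return runtime_get_field(date, field_index);  // Slow path via the runtime.
+}
+#endif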
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into a1, argArray into a0 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg;
+    Register scratch = t0;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ mov(a3, a2);
+    // Lsa() cannot be used here because the scratch value is needed later.
+    __ sll(scratch, a0, kPointerSizeLog2);
+    __ Addu(a0, sp, Operand(scratch));
+    __ lw(a1, MemOperand(a0));  // receiver
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a2, MemOperand(a0));  // thisArg
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a3, MemOperand(a0));  // argArray
+    __ bind(&no_arg);
+    __ Addu(sp, sp, Operand(scratch));
+    __ sw(a2, MemOperand(sp));
+    __ mov(a0, a3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argArray
+  //  -- a1    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(a1, &receiver_not_callable);
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsCallable));
+  __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg));
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ mov(a0, zero_reg);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ sw(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
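+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of step 1 above, showing how thisArg and argArray default to undefined
+// when fewer arguments are present. The vector models the stack with
+// stack[0] = receiver, stack[1] = thisArg, stack[2] = argArray.
+#if 0  // Illustrative sketch, not compiled.
+#include <cstddef>
+#include <vector>
+
+struct Value {};  // Stand-in for a tagged value.
+
+void ExtractApplyArguments(const std::vector<Value>& stack, size_t argc,
+                           Value undefined, Value* receiver, Value* this_arg,
+                           Value* arg_array) {
+  // Precondition: stack.size() == argc + 1 (arguments plus receiver).
+  *this_arg = undefined;
+  *arg_array = undefined;
+  *receiver = stack[0];
+  if (argc >= 1) *this_arg = stack[1];
+  if (argc >= 2) *arg_array = stack[2];
+}
+#endif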
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  {
+    Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ Addu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack.
+  // a0: actual number of arguments
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
+  __ lw(a1, MemOperand(at));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ Lsa(a2, sp, a0, kPointerSizeLog2);
+
+    __ bind(&loop);
+    __ lw(at, MemOperand(a2, -kPointerSize));
+    __ sw(at, MemOperand(a2));
+    __ Subu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Subu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
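+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of step 3 above. fn.call(thisArg, a, b) becomes a call of fn with
+// receiver thisArg and arguments (a, b): every value slides down one slot,
+// the duplicate on top is dropped, and the argument count shrinks by one.
+#if 0  // Illustrative sketch, not compiled.
+#include <vector>
+
+struct Value {};  // Stand-in for a tagged value.
+
+void ShiftCallArguments(std::vector<Value>* stack, int* argc) {
+  // Precondition: *argc >= 1 (step 1 above pushes undefined if needed);
+  // stack layout: [receiver(fn), thisArg, arg1, ..., argN].
+  for (int i = 0; i < *argc; ++i) (*stack)[i] = (*stack)[i + 1];
+  stack->pop_back();  // Drop the stale copy of the last argument.
+  *argc -= 1;         // thisArg became the receiver, not an argument.
+}
+#endif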
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label no_arg;
+    Register scratch = t0;
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ mov(a2, a1);
+    __ mov(a3, a1);
+    __ sll(scratch, a0, kPointerSizeLog2);
+    __ mov(a0, scratch);
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+    __ Addu(a0, sp, Operand(a0));
+    __ lw(a1, MemOperand(a0));  // target
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a2, MemOperand(a0));  // thisArgument
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a3, MemOperand(a0));  // argumentsList
+    __ bind(&no_arg);
+    __ Addu(sp, sp, Operand(scratch));
+    __ sw(a2, MemOperand(sp));
+    __ mov(a0, a3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a1    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(a1, &target_not_callable);
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsCallable));
+  __ Branch(&target_not_callable, eq, t0, Operand(zero_reg));
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ sw(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+  // new.target into a3 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined as
+  // the receiver instead.
+  {
+    Label no_arg;
+    Register scratch = t0;
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ mov(a2, a1);
+    // Lsa() cannot be used here because the scratch value is needed later.
+    __ sll(scratch, a0, kPointerSizeLog2);
+    __ Addu(a0, sp, Operand(scratch));
+    __ sw(a2, MemOperand(a0));  // receiver
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a1, MemOperand(a0));  // target
+    __ mov(a3, a1);             // new.target defaults to target
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a2, MemOperand(a0));  // argumentsList
+    __ Subu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ lw(a3, MemOperand(a0));  // new.target
+    __ bind(&no_arg);
+    __ Addu(sp, sp, Operand(scratch));
+    __ mov(a0, a2);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a3    : new.target
+  //  -- a1    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(a1, &target_not_constructor);
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+  __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(a3, &new_target_not_constructor);
+  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+  __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ sw(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ sw(a3, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- a0 : actual number of arguments
+  //  -- a1 : function (passed through to callee)
+  //  -- a2 : expected number of arguments
+  //  -- a3 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
+  // Make t1 the space we have left. The stack might already be overflowed
+  // here which will cause t1 to become negative.
+  __ subu(t1, sp, t1);
+  // Check if the arguments will overflow the stack.
+  __ sll(at, a2, kPointerSizeLog2);
+  // Signed comparison.
+  __ Branch(stack_overflow, le, t1, Operand(at));
+}
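+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the overflow check above. The subtraction is deliberately signed: if
+// sp is already below the real stack limit, space_left is negative and the
+// check still fails. Names are stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+#include <cstdint>
+
+bool AdaptorWouldOverflowStack(intptr_t sp, intptr_t real_stack_limit,
+                               intptr_t expected_args, intptr_t pointer_size) {
+  intptr_t space_left = sp - real_stack_limit;
+  return space_left <= expected_args * pointer_size;
+}
+#endif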
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ sll(a0, a0, kSmiTagSize);
+  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                          kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then drop the parameters from the stack.
+  __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                             kPointerSize)));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
+  __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
+  // Adjust for the receiver.
+  __ Addu(sp, sp, Operand(kPointerSize));
+}
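+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the Smi tagging that the sll/sra pairs in the two helpers above
+// implement, assuming 32-bit MIPS with kSmiTag == 0 and kSmiTagSize == 1.
+#if 0  // Illustrative sketch, not compiled.
+#include <cstdint>
+
+int32_t SmiTagSketch(int32_t value) { return value << 1; }  // sll by 1
+int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }    // sra by 1
+#endif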
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a1    : target
+  //  -- a3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(a0, &create_runtime);
+
+    // Load the map of argumentsList into a2.
+    __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+    // Load native context into t0.
+    __ lw(t0, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, a2, Operand(at));
+    __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, a2, Operand(at));
+
+    // Check if argumentsList is a fast JSArray.
+    __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a1, a3, a0);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ mov(a0, v0);
+      __ Pop(a1, a3);
+      __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+      __ SmiUntag(a2);
+    }
+    __ Branch(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
+    __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
+    __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
+    __ Branch(&create_runtime, ne, a2, Operand(at));
+    __ SmiUntag(a2);
+    __ mov(a0, t0);
+    __ Branch(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(a2);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+    __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+    __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+    __ SmiUntag(a2);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(t0, Heap::kRealStackLimitRootIndex);
+    // Make t0 the space we have left. The stack might already be overflowed
+    // here, which will cause t0 to become negative.
+    __ Subu(t0, sp, t0);
+    // Check if the arguments will overflow the stack.
+    __ sll(at, a2, kPointerSizeLog2);
+    __ Branch(&done, gt, t0, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a1    : target
+  //  -- a0    : args (a FixedArray built from argumentsList)
+  //  -- a2    : len (number of elements to push from args)
+  //  -- a3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ mov(t0, zero_reg);
+    Label done, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, t0, Operand(a2));
+    __ Lsa(at, a0, t0, kPointerSizeLog2);
+    __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+    __ Push(at);
+    __ Addu(t0, t0, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+    __ Move(a0, t0);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    Label construct;
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&construct, ne, a3, Operand(at));
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ bind(&construct);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
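+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the dispatch above. Only packed-SMI and packed elements (and
+// unmodified arguments objects whose length matches their backing store)
+// are unpacked directly; holey, slow, or exotic inputs go through
+// Runtime::kCreateListFromArrayLike. Enum values are stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+enum class ElementsKindSketch { kPackedSmi, kHoleySmi, kPacked, kHoley };
+
+bool NeedsRuntimeListCreation(ElementsKindSketch kind) {
+  switch (kind) {
+    case ElementsKindSketch::kPackedSmi:
+    case ElementsKindSketch::kPacked:
+      return false;  // Elements array can be used as the argument list.
+    default:
+      return true;   // Holes would need mapping to undefined; ask the runtime.
+  }
+}
+#endif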
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ li(at, Operand(is_tail_call_elimination_enabled));
+  __ lb(scratch1, MemOperand(at));
+  __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ lw(scratch3,
+          MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ Branch(&no_interpreter_frame, ne, scratch3,
+              Operand(Smi::FromInt(StackFrame::STUB)));
+    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(scratch3,
+        MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ lw(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count.
+  __ lw(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ lw(scratch1,
+        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(caller_args_count_reg,
+        FieldMemOperand(scratch1,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
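+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the frame walk in PrepareForTailCall. If the caller frame is an
+// arguments adaptor, the actual argument count lives in the adaptor frame;
+// otherwise the caller's formal parameter count is used. Types are
+// stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+struct FrameSketch {
+  bool is_arguments_adaptor;
+  int adaptor_length;          // valid when is_arguments_adaptor
+  int formal_parameter_count;  // from the SharedFunctionInfo otherwise
+};
+
+int CallerArgsCount(const FrameSketch& caller) {
+  return caller.is_arguments_adaptor ? caller.adaptor_length
+                                     : caller.formal_parameter_count;
+}
+#endif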
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+  __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+  __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                         (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ Branch(&done_convert, ne, at, Operand(zero_reg));
+  {
+    // ----------- S t a t e -------------
+    //  -- a0 : the number of arguments (not including the receiver)
+    //  -- a1 : the function to call (checked to be a JSFunction)
+    //  -- a2 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(a3);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
+      __ lw(a3, MemOperand(at));
+      __ JumpIfSmi(a3, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ GetObjectType(a3, t0, t0);
+      __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(a3);
+        }
+        __ Branch(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+        __ Push(a0, a1);
+        __ mov(a0, a3);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ mov(a3, v0);
+        __ Pop(a0, a1);
+        __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+      }
+      __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
+    __ sw(a3, MemOperand(at));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSFunction)
+  //  -- a2 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  __ lw(a2,
+        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sra(a2, a2, kSmiTagSize);  // Un-tag.
+  ParameterCount actual(a0);
+  ParameterCount expected(a2);
+  __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ Push(a1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
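+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the receiver-conversion policy above: native and strict-mode functions
+// take the receiver as-is; for sloppy functions, null/undefined become the
+// global proxy and other primitives go through ToObject. Enum values are
+// stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+enum class ReceiverKind { kNullOrUndefined, kOtherPrimitive, kJSReceiver };
+enum class ReceiverAction { kKeep, kUseGlobalProxy, kToObject };
+
+ReceiverAction ConvertReceiver(bool sloppy_non_native, ReceiverKind receiver) {
+  if (!sloppy_non_native) return ReceiverAction::kKeep;
+  switch (receiver) {
+    case ReceiverKind::kNullOrUndefined:
+      return ReceiverAction::kUseGlobalProxy;
+    case ReceiverKind::kOtherPrimitive:
+      return ReceiverAction::kToObject;
+    case ReceiverKind::kJSReceiver:
+      return ReceiverAction::kKeep;
+  }
+  return ReceiverAction::kKeep;  // Unreachable.
+}
+#endif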
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(a1);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  {
+    __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+    __ Lsa(t0, sp, a0, kPointerSizeLog2);
+    __ sw(at, MemOperand(t0));
+  }
+
+  // Load [[BoundArguments]] into a2 and length of that into t0.
+  __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(t0);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+  //  -- t0 : the number of [[BoundArguments]]
+  // -----------------------------------
+
+  // Reserve stack space for the [[BoundArguments]].
+  {
+    Label done;
+    __ sll(t1, t0, kPointerSizeLog2);
+    __ Subu(sp, sp, Operand(t1));
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+    __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+    // Restore the stack pointer.
+    __ Addu(sp, sp, Operand(t1));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterFrame(StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowStackOverflow);
+    }
+    __ bind(&done);
+  }
+
+  // Relocate arguments down the stack.
+  {
+    Label loop, done_loop;
+    __ mov(t1, zero_reg);
+    __ bind(&loop);
+    __ Branch(&done_loop, gt, t1, Operand(a0));
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
+    __ lw(at, MemOperand(t2));
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
+    __ sw(at, MemOperand(t2));
+    __ Addu(t0, t0, Operand(1));
+    __ Addu(t1, t1, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Copy [[BoundArguments]] to the stack (below the arguments).
+  {
+    Label loop, done_loop;
+    __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+    __ SmiUntag(t0);
+    __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ bind(&loop);
+    __ Subu(t0, t0, Operand(1));
+    __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
+    __ lw(at, MemOperand(t1));
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
+    __ sw(at, MemOperand(t1));
+    __ Addu(a0, a0, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                      masm->isolate())));
+  __ lw(at, MemOperand(at));
+  __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
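+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of what the two copy loops above achieve: the final argument list is
+// [[BoundArguments]] followed by the original call arguments (and the
+// receiver slot gets [[BoundThis]]).
+#if 0  // Illustrative sketch, not compiled.
+#include <vector>
+
+struct Value {};  // Stand-in for a tagged value.
+
+std::vector<Value> SpliceBoundArguments(const std::vector<Value>& bound_args,
+                                        const std::vector<Value>& call_args) {
+  std::vector<Value> result;
+  result.reserve(bound_args.size() + call_args.size());
+  result.insert(result.end(), bound_args.begin(), bound_args.end());
+  result.insert(result.end(), call_args.begin(), call_args.end());
+  return result;
+}
+#endif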
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(a1, &non_callable);
+  __ bind(&non_smi);
+  __ GetObjectType(a1, t1, t2);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Check if target has a [[Call]] internal method.
+  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+  __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(a1);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ Addu(a0, a0, 2);
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
+  __ sw(a1, MemOperand(at));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
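+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the dispatch above. Enum values are stand-ins.
+#if 0  // Illustrative sketch, not compiled.
+enum class TargetKind { kJSFunction, kBoundFunction, kProxy,
+                        kOtherCallable, kNotCallable };
+enum class CallPath { kCallFunction, kCallBoundFunction, kProxyRuntimeCall,
+                      kCallAsFunctionDelegate, kThrowNotCallable };
+
+CallPath DispatchCall(TargetKind kind) {
+  switch (kind) {
+    case TargetKind::kJSFunction:    return CallPath::kCallFunction;
+    case TargetKind::kBoundFunction: return CallPath::kCallBoundFunction;
+    case TargetKind::kProxy:         return CallPath::kProxyRuntimeCall;
+    case TargetKind::kOtherCallable: return CallPath::kCallAsFunctionDelegate;
+    case TargetKind::kNotCallable:   return CallPath::kThrowNotCallable;
+  }
+  return CallPath::kThrowNotCallable;  // Unreachable.
+}
+#endif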
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (checked to be a JSFunction)
+  //  -- a3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // a2 to contain either an AllocationSite or undefined.
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(a1);
+
+  // Load [[BoundArguments]] into a2 and length of that into t0.
+  __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(t0);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+  //  -- a3 : the new target (checked to be a constructor)
+  //  -- t0 : the number of [[BoundArguments]]
+  // -----------------------------------
+
+  // Reserve stack space for the [[BoundArguments]].
+  {
+    Label done;
+    __ sll(t1, t0, kPointerSizeLog2);
+    __ Subu(sp, sp, Operand(t1));
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+    __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+    // Restore the stack pointer.
+    __ Addu(sp, sp, Operand(t1));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterFrame(StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowStackOverflow);
+    }
+    __ bind(&done);
+  }
+
+  // Relocate arguments down the stack.
+  {
+    Label loop, done_loop;
+    __ mov(t1, zero_reg);
+    __ bind(&loop);
+    __ Branch(&done_loop, ge, t1, Operand(a0));
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
+    __ lw(at, MemOperand(t2));
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
+    __ sw(at, MemOperand(t2));
+    __ Addu(t0, t0, Operand(1));
+    __ Addu(t1, t1, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Copy [[BoundArguments]] to the stack (below the arguments).
+  {
+    Label loop, done_loop;
+    __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+    __ SmiUntag(t0);
+    __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ bind(&loop);
+    __ Subu(t0, t0, Operand(1));
+    __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
+    __ lw(at, MemOperand(t1));
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
+    __ sw(at, MemOperand(t1));
+    __ Addu(a0, a0, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label skip_load;
+    __ Branch(&skip_load, ne, a1, Operand(a3));
+    __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&skip_load);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ lw(at, MemOperand(at));
+  __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (checked to be a JSProxy)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(a1, a3);
+  // Include the pushed new_target, constructor and the receiver.
+  __ Addu(a0, a0, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (can be any Object)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(a1, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+  // Check if target has a [[Construct]] internal method.
+  __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+  __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq, t2, Operand(JS_PROXY_TYPE));
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
+    __ sw(a1, MemOperand(at));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(a0, a1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : message_id as Smi
+  //  -- ra : return address
+  // -----------------------------------
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in a0.
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  //  -- a3: new target (passed through to callee)
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ Branch(&dont_adapt_arguments, eq, a2,
+            Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless, as the number of arguments should always be greater than 0.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into a0 and copy end address into t1.
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ sll(t1, a2, kPointerSizeLog2);
+    __ subu(t1, a0, t1);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    // t1: copy end address
+
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));
+    __ push(t0);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t1));
+    __ addiu(a0, a0, -kPointerSize);  // In delay slot.
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into a0 and copy end address into t3.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Addu(t3, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    // t3: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ Subu(sp, sp, kPointerSize);
+    __ Subu(a0, a0, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
+    __ sw(t0, MemOperand(sp));  // In the delay slot.
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ sll(t2, a2, kPointerSizeLog2);
+    __ Subu(t1, fp, Operand(t2));
+    // Adjust for frame.
+    __ Subu(t1, t1, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ Subu(sp, sp, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(t1));
+    __ sw(t0, MemOperand(sp));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ mov(a0, a2);
+  // a0 : expected number of arguments
+  // a1 : function (passed through to callee)
+  // a3 : new target (passed through to callee)
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(t0);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Jump(t0);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ break_(0xCC);
+  }
+}
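+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the adaptation above: too few actual arguments are padded with
+// undefined; with too many, only the expected ones are copied into the new
+// frame (the full actual count stays recorded in the adaptor frame).
+#if 0  // Illustrative sketch, not compiled.
+#include <cstddef>
+#include <vector>
+
+struct Value {};  // Stand-in for a tagged value.
+
+std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
+                                  size_t expected, Value undefined) {
+  std::vector<Value> adapted;
+  adapted.reserve(expected);
+  for (size_t i = 0; i < expected; ++i) {
+    adapted.push_back(i < actual.size() ? actual[i] : undefined);
+  }
+  return adapted;
+}
+#endif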
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/builtins/mips64/OWNERS b/src/builtins/mips64/OWNERS
new file mode 100644
index 0000000..89455a4
--- /dev/null
+++ b/src/builtins/mips64/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
new file mode 100644
index 0000000..cbdb5c3
--- /dev/null
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -0,0 +1,3009 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : target
+  //  -- a3                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument
+  //  -- sp[8 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects a0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ Daddu(a0, a0, num_extra_args + 1);
+
+  // Insert extra arguments.
+  __ SmiTag(a0);
+  __ Push(a0, a1, a3);
+  __ SmiUntag(a0);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             PROTECT, exit_frame_type == BUILTIN_EXIT);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
+              Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, a4,
+              Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
+              Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, a4,
+              Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // Tail call a stub.
+  __ mov(a3, a1);
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in t1 and the double value in f0.
+  __ LoadRoot(t1, root_index);
+  __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
+
+  Label done_loop, loop;
+  __ mov(a3, a0);
+  __ bind(&loop);
+  {
+    // Check if all parameters are done.
+    __ Dsubu(a3, a3, Operand(1));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+
+    // Load the next parameter tagged value into a2.
+    __ Dlsa(at, sp, a3, kPointerSizeLog2);
+    __ ld(a2, MemOperand(at));
+
+    // Load the double value of the parameter into f2, maybe converting the
+    // parameter to a number first using the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(a2, &convert_smi);
+    __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(a0);
+      __ SmiTag(a3);
+      __ EnterBuiltinFrame(cp, a1, a0);
+      __ Push(t1, a3);
+      __ mov(a0, a2);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mov(a2, v0);
+      __ Pop(t1, a3);
+      __ LeaveBuiltinFrame(cp, a1, a0);
+      __ SmiUntag(a3);
+      __ SmiUntag(a0);
+      {
+        // Restore the double accumulator value (f0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(t1, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
+        __ jmp(&done_restore);
+        __ bind(&restore_smi);
+        __ SmiToDoubleFPURegister(t1, f0, a4);
+        __ bind(&done_restore);
+      }
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+    __ jmp(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDoubleFPURegister(a2, f2, a4);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison using the Min/Max macro instructions, with
+    // the accumulator value on the left-hand side (f0) and the next parameter
+    // value on the right-hand side (f2).
+    // We need to work out which HeapNumber (or smi) the result came from.
+    Label compare_nan;
+    __ BranchF(nullptr, &compare_nan, eq, f0, f2);
+    __ Move(a4, f0);
+    if (kind == MathMaxMinKind::kMin) {
+      __ MinNaNCheck_d(f0, f0, f2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ MaxNaNCheck_d(f0, f0, f2);
+    }
+    __ Move(at, f0);
+    __ Branch(&loop, eq, a4, Operand(at));
+    __ mov(t1, a2);
+    __ jmp(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(t1, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ Daddu(a0, a0, Operand(1));
+  __ Dlsa(sp, sp, a0, kPointerSizeLog2);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, t1);  // In delay slot.
+}
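+
+// For illustration only: a minimal C++ sketch (not part of the V8 sources)
+// of the accumulation loop above for the kMax case: start from -Infinity,
+// fold in each (already converted) argument, and make NaN sticky. In the
+// real builtin the loop keeps running after seeing NaN because the
+// remaining arguments must still be converted via ToNumber for their side
+// effects; -0/+0 handling is left to the Max/Min macro instructions.
+#if 0  // Illustrative sketch, not compiled.
+#include <cmath>
+#include <vector>
+
+double MathMaxSketch(const std::vector<double>& args) {
+  double acc = -INFINITY;
+  for (double d : args) {
+    acc = (std::isnan(acc) || std::isnan(d)) ? NAN : (d > acc ? d : acc);
+  }
+  return acc;
+}
+#endif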
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Dsubu(t1, a0, Operand(1));  // In delay slot.
+    __ mov(t0, a0);                // Store argc in t0.
+    __ Dlsa(at, sp, t1, kPointerSizeLog2);
+    __ ld(a0, MemOperand(at));
+  }
+
+  // 2a. Convert first argument to number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ Move(v0, Smi::FromInt(0));
+  __ DropAndRet(1);
+}
+
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- a3                     : new target
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ mov(t0, a0);  // Store argc in t0.
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Dsubu(a0, a0, Operand(1));  // In delay slot.
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
+    __ ld(a0, MemOperand(at));
+    __ jmp(&done);
+    __ bind(&no_arguments);
+    __ Move(a0, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure a0 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(a0, &done_convert);
+    __ GetObjectType(a0, a2, a2);
+    __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(t0);
+      __ EnterBuiltinFrame(cp, a1, t0);
+      __ Push(a3);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(a0, v0);
+      __ Pop(a3);
+      __ LeaveBuiltinFrame(cp, a1, t0);
+      __ SmiUntag(t0);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+  __ jmp(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Push(a0);
+    __ CallStub(&stub);
+    __ Pop(a0);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Dsubu(t1, a0, Operand(1));  // In delay slot.
+    __ mov(t0, a0);                // Store argc in t0.
+    __ Dlsa(at, sp, t1, kPointerSizeLog2);
+    __ ld(a0, MemOperand(at));
+  }
+
+  // 2a. At least one argument, return a0 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(a0, &to_string);
+    __ GetObjectType(a0, t1, t1);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
+    __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
+    __ Branch(&to_string, gt, t1, Operand(zero_reg));
+    __ mov(v0, a0);
+    __ jmp(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+    __ DropAndRet(1);
+  }
+
+  // 3a. Convert a0 to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ jmp(&drop_frame_and_ret);
+
+  // 3b. Convert symbol in a0 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+    __ Drop(1);
+    __ Push(a0);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- a3                     : new target
+  //  -- cp                     : context
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ mov(t0, a0);  // Store argc in t0.
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Dsubu(a0, a0, Operand(1));  // In delay slot.
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
+    __ ld(a0, MemOperand(at));
+    __ jmp(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(a0, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure a0 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(a0, &convert);
+    __ GetObjectType(a0, a2, a2);
+    __ And(t1, a2, Operand(kIsNotStringMask));
+    __ Branch(&done_convert, eq, t1, Operand(zero_reg));
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(t0);
+      __ EnterBuiltinFrame(cp, a1, t0);
+      __ Push(a3);
+      __ CallStub(&stub);
+      __ Move(a0, v0);
+      __ Pop(a3);
+      __ LeaveBuiltinFrame(cp, a1, t0);
+      __ SmiUntag(t0);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+  __ jmp(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(t0);
+    __ EnterBuiltinFrame(cp, a1, t0);
+    __ Push(a0);
+    __ CallStub(&stub);
+    __ Pop(a0);
+    __ LeaveBuiltinFrame(cp, a1, t0);
+    __ SmiUntag(t0);
+  }
+  __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+    __ DropAndRet(1);
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve the argument count, the target function and the new target,
+    // and push the target function again as the argument to the runtime call.
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
+
+    __ CallRuntime(function_id, 1);
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+
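+  // The runtime call leaves the new Code object in v0; jump past its header
+  // to the first instruction.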
+  __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(a4, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(a4));
+
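+  // Falling through means sp is below the stack limit, i.e. an interrupt is
+  // pending, so take the opportunity to try installing optimized code now.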
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- a2     : allocation site or undefined
+  //  -- a3     : new target
+  //  -- cp     : context
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(a2, t0);
+    __ SmiTag(a0);
+    __ Push(cp, a2, a0);
+
+    if (create_implicit_receiver) {
+      __ Push(a1, a3);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(t0, v0);
+      __ Pop(a1, a3);
+
+      // ----------- S t a t e -------------
+      // -- a1: constructor function
+      // -- a3: new target
+      // -- t0: newly allocated object
+      // -----------------------------------
+      __ ld(a0, MemOperand(sp));
+    }
+    __ SmiUntag(a0);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(t0, t0);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: new target
+    // t0: newly allocated object (if any); reused below as the copy counter
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ mov(t0, a0);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ Dlsa(a4, a2, t0, kPointerSizeLog2);
+    __ ld(a5, MemOperand(a4));
+    __ push(a5);
+    __ bind(&entry);
+    __ Daddu(t0, t0, Operand(-1));
+    __ Branch(&loop, greater_equal, t0, Operand(zero_reg));
+
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    // a3: new target
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(v0, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ GetObjectType(v0, a1, a3);
+      __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ ld(v0, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ ld(a1, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // A Smi result means the constructor of a derived class returned something
+  // that is neither undefined nor an object; throw a TypeError in that case.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(v0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
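+  // Remove the caller arguments (a1 holds the smi-tagged argument count
+  // reloaded above) and the receiver from the stack.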
+  __ SmiScale(a4, a1, kPointerSizeLog2);
+  __ Daddu(sp, sp, a4);
+  __ Daddu(sp, sp, kPointerSize);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : the value to pass to the generator
+  //  -- a1 : the JSGeneratorObject to resume
+  //  -- a2 : the resume mode (tagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(a1);
+
+  // Store input value into generator object.
+  __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ li(a5, Operand(last_step_action));
+  __ lb(a5, MemOperand(a5));
+  __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ li(a5, Operand(debug_suspended_generator));
+  __ ld(a5, MemOperand(a5));
+  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+  __ bind(&stepping_prepared);
+
+  // Push receiver.
+  __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+  __ Push(a5);
+
+  // ----------- S t a t e -------------
+  //  -- a1    : the JSGeneratorObject to resume
+  //  -- a2    : the resume mode (tagged)
+  //  -- a4    : generator function
+  //  -- cp    : generator context
+  //  -- ra    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ Dsubu(a3, a3, Operand(1));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+  __ GetObjectType(a3, a3, a3);
+  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(a3, a1);
+    __ Move(a1, a4);
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Jump(a2);
+  }
+
+  // Old-style (full-codegen) generator object.
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, a4);
+
+    // Restore the operand stack.
+    __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+    __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+    __ SmiUntag(a3);
+    __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Dlsa(a3, a0, a3, kPointerSizeLog2);
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a0, Operand(a3));
+      __ ld(a5, MemOperand(a0));
+      __ Push(a5);
+      __ Branch(USE_DELAY_SLOT, &loop);
+      __ daddiu(a0, a0, kPointerSize);  // In delay slot.
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
+    __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+    __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(a2);
+    __ Daddu(a3, a3, Operand(a2));
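+    // a3 = code start + continuation offset: the raw address inside the
+    // full-codegen code at which the generator resumes.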
+    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Move(v0, a1);  // Continuation expects generator object in v0.
+    __ Jump(a3);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, a4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(a1, a2);
+  }
+  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ Push(a1);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers a2 and a7; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already be overflowed
+  // here, which will cause a2 to become negative.
+  __ dsubu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ SmiScale(a7, argc, kPointerSizeLog2);
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ dsll(a7, argc, kPointerSizeLog2);
+  }
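+  // a7 now holds the size of the arguments in bytes; there is enough room
+  // iff the remaining space (a2) is strictly greater. The comparison is
+  // signed, so an already-overflowed stack (negative a2) also fails.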
+  __ Branch(&okay, gt, a2, Operand(a7));  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody
+
+  // ----------- S t a t e -------------
+  //  -- a0: new.target
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Setup the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ li(cp, Operand(context_address));
+    __ ld(cp, MemOperand(cp));
+
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers a2.
+    Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+
+    // Remember new.target.
+    __ mov(a5, a0);
+
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to first arg
+    Label loop, entry;
+    __ Dlsa(a6, s0, a3, kPointerSizeLog2);
+    __ b(&entry);
+    __ nop();  // Branch delay slot nop.
+    // a6 points past last arg.
+    __ bind(&loop);
+    __ ld(a4, MemOperand(s0));  // Read next parameter.
+    __ daddiu(s0, s0, kPointerSize);
+    __ ld(a4, MemOperand(a4));  // Dereference handle.
+    __ push(a4);                // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(a6));
+
+    // Setup new.target and argc.
+    __ mov(a0, a3);
+    __ mov(a3, a5);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, a4);
+    __ mov(s2, a4);
+    __ mov(s3, a4);
+    __ mov(s4, a4);
+    __ mov(s5, a4);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Leave internal frame.
+  }
+  __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the size of the arguments + receiver area, in bytes.
+  __ ld(args_count,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(args_count,
+        FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ Daddu(sp, sp, args_count);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o a1: the JS function object being called.
+//   o a3: the new target
+//   o cp: our context
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o ra: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(a1);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(a0));
+  __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&load_debug_bytecode_array, ne, debug_info,
+            Operand(DebugInfo::uninitialized()));
+  __ ld(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
+  __ Branch(&switch_to_different_code_kind, ne, a0,
+            Operand(masm->CodeObject()));  // Self-reference to this code.
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+              Operand(zero_reg));
+    __ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+              Operand(BYTECODE_ARRAY_TYPE));
+  }
+
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+  __ Push(a3, kInterpreterBytecodeArrayRegister, a4);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size (word) from the BytecodeArray object.
+    __ lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ Dsubu(a5, sp, Operand(a4));
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    __ Branch(&ok, hs, a5, Operand(a2));
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+    __ Branch(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(a5);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ Dsubu(a4, a4, Operand(kPointerSize));
+    __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ Daddu(a0, kInterpreterBytecodeArrayRegister,
+           kInterpreterBytecodeOffsetRegister);
+  __ lbu(a0, MemOperand(a0));
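+  // a0 now holds the first bytecode; use it to index the dispatch table
+  // (one code entry per bytecode) and call the handler.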
+  __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
+  __ ld(at, MemOperand(at));
+  __ Call(at);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in v0.
+  LeaveInterpreterFrame(masm, t0);
+  __ Jump(ra);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  __ ld(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ Branch(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(a1, a4, a5);
+  __ Jump(a4);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ld(kContextRegister,
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, t0);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(v0);
+
+    // Push function as argument and compile for baseline.
+    __ push(a1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(v0);
+  }
+  __ Jump(ra);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a2 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ Daddu(a3, a0, Operand(1));  // Add one for receiver.
+  __ dsll(a3, a3, kPointerSizeLog2);
+  __ Dsubu(a3, a2, Operand(a3));
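+  // a3 now points one slot below the last value to push; the loop below
+  // pushes argc + 1 values (receiver plus arguments), reading downwards
+  // from a2 until a2 reaches a3.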
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ ld(t0, MemOperand(a2));
+  __ Daddu(a2, a2, Operand(-kPointerSize));
+  __ push(t0);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, a2, Operand(a3));
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- a0 : argument count (not including receiver)
+  // -- a3 : new target
+  // -- a1 : constructor to call
+  // -- a2 : address of the first argument
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ dsll(t0, a0, kPointerSizeLog2);
+  __ Dsubu(t0, a2, Operand(t0));
+
+  // Push a slot for the receiver.
+  __ push(zero_reg);
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Branch(&loop_check);
+  __ bind(&loop_header);
+  __ ld(t1, MemOperand(a2));
+  __ Daddu(a2, a2, Operand(-kPointerSize));
+  __ push(t1);
+  __ bind(&loop_check);
+  __ Branch(&loop_header, gt, a2, Operand(t0));
+
+  // Call the constructor with a0, a1, and a3 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+  __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+                           Code::kHeaderSize - kHeapObjectTag));
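+  // ra now points into the interpreter entry trampoline, just after its
+  // dispatch call, so returning from a bytecode handler resumes there.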
+
+  // Initialize the dispatch table register.
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ ld(kInterpreterBytecodeArrayRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+              Operand(zero_reg));
+    __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+              Operand(BYTECODE_ARRAY_TYPE));
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ ld(kInterpreterBytecodeOffsetRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ Daddu(a1, kInterpreterBytecodeArrayRegister,
+           kInterpreterBytecodeOffsetRegister);
+  __ lbu(a1, MemOperand(a1));
+  __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
+  __ ld(a1, MemOperand(a1));
+  __ Jump(a1);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = a0;
+  Register closure = a1;
+  Register new_target = a3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = a0;
+  Register index = a2;
+  __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
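+  // Judging by the offsets used below, each optimized code map entry holds
+  // (context, osr ast id, literals, cached code); the map is scanned from
+  // the end, and an index below 2 means there are no entries left.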
+  // Find literals.
+  // a3  : native context
+  // a2  : length / index
+  // a0  : optimized code map
+  // stack[0]  : closure
+  // stack[8]  : new target
+  // stack[16] : argument count
+  Register native_context = a3;
+  __ ld(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = a1;
+  Register array_pointer = a5;
+
+  // Does the native context match?
+  __ SmiScale(at, index, kPointerSizeLog2);
+  __ Daddu(array_pointer, map, Operand(at));
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousContext));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+  // OSR id set to none?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+  // Literals available?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ ld(a4, MemOperand(sp, 0));
+  __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = a4;
+  __ ld(entry,
+        FieldMemOperand(array_pointer,
+                        SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+
+  // Link the closure into the optimized function list.
+  // a4 : code entry
+  // a3 : native context
+  // a1 : closure
+  __ ld(a5,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ sd(closure,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(a5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+                            kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, a5);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ Dsubu(index, index,
+           Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context-independent optimized code map entry.
+  __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(a5, a5, Operand(Code::KindField::kMask));
+  __ dsrl(a5, a5, Code::KindField::kShift);
+  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
+  // Yes, install the full code.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : new target (preserved for callee)
+  //  -- a3 : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ Move(t2, a0);
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
+
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
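+    // The unrolled loops below push the j arguments the caller actually
+    // passed and pad the rest with undefined, so the runtime call always
+    // receives exactly three values (stdlib, foreign, heap).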
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ Branch(&over, ne, t2, Operand(j));
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                     i * kPointerSize));
+        __ push(t2);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(v0, &failed);
+
+    __ Drop(2);
+    __ pop(t2);
+    __ SmiUntag(t2);
+    scope.GenerateLeaveFrame();
+
+    __ Daddu(t2, t2, Operand(1));
+    __ Dlsa(sp, sp, t2, kPointerSizeLog2);
+    __ Ret();
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  //   a3 - new target
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(saved_regs);
+  __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  //   a3 - new target
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(saved_regs);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(a1);
+
+  // Jump to point after the code-age stub.
+  __ Daddu(a0, a0, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(a0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ Daddu(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ Jump(ra);                              // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> a6.
+  __ ld(a6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(a6);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
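+  // NO_REGISTERS means only the bailout state was pushed; TOS_REGISTER means
+  // the top-of-stack value was pushed as well and must be reloaded into v0.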
+  __ Branch(
+      &with_tos_register, ne, a6,
+      Operand(static_cast<int64_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Daddu will emit exactly one instruction.
+  __ Daddu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+
+  __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
+  __ ld(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(
+      &unknown_state, ne, a6,
+      Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
+
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Daddu will emit exactly one instruction.
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers {t2, t3, a4, a5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Label* receiver_check_failed) {
+  Register signature = t2;
+  Register map = t3;
+  Register constructor = a4;
+  Register scratch = a5;
+
+  // If there is no signature, return the holder.
+  __ ld(signature, FieldMemOperand(function_template_info,
+                                   FunctionTemplateInfo::kSignatureOffset));
+  Label receiver_check_passed;
+  __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+                &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, scratch, scratch);
+  Label next_prototype;
+  __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+  Register type = constructor;
+  __ ld(type,
+        FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+            USE_DELAY_SLOT);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ GetObjectType(type, scratch, scratch);
+  __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+  // Otherwise load the parent function template and iterate.
+  __ ld(type,
+        FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ Branch(&function_template_loop);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
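+  // Only hidden prototypes are walked; reaching an ordinary prototype ends
+  // the chain and the receiver check fails.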
+  __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+  __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ Branch(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : callee
+  //  -- ra                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument
+  //  -- sp[8 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ ld(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check
+  Label receiver_check_failed;
+  __ Dlsa(t8, sp, a0, kPointerSizeLog2);
+  __ ld(t0, MemOperand(t8));
+  CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ ld(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+  __ ld(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+  __ Daddu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t2);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ Daddu(t8, t8, Operand(kPointerSize));
+  __ daddu(sp, t8, zero_reg);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the caller.
+  __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+                               DeoptimizationInputData::kOsrPcOffsetIndex) -
+                               kHeapObjectTag));
+  __ SmiUntag(a1);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ daddu(v0, v0, a1);
+  __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments
+  //  -- a1                 : function
+  //  -- cp                 : context
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(a0);
+    __ JumpIfSmi(a0, &receiver_not_date);
+    __ GetObjectType(a0, t0, t0);
+    __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ Ret(USE_DELAY_SLOT);
+    __ ld(v0, FieldMemOperand(a0, JSDate::kValueOffset));  // In delay slot.
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
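+      // Cached date fields are only valid while the date cache stamp on the
+      // JSDate matches the isolate's current stamp; on mismatch, fall back
+      // to the C++ helper below to recompute the field.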
+      Label stamp_mismatch;
+      __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ ld(a1, MemOperand(a1));
+      __ ld(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+      __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+      __ Ret(USE_DELAY_SLOT);
+      __ ld(v0, FieldMemOperand(
+                    a0, JSDate::kValueOffset +
+                            field_index * kPointerSize));  // In delay slot.
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, t0);
+    __ li(a1, Operand(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(a0);
+    __ Move(a0, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, a1, a0);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into a1, argArray into a0 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg;
+    Register scratch = a4;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ mov(a3, a2);
+    // Dlsa() cannot be used here because the scratch value is needed later.
+    __ dsll(scratch, a0, kPointerSizeLog2);
+    __ Daddu(a0, sp, Operand(scratch));
+    __ ld(a1, MemOperand(a0));  // receiver
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a2, MemOperand(a0));  // thisArg
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a3, MemOperand(a0));  // argArray
+    __ bind(&no_arg);
+    __ Daddu(sp, sp, Operand(scratch));
+    __ sd(a2, MemOperand(sp));
+    __ mov(a0, a3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argArray
+  //  -- a1    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(a1, &receiver_not_callable);
+  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+  __ And(a4, a4, Operand(1 << Map::kIsCallable));
+  __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ mov(a0, zero_reg);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ sd(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
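+
+// Step 1 above, sketched in C++ over an upward-indexed view of the stack
+// (stack[0] == sp; illustrative only, assuming 8-byte slots):
+//
+//   Object* this_arg  = argc >= 1 ? stack[argc - 1] : undefined;
+//   Object* arg_array = argc >= 2 ? stack[argc - 2] : undefined;
+//   Object* receiver  = stack[argc];
+//   stack += argc;             // drop everything but one slot
+//   stack[0] = this_arg;       // thisArg becomes the sole stack argument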
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  {
+    Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ Daddu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack.
+  // a0: actual number of arguments
+  __ Dlsa(at, sp, a0, kPointerSizeLog2);
+  __ ld(a1, MemOperand(at));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ Dlsa(a2, sp, a0, kPointerSizeLog2);
+
+    __ bind(&loop);
+    __ ld(at, MemOperand(a2, -kPointerSize));
+    __ sd(at, MemOperand(a2));
+    __ Dsubu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Dsubu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
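+
+// The shift in step 3, as an illustrative sketch (stack[0] == sp):
+//
+//   Object* function = stack[argc];                  // old receiver slot
+//   for (int j = argc; j >= 1; j--) stack[j] = stack[j - 1];
+//   ++stack; --argc;   // pop the duplicated last argument; the original
+//                      // first argument is now the receiver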
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label no_arg;
+    Register scratch = a4;
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ mov(a2, a1);
+    __ mov(a3, a1);
+    __ dsll(scratch, a0, kPointerSizeLog2);
+    __ mov(a0, scratch);
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+    __ Daddu(a0, sp, Operand(a0));
+    __ ld(a1, MemOperand(a0));  // target
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a2, MemOperand(a0));  // thisArgument
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a3, MemOperand(a0));  // argumentsList
+    __ bind(&no_arg);
+    __ Daddu(sp, sp, Operand(scratch));
+    __ sd(a2, MemOperand(sp));
+    __ mov(a0, a3);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a1    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(a1, &target_not_callable);
+  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+  __ And(a4, a4, Operand(1 << Map::kIsCallable));
+  __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ sd(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+  // new.target into a3 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined as
+  // the new receiver instead.
+  {
+    Label no_arg;
+    Register scratch = a4;
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ mov(a2, a1);
+    // Dlsa() cannot be used here because the scratch value is needed later.
+    __ dsll(scratch, a0, kPointerSizeLog2);
+    __ Daddu(a0, sp, Operand(scratch));
+    __ sd(a2, MemOperand(a0));  // receiver
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a1, MemOperand(a0));  // target
+    __ mov(a3, a1);             // new.target defaults to target
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a2, MemOperand(a0));  // argumentsList
+    __ Dsubu(a0, a0, Operand(kPointerSize));
+    __ Branch(&no_arg, lt, a0, Operand(sp));
+    __ ld(a3, MemOperand(a0));  // new.target
+    __ bind(&no_arg);
+    __ Daddu(sp, sp, Operand(scratch));
+    __ mov(a0, a2);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a3    : new.target
+  //  -- a1    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(a1, &target_not_constructor);
+  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+  __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+  __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(a3, &new_target_not_constructor);
+  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+  __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+  __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ sd(a1, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ sd(a3, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- a0 : actual number of arguments
+  //  -- a1 : function (passed through to callee)
+  //  -- a2 : expected number of arguments
+  //  -- a3 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
+  // Make a5 the space we have left. The stack might already be overflowed
+  // here which will cause a5 to become negative.
+  __ dsubu(a5, sp, a5);
+  // Check if the arguments will overflow the stack.
+  __ dsll(at, a2, kPointerSizeLog2);
+  // Signed comparison.
+  __ Branch(stack_overflow, le, a5, Operand(at));
+}
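+
+// The overflow test above, in effect (illustrative):
+//
+//   intptr_t space = sp - real_stack_limit;     // may already be negative
+//   if (space <= expected_argc * kPointerSize) goto stack_overflow;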
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Smi-tag the argument count. On MIPS64 a smi keeps its payload in the
+  // upper 32 bits, so tagging is a 32-bit left shift.
+  __ dsll32(a0, a0, 0);
+  __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
+  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                           kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                             kPointerSize)));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
+  __ SmiScale(a4, a1, kPointerSizeLog2);
+  __ Daddu(sp, sp, a4);
+  // Adjust for the receiver.
+  __ Daddu(sp, sp, Operand(kPointerSize));
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : argumentsList
+  //  -- a1    : target
+  //  -- a3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(a0, &create_runtime);
+
+    // Load the map of argumentsList into a2.
+    __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+    // Load native context into a4.
+    __ ld(a4, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, a2, Operand(at));
+    __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, a2, Operand(at));
+
+    // Check if argumentsList is a fast JSArray.
+    __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a1, a3, a0);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ mov(a0, v0);
+      __ Pop(a1, a3);
+      __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+      __ SmiUntag(a2);
+    }
+    __ Branch(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
+    __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
+    __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+    __ Branch(&create_runtime, ne, a2, Operand(at));
+    __ SmiUntag(a2);
+    __ mov(a0, a4);
+    __ Branch(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(a2);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+    __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+    __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+    __ SmiUntag(a2);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(a4, Heap::kRealStackLimitRootIndex);
+    // Make a4 the space we have left. The stack might already be overflowed
+    // here, which will cause a4 to become negative.
+    __ Dsubu(a4, sp, a4);
+    // Check if the arguments will overflow the stack.
+    __ dsll(at, a2, kPointerSizeLog2);
+    __ Branch(&done, gt, a4, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- a1    : target
+  //  -- a0    : args (a FixedArray built from argumentsList)
+  //  -- a2    : len (number of elements to push from args)
+  //  -- a3    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ mov(a4, zero_reg);
+    Label done, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, a4, Operand(a2));
+    __ Dlsa(at, a0, a4, kPointerSizeLog2);
+    __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+    __ Push(at);
+    __ Daddu(a4, a4, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+    __ Move(a0, a4);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    Label construct;
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&construct, ne, a3, Operand(at));
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ bind(&construct);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
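+
+// Taken together, the steps above amount to the following (illustrative
+// pseudo-C++; the helper names are placeholders for the fast paths and
+// builtins used above):
+//
+//   FixedArray* args = IsFastCase(arguments_list)
+//                          ? ExtractElements(arguments_list)
+//                          : CreateListFromArrayLike(arguments_list);
+//   CheckStackSpace(args->length());
+//   for (int i = 0; i < args->length(); i++) Push(args->get(i));
+//   if (IsUndefined(new_target)) Call(target);
+//   else Construct(target, new_target);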
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ li(at, Operand(is_tail_call_elimination_enabled));
+  __ lb(scratch1, MemOperand(at));
+  __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ ld(scratch3,
+          MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ Branch(&no_interpreter_frame, ne, scratch3,
+              Operand(Smi::FromInt(StackFrame::STUB)));
+    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(scratch3,
+        MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(fp, scratch2);
+  __ ld(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ ld(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ ld(scratch1,
+        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(caller_args_count_reg,
+        FieldMemOperand(scratch1,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
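+
+// In outline, the frame surgery above is (illustrative pseudo-C++; the
+// accessors stand in for the MemOperand loads):
+//
+//   if (!is_tail_call_elimination_enabled) return;
+//   if (FrameType(fp) == STUB) fp = CallerFp(fp);  // drop handler/stub frame
+//   if (FrameType(CallerFp(fp)) == ARGUMENTS_ADAPTOR) {
+//     fp = CallerFp(fp);                           // drop current frame
+//     caller_argc = SmiUntag(AdaptorFrameLength(fp));
+//   } else {
+//     // No adaptor frame: the actual count equals the formal count.
+//     caller_argc = Function(fp)->shared()->formal_parameter_count;
+//   }
+//   // MacroAssembler::PrepareForTailCall then slides the freshly prepared
+//   // arguments over the dropped frame(s).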
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that function is not a "classConstructor".
+  Label class_constructor;
+  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+  __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+  __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                         (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ Branch(&done_convert, ne, at, Operand(zero_reg));
+  {
+    // ----------- S t a t e -------------
+    //  -- a0 : the number of arguments (not including the receiver)
+    //  -- a1 : the function to call (checked to be a JSFunction)
+    //  -- a2 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(a3);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ Dlsa(at, sp, a0, kPointerSizeLog2);
+      __ ld(a3, MemOperand(at));
+      __ JumpIfSmi(a3, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ GetObjectType(a3, a4, a4);
+      __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(a3);
+        }
+        __ Branch(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(a0);
+        __ Push(a0, a1);
+        __ mov(a0, a3);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ mov(a3, v0);
+        __ Pop(a0, a1);
+        __ SmiUntag(a0);
+      }
+      __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
+    __ sd(a3, MemOperand(at));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSFunction)
+  //  -- a2 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  __ lw(a2,
+        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+  ParameterCount actual(a0);
+  ParameterCount expected(a2);
+  __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ Push(a1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
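+
+// The receiver conversion above follows ES6 9.2.1 [[Call]] and is roughly
+// (illustrative sketch, not the real helper names):
+//
+//   if (!shared->native() && !shared->strict()) {
+//     if (IsNullOrUndefined(receiver)) receiver = global_proxy;
+//     else if (!IsJSReceiver(receiver)) receiver = ToObject(receiver);
+//   }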
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(a1);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  {
+    __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+    __ Dlsa(a4, sp, a0, kPointerSizeLog2);
+    __ sd(at, MemOperand(a4));
+  }
+
+  // Load [[BoundArguments]] into a2 and length of that into a4.
+  __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+  __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(a4);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+  //  -- a4 : the number of [[BoundArguments]]
+  // -----------------------------------
+
+  // Reserve stack space for the [[BoundArguments]].
+  {
+    Label done;
+    __ dsll(a5, a4, kPointerSizeLog2);
+    __ Dsubu(sp, sp, Operand(a5));
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+    __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+    // Restore the stack pointer.
+    __ Daddu(sp, sp, Operand(a5));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterFrame(StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowStackOverflow);
+    }
+    __ bind(&done);
+  }
+
+  // Relocate arguments down the stack.
+  {
+    Label loop, done_loop;
+    __ mov(a5, zero_reg);
+    __ bind(&loop);
+    __ Branch(&done_loop, gt, a5, Operand(a0));
+    __ Dlsa(a6, sp, a4, kPointerSizeLog2);
+    __ ld(at, MemOperand(a6));
+    __ Dlsa(a6, sp, a5, kPointerSizeLog2);
+    __ sd(at, MemOperand(a6));
+    __ Daddu(a4, a4, Operand(1));
+    __ Daddu(a5, a5, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Copy [[BoundArguments]] to the stack (below the arguments).
+  {
+    Label loop, done_loop;
+    __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+    __ SmiUntag(a4);
+    __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ bind(&loop);
+    __ Dsubu(a4, a4, Operand(1));
+    __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+    __ Dlsa(a5, a2, a4, kPointerSizeLog2);
+    __ ld(at, MemOperand(a5));
+    __ Dlsa(a5, sp, a0, kPointerSizeLog2);
+    __ sd(at, MemOperand(a5));
+    __ Daddu(a0, a0, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                      masm->isolate())));
+  __ ld(at, MemOperand(at));
+  __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
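+
+// Net effect of the patch/relocate/copy sequence above, with B bound
+// arguments (illustrative; stack[0] == sp):
+//
+//   stack[argc] = bound_this;             // patch receiver (before sp moves)
+//   sp -= B * kPointerSize;               // reserve B slots
+//   for (int i = 0; i <= argc; i++)       // slide args (and receiver) down
+//     stack[i] = stack[i + B];
+//   for (int j = B - 1, k = argc; j >= 0; j--, k++)
+//     stack[k] = bound_args[j];           // insert [[BoundArguments]]
+//   argc += B;                            // then tail-call the target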
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(a1, &non_callable);
+  __ bind(&non_smi);
+  __ GetObjectType(a1, t1, t2);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Check if target has a [[Call]] internal method.
+  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+  __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(a1);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ Daddu(a0, a0, 2);
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ Dlsa(at, sp, a0, kPointerSizeLog2);
+  __ sd(a1, MemOperand(at));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
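+
+// Dispatch summary for Generate_Call (illustrative):
+//
+//   if (IsSmi(target)) throw CalledNonCallable;
+//   if (IsJSFunction(target))      tail-call CallFunction;
+//   if (IsJSBoundFunction(target)) tail-call CallBoundFunction;
+//   if (!map->is_callable())       throw CalledNonCallable;
+//   if (IsJSProxy(target))         tail-call Runtime::kJSProxyCall;
+//   // Otherwise: exotic callable object; patch the receiver to the target
+//   // and go through the "call_as_function_delegate".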
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (checked to be a JSFunction)
+  //  -- a3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // a2 to contain either an AllocationSite or undefined.
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+  __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a3 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(a1);
+
+  // Load [[BoundArguments]] into a2 and length of that into a4.
+  __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+  __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(a4);
+
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the function to call (checked to be a JSBoundFunction)
+  //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+  //  -- a3 : the new target (checked to be a constructor)
+  //  -- a4 : the number of [[BoundArguments]]
+  // -----------------------------------
+
+  // Reserve stack space for the [[BoundArguments]].
+  {
+    Label done;
+    __ dsll(a5, a4, kPointerSizeLog2);
+    __ Dsubu(sp, sp, Operand(a5));
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+    __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+    // Restore the stack pointer.
+    __ Daddu(sp, sp, Operand(a5));
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterFrame(StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowStackOverflow);
+    }
+    __ bind(&done);
+  }
+
+  // Relocate arguments down the stack.
+  {
+    Label loop, done_loop;
+    __ mov(a5, zero_reg);
+    __ bind(&loop);
+    __ Branch(&done_loop, ge, a5, Operand(a0));
+    __ Dlsa(a6, sp, a4, kPointerSizeLog2);
+    __ ld(at, MemOperand(a6));
+    __ Dlsa(a6, sp, a5, kPointerSizeLog2);
+    __ sd(at, MemOperand(a6));
+    __ Daddu(a4, a4, Operand(1));
+    __ Daddu(a5, a5, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Copy [[BoundArguments]] to the stack (below the arguments).
+  {
+    Label loop, done_loop;
+    __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+    __ SmiUntag(a4);
+    __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ bind(&loop);
+    __ Dsubu(a4, a4, Operand(1));
+    __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+    __ Dlsa(a5, a2, a4, kPointerSizeLog2);
+    __ ld(at, MemOperand(a5));
+    __ Dlsa(a5, sp, a0, kPointerSizeLog2);
+    __ sd(at, MemOperand(a5));
+    __ Daddu(a0, a0, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label skip_load;
+    __ Branch(&skip_load, ne, a1, Operand(a3));
+    __ ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&skip_load);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ ld(at, MemOperand(at));
+  __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (checked to be a JSProxy)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(a1, a3);
+  // Include the pushed new_target, constructor and the receiver.
+  __ Daddu(a0, a0, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (can be any Object)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(a1, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+  // Check if target has a [[Construct]] internal method.
+  __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+  __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq, t2, Operand(JS_PROXY_TYPE));
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
+    __ sd(a1, MemOperand(at));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(a0, a1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : message_id as Smi
+  //  -- ra : return address
+  // -----------------------------------
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in a0.
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  //  -- a3: new target (passed through to callee)
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ Branch(&dont_adapt_arguments, eq, a2,
+            Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless as the number of arguments should always be greater than 0.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into a0 and copy end address into a4.
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ dsll(a4, a2, kPointerSizeLog2);
+    __ dsubu(a4, a0, a4);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    // a4: copy end address
+
+    Label copy;
+    __ bind(&copy);
+    __ ld(a5, MemOperand(a0));
+    __ push(a5);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
+    __ daddiu(a0, a0, -kPointerSize);  // In delay slot.
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into a0 and copy end address into a7.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Daddu(a7, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    // a7: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ ld(a4, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ Dsubu(sp, sp, kPointerSize);
+    __ Dsubu(a0, a0, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
+    __ sd(a4, MemOperand(sp));  // In the delay slot.
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: new target (passed through to callee)
+    __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+    __ dsll(a6, a2, kPointerSizeLog2);
+    __ Dsubu(a4, fp, Operand(a6));
+    // Adjust for frame.
+    __ Dsubu(a4, a4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                             2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ Dsubu(sp, sp, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
+    __ sd(a5, MemOperand(sp));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ mov(a0, a2);
+  // a0 : expected number of arguments
+  // a1 : function (passed through to callee)
+  // a3: new target (passed through to callee)
+  __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(a4);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Jump(a4);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ break_(0xCC);
+  }
+}
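+
+// What the trampoline arranges, in outline (illustrative pseudo-C++):
+//
+//   if (expected == kDontAdaptArgumentsSentinel) Jump(code_entry);
+//   EnterAdaptorFrame();
+//   // Copy the receiver plus min(actual, expected) arguments ...
+//   for (int i = 0; i <= Min(actual, expected); i++) PushArg(i);
+//   // ... and pad missing parameters with undefined.
+//   for (int i = actual; i < expected; i++) Push(undefined);
+//   Call(code_entry);          // with a0 == expected
+//   LeaveAdaptorFrame();       // drops actual + 1 slots via the saved count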
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/builtins/ppc/OWNERS b/src/builtins/ppc/OWNERS
new file mode 100644
index 0000000..752e8e3
--- /dev/null
+++ b/src/builtins/ppc/OWNERS
@@ -0,0 +1,6 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
new file mode 100644
index 0000000..dfea83f
--- /dev/null
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -0,0 +1,3016 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- r3                 : number of arguments excluding receiver
+  //  -- r4                 : target
+  //  -- r6                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects r3 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ addi(r3, r3, Operand(num_extra_args + 1));
+
+  // Insert extra arguments.
+  __ SmiTag(r3);
+  __ Push(r3, r4, r6);
+  __ SmiUntag(r3);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the current native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the current native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r4);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r5, r0);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // tail call a stub
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r4);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r5, r0);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  __ mr(r6, r4);
+  // Run the native code for the Array function called as a normal function.
+  // tail call a stub
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r8 and the double value in d1.
+  __ LoadRoot(r8, root_index);
+  __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
+
+  // Set up the loop counter: r7 tracks the index of the next parameter to
+  // visit, counting down from the last argument.
+
+  Label done_loop, loop;
+  __ mr(r7, r3);
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ subi(r7, r7, Operand(1));
+    __ cmpi(r7, Operand::Zero());
+    __ blt(&done_loop);
+
+    // Load the next parameter tagged value into r5.
+    __ ShiftLeftImm(r5, r7, Operand(kPointerSizeLog2));
+    __ LoadPX(r5, MemOperand(sp, r5));
+
+    // Load the double value of the parameter into d2, maybe converting the
+    // parameter to a number first using the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r5, &convert_smi);
+    __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r3);
+      __ SmiTag(r7);
+      __ EnterBuiltinFrame(cp, r4, r3);
+      __ Push(r7, r8);
+      __ mr(r3, r5);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mr(r5, r3);
+      __ Pop(r7, r8);
+      __ LeaveBuiltinFrame(cp, r4, r3);
+      __ SmiUntag(r7);
+      __ SmiUntag(r3);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r8);
+        __ JumpIfSmi(r8, &done_restore);
+        __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ lfd(d2, FieldMemOperand(r5, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r5);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (d1) and the next parameter value on the right hand side (d2).
+    Label compare_nan, compare_swap;
+    __ fcmpu(d1, d2);
+    __ bunordered(&compare_nan);
+    __ b(cond_done, &loop);
+    __ b(CommuteCondition(cond_done), &compare_swap);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ TestDoubleIsMinusZero(reg, r9, r0);
+    __ bne(&loop);
+
+    // Update accumulator. Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ fmr(d1, d2);
+    __ mr(r8, r5);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    // We still need to visit the rest of the arguments.
+    __ bind(&compare_nan);
+    __ LoadRoot(r8, Heap::kNanValueRootIndex);
+    __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ addi(r3, r3, Operand(1));
+  __ Drop(r3);
+  __ mr(r3, r8);
+  __ Ret();
+}
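+
+// The loop above computes, in effect (illustrative C++; ToNumber may call
+// out, which is why the accumulator is saved and restored around it):
+//
+//   double acc = (kind == MathMaxMinKind::kMin) ? +INFINITY : -INFINITY;
+//   for (int i = argc - 1; i >= 0; i--) {
+//     double x = ToNumber(arg[i]);
+//     if (std::isnan(x) || std::isnan(acc)) acc = NAN;        // NaN wins
+//     else if ((kind == kMax) ? (x > acc) : (x < acc)) acc = x;
+//     else if (x == acc) { /* equal: keep -0 for min, +0 for max */ }
+//   }
+//   // Drop argc + 1 stack slots and return acc as a tagged value.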
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into r3.
+  Label no_arguments;
+  {
+    __ mr(r5, r3);  // Store argc in r5.
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&no_arguments);
+    __ subi(r3, r3, Operand(1));
+    __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r3, MemOperand(sp, r3));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(r5);
+    __ EnterBuiltinFrame(cp, r4, r5);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, r4, r5);
+    __ SmiUntag(r5);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ Drop(r5);
+    __ Ret(1);
+  }
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ Ret(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : constructor function
+  //  -- r6                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r5.
+  {
+    Label no_arguments, done;
+    __ mr(r9, r3);  // Store argc in r9.
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&no_arguments);
+    __ subi(r3, r3, Operand(1));
+    __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r5, MemOperand(sp, r5));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadSmiLiteral(r5, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure r5 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(r5, &done_convert);
+    __ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
+    __ beq(&done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r9);
+      __ EnterBuiltinFrame(cp, r4, r9);
+      __ Push(r6);
+      __ mr(r3, r5);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mr(r5, r3);
+      __ Pop(r6);
+      __ LeaveBuiltinFrame(cp, r4, r9);
+      __ SmiUntag(r9);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmp(r4, r6);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(r9);
+    __ EnterBuiltinFrame(cp, r4, r9);
+    __ Push(r5);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(r5);
+    __ LeaveBuiltinFrame(cp, r4, r9);
+    __ SmiUntag(r9);
+  }
+  __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r9);
+    __ Ret(1);
+  }
+}
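+
+// Semantics implemented above, roughly (illustrative):
+//
+//   value = (argc >= 1) ? ToNumber(args[0]) : Smi::FromInt(0);
+//   if (new_target == constructor)
+//     result = AllocateJSValue(constructor);            // fast path
+//   else
+//     result = FastNewObject(constructor, new_target);  // subclass case
+//   result->value = value;
+//   return result;   // dropping argc + 1 stack slots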
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into r3.
+  Label no_arguments;
+  {
+    __ mr(r5, r3);  // Store argc in r5.
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&no_arguments);
+    __ subi(r3, r3, Operand(1));
+    __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r3, MemOperand(sp, r3));
+  }
+
+  // 2a. At least one argument, return r3 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(r3, &to_string);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CompareObjectType(r3, r6, r6, FIRST_NONSTRING_TYPE);
+    __ bgt(&to_string);
+    __ beq(&symbol_descriptive_string);
+    __ b(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+    __ Ret(1);
+  }
+
+  // 3a. Convert r3 to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(r5);
+    __ EnterBuiltinFrame(cp, r4, r5);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, r4, r5);
+    __ SmiUntag(r5);
+  }
+  __ b(&drop_frame_and_ret);
+
+  // 3b. Convert symbol in r3 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Drop(r5);
+    __ Drop(1);
+    __ Push(r3);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r5);
+    __ Ret(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : constructor function
+  //  -- r6                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r5.
+  {
+    Label no_arguments, done;
+    __ mr(r9, r3);  // Store argc in r9.
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&no_arguments);
+    __ subi(r3, r3, Operand(1));
+    __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r5, MemOperand(sp, r5));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(r5, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure r5 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(r5, &convert);
+    __ CompareObjectType(r5, r7, r7, FIRST_NONSTRING_TYPE);
+    __ blt(&done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(r9);
+      __ EnterBuiltinFrame(cp, r4, r9);
+      __ Push(r6);
+      __ mr(r3, r5);
+      __ CallStub(&stub);
+      __ mr(r5, r3);
+      __ Pop(r6);
+      __ LeaveBuiltinFrame(cp, r4, r9);
+      __ SmiUntag(r9);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmp(r4, r6);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fall back to the runtime to create the new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(r9);
+    __ EnterBuiltinFrame(cp, r4, r9);
+    __ Push(r5);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(r5);
+    __ LeaveBuiltinFrame(cp, r4, r9);
+    __ SmiUntag(r9);
+  }
+  __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r9);
+    __ Ret(1);
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(r3);
+    __ Push(r3, r4, r6, r4);
+
+    __ CallRuntime(function_id, 1);
+    __ mr(r5, r3);
+
+    // Restore target function and new target.
+    __ Pop(r3, r4, r6);
+    __ SmiUntag(r3);
+  }
+  __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmpl(sp, ip);
+  __ bge(&ok);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- r4     : constructor function
+  //  -- r5     : allocation site or undefined
+  //  -- r6     : new target
+  //  -- cp     : context
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(r5, r7);
+
+    if (!create_implicit_receiver) {
+      __ SmiTag(r7, r3, SetRC);
+      __ Push(cp, r5, r7);
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    } else {
+      __ SmiTag(r3);
+      __ Push(cp, r5, r3);
+
+      // Allocate the new receiver object.
+      __ Push(r4, r6);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mr(r7, r3);
+      __ Pop(r4, r6);
+
+      // ----------- S t a t e -------------
+      //  -- r4: constructor function
+      //  -- r6: new target
+      //  -- r7: newly allocated object
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ LoadP(r3, MemOperand(sp));
+      __ SmiUntag(r3, SetRC);
+
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(r7, r7);
+    }
+
+    // Set up pointer to last argument.
+    __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // r3: number of arguments
+    // r4: constructor function
+    // r5: address of last argument (caller sp)
+    // r6: new target
+    // cr0: condition indicating whether r3 is zero
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
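+    // The copy below reserves stack space for all arguments at once and
+    // then copies them from the caller frame, reusing ip as a byte offset
+    // that counts down in lock step for both source (r5) and destination
+    // (sp); CTR holds the loop count for bdnz.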
+    Label loop, no_args;
+    __ beq(&no_args, cr0);
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ sub(sp, sp, ip);
+    __ mtctr(r3);
+    __ bind(&loop);
+    __ subi(ip, ip, Operand(kPointerSize));
+    __ LoadPX(r0, MemOperand(r5, ip));
+    __ StorePX(r0, MemOperand(sp, ip));
+    __ bdnz(&loop);
+    __ bind(&no_args);
+
+    // Call the function.
+    // r3: number of arguments
+    // r4: constructor function
+    // r6: new target
+
+    ParameterCount actual(r3);
+    __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    // r3: result
+    // sp[0]: receiver
+    // sp[1]: number of arguments (smi-tagged)
+    __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // r3: result
+      // sp[0]: receiver
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(r3, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CompareObjectType(r3, r4, r6, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ LoadP(r3, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // r3: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ LoadP(r4, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi; a Smi indicates that the
+  // constructor result from a derived class is neither undefined nor an
+  // Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r3, &dont_throw);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  __ SmiToPtrArrayOffset(r4, r4);
+  __ add(sp, sp, r4);
+  __ addi(sp, sp, Operand(kPointerSize));
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+  }
+  __ blr();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the value to pass to the generator
+  //  -- r4 : the JSGeneratorObject to resume
+  //  -- r5 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r4);
+
+  // Store input value into generator object.
+  __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
+            r0);
+  __ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+  __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ LoadByte(ip, MemOperand(ip), r0);
+  __ extsb(ip, ip);
+  __ cmpi(ip, Operand(StepIn));
+  __ bge(&prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ LoadP(ip, MemOperand(ip));
+  __ cmp(ip, r4);
+  __ beq(&prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Push receiver.
+  __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r4    : the JSGeneratorObject to resume
+  //  -- r5    : the resume mode (tagged)
+  //  -- r7    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r3, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label loop, done_loop;
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_PPC64
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&done_loop);
+#else
+    __ SmiUntag(r3, SetRC);
+    __ beq(&done_loop, cr0);
+#endif
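+    // Note the per-width difference above: on 64-bit the loaded count is
+    // already an untagged integer and is compared directly, while on
+    // 32-bit it is a Smi that is untagged with SetRC so the zero check can
+    // use CR0.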
+    __ mtctr(r3);
+    __ bind(&loop);
+    __ push(ip);
+    __ bdnz(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+  __ bne(&old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mr(r6, r4);
+    __ mr(r4, r7);
+    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+    __ JumpToJSEntry(ip);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushStandardFrame(r7);
+
+    // Restore the operand stack.
+    __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+    __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
+    __ addi(r3, r3,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
+      Label loop, done_loop;
+      __ SmiUntag(r6, SetRC);
+      __ beq(&done_loop, cr0);
+      __ mtctr(r6);
+      __ bind(&loop);
+      __ LoadPU(ip, MemOperand(r3, kPointerSize));
+      __ Push(ip);
+      __ bdnz(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
+              r0);
+
+    // Resume the generator function at the continuation.
+    __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+    __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+      if (FLAG_enable_embedded_constant_pool) {
+        __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
+      }
+      __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+      __ SmiUntag(r5);
+      __ add(r6, r6, r5);
+      __ LoadSmiLiteral(r5,
+                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+      __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ mr(r3, r4);  // Continuation expects generator object in r3.
+      __ Jump(r6);
+    }
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5, r7);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r4, r5);
+    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r4, r5);
+    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+  __ push(r4);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers r5; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+  // Make r5 the space we have left. The stack might already have
+  // overflowed here, which will cause r5 to become negative.
+  __ sub(r5, sp, r5);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ SmiToPtrArrayOffset(r0, argc);
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ ShiftLeftImm(r0, argc, Operand(kPointerSizeLog2));
+  }
+  __ cmp(r5, r0);
+  __ bgt(&okay);  // Signed comparison.
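+  // The comparison must be signed: if the stack has already overflowed,
+  // r5 is negative, and an unsigned compare would misread it as a huge
+  // amount of free space.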
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r3: new.target
+  // r4: function
+  // r5: receiver
+  // r6: argc
+  // r7: argv
+  // r0,r8-r9, cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the
+    // isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(cp, Operand(context_address));
+    __ LoadP(cp, MemOperand(cp));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ Push(r4, r5);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers r5.
+    Generate_CheckStackOverflow(masm, r6, kArgcIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop.
+    // r4: function
+    // r6: argc
+    // r7: argv, i.e. points to first arg
+    Label loop, entry;
+    __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2));
+    __ add(r5, r7, r0);
+    // r5 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ LoadP(r8, MemOperand(r7));  // read next parameter
+    __ addi(r7, r7, Operand(kPointerSize));
+    __ LoadP(r0, MemOperand(r8));  // dereference handle
+    __ push(r0);                   // push parameter
+    __ bind(&entry);
+    __ cmp(r7, r5);
+    __ bne(&loop);
+
+    // Set up new.target and argc.
+    __ mr(r7, r3);
+    __ mr(r3, r6);
+    __ mr(r6, r7);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+    __ mr(r14, r7);
+    __ mr(r15, r7);
+    __ mr(r16, r7);
+    __ mr(r17, r7);
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+  }
+  __ blr();
+
+  // r3: result
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ LoadP(args_count,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lwz(args_count,
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  __ add(sp, sp, args_count);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o r4: the JS function object being called.
+//   o r6: the new target
+//   o cp: our context
+//   o pp: the caller's constant pool pointer (if enabled)
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(r4);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  Label array_done;
+  Register debug_info = r5;
+  DCHECK(!debug_info.is(r3));
+  __ LoadP(debug_info,
+           FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+  // Load original bytecode array or the debug copy.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+  __ beq(&array_done);
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ bind(&array_done);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ LoadP(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ mov(ip, Operand(masm->CodeObject()));  // Self-reference to this code.
+  __ cmp(r3, ip);
+  __ bne(&switch_to_different_code_kind);
+
+  // Check that the function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Load initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r3, kInterpreterBytecodeOffsetRegister);
+  __ Push(r6, kInterpreterBytecodeArrayRegister, r3);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size (word) from the BytecodeArray object.
+    __ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                               BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ sub(r6, sp, r5);
+    __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+    __ cmpl(r6, r0);
+    __ bge(&ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    Label loop, no_args;
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
+    __ beq(&no_args, cr0);
+    __ mtctr(r5);
+    __ bind(&loop);
+    __ push(r6);
+    __ bdnz(&loop);
+    __ bind(&no_args);
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+  __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
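+  // ip now holds the handler code entry loaded from the dispatch table,
+  // indexed by the bytecode value scaled to pointer size.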
+  __ Call(ip);
+
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in r3.
+  LeaveInterpreterFrame(masm, r5);
+  __ blr();
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
+  __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(r4, r7, r8);
+  __ JumpToJSEntry(r7);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ LoadP(kContextRegister,
+           MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r5);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r3);
+
+    // Push function as argument and compile for baseline.
+    __ push(r4);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r3);
+  }
+  __ blr();
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+                                         Register count, Register scratch) {
+  Label loop;
+  __ addi(index, index, Operand(kPointerSize));  // Bias up for LoadPU
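+  // LoadPU is a load-with-update: each iteration pre-decrements |index| by
+  // one word and then loads, so after the bias above the first iteration
+  // reads the first argument and the loop walks down one slot per argument.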
+  __ mtctr(count);
+  __ bind(&loop);
+  __ LoadPU(scratch, MemOperand(index, -kPointerSize));
+  __ push(scratch);
+  __ bdnz(&loop);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r5 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- r4 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Calculate number of arguments (add one for receiver).
+  __ addi(r6, r3, Operand(1));
+
+  // Push the arguments.
+  Generate_InterpreterPushArgs(masm, r5, r6, r7);
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r3 : argument count (not including receiver)
+  // -- r6 : new target
+  // -- r4 : constructor to call
+  // -- r5 : address of the first argument
+  // -----------------------------------
+
+  // Push a slot for the receiver to be constructed.
+  __ li(r0, Operand::Zero());
+  __ push(r0);
+
+  // Push the arguments (skip if none).
+  Label skip;
+  __ cmpi(r3, Operand::Zero());
+  __ beq(&skip);
+  Generate_InterpreterPushArgs(masm, r5, r3, r7);
+  __ bind(&skip);
+
+  // Call the constructor with r3, r4, and r6 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r5, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
+                          Code::kHeaderSize - kHeapObjectTag));
+  __ mtlr(r0);
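+  // lr now points into InterpreterEntryTrampoline just past its dispatch
+  // call, so returning from the next handler unwinds the interpreter frame
+  // exactly as if we had dispatched from there.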
+
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check that the function data field is actually a BytecodeArray object.
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ LoadP(kInterpreterBytecodeOffsetRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+                         kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+  __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+  __ Jump(ip);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = r4;
+  Register map = r9;
+  Register index = r5;
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&gotta_call_runtime);
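+  // The optimized code map is a FixedArray of fixed-size entries (see
+  // SharedFunctionInfo::kEntryLength) holding weak cells for a context,
+  // an OSR ast id, literals and cached code; the loop below walks the
+  // entries from the end of the array down to index 1, and the separate
+  // context-free slot (kSharedCodeIndex) is checked last.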
+
+  // Find literals.
+  // r10 : native context
+  // r5  : length / index
+  // r9  : optimized code map
+  // r6  : new target
+  // r4  : closure
+  Register native_context = r10;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r11;
+  Register array_pointer = r8;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ add(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ bne(&loop_bottom);
+  // OSR id set to none?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+  __ bne(&loop_bottom);
+  // Literals available?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = r7;
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+
+  // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
+  __ LoadP(
+      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+            r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(
+      closure,
+      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+  // Save closure before the write barrier.
+  __ mr(r8, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found neither literals nor code.
+  __ b(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ LoadP(entry,
+           FieldMemOperand(map, FixedArray::kHeaderSize +
+                                    SharedFunctionInfo::kSharedCodeIndex));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ b(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ LoadP(entry,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(r8);
+  __ cmpi(r8, Operand(Code::BUILTIN));
+  __ beq(&gotta_call_runtime);
+  // Yes, install the full code.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&gotta_call_runtime);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ Move(r7, r3);
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(r3);
+    __ Push(r3, r4, r6, r4);
+
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
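+    // The unrolled dispatch below handles 0..3 actual arguments: each case
+    // copies the arguments that were actually passed from the caller frame
+    // and pads the rest with undefined, so the runtime always receives
+    // exactly (function, stdlib, foreign, heap).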
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ cmpi(r7, Operand(j));
+        __ bne(&over);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                        i * kPointerSize));
+        __ push(r7);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(r3, &failed);
+
+    __ Drop(2);
+    __ pop(r7);
+    __ SmiUntag(r7);
+    scope.GenerateLeaveFrame();
+
+    __ addi(r7, r7, Operand(1));
+    __ Drop(r7);
+    __ Ret();
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ Pop(r3, r4, r6);
+    __ SmiUntag(r3);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r3 at the start of the PlatformCodeAge sequence.
+  __ mr(r3, ip);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r3 - contains return address (beginning of patch sequence)
+  //   r4 - isolate
+  //   r6 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ mflr(r0);
+  __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r5);
+  __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
+  __ mtlr(r0);
+  __ mr(ip, r3);
+  __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r3 at the start of the PlatformCodeAge sequence.
+  __ mr(r3, ip);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r3 - contains return address (beginning of patch sequence)
+  //   r4 - isolate
+  //   r6 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ mflr(r0);
+  __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r5);
+  __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
+  __ mtlr(r0);
+  __ mr(ip, r3);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(r4);
+
+  // Jump to point after the code-age stub.
+  __ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(r3);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ addi(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ blr();                                // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
+    __ push(r3);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> r9.
+  __ LoadP(r9, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r9);
+  // Switch on the state.
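+  // NO_REGISTERS means nothing live needs restoring and only the state
+  // word is dropped; TOS_REGISTER means the value saved on the stack above
+  // the state word must be restored into the accumulator (r3) first.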
+  Label with_tos_register, unknown_state;
+  __ cmpi(
+      r9,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ bne(&with_tos_register);
+  __ addi(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
+  __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+  __ cmpi(
+      r9,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
+  __ bne(&unknown_state);
+  __ addi(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers registers {r7, r8, r9, r10}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                             Register function_template_info,
+                             Label* receiver_check_failed) {
+  Register signature = r7;
+  Register map = r8;
+  Register constructor = r9;
+  Register scratch = r10;
+
+  // If there is no signature, return the holder.
+  __ LoadP(signature, FieldMemOperand(function_template_info,
+                                      FunctionTemplateInfo::kSignatureOffset));
+  Label receiver_check_passed;
+  __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+                &receiver_check_passed);
+
+  // Walk the prototype chain.
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, scratch, scratch);
+  __ cmpi(scratch, Operand(JS_FUNCTION_TYPE));
+  Label next_prototype;
+  __ bne(&next_prototype);
+  Register type = constructor;
+  __ LoadP(type,
+           FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(type,
+           FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmp(signature, type);
+  __ beq(&receiver_check_passed);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+  __ bne(&next_prototype);
+
+  // Otherwise load the parent function template and iterate.
+  __ LoadP(type,
+           FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ b(&function_template_loop);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+  __ DecodeField<Map::HasHiddenPrototype>(scratch, SetRC);
+  __ beq(receiver_check_failed, cr0);
+
+  __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ b(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                 : number of arguments excluding receiver
+  //  -- r4                 : callee
+  //  -- lr                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ ShiftLeftImm(r11, r3, Operand(kPointerSizeLog2));
+  __ LoadPX(r5, MemOperand(sp, r11));
+  CompatibleReceiverCheck(masm, r5, r6, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ LoadP(r7, FieldMemOperand(r6, FunctionTemplateInfo::kCallCodeOffset));
+  __ LoadP(r7, FieldMemOperand(r7, CallHandlerInfo::kFastHandlerOffset));
+  __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ addi(r11, r11, Operand(kPointerSize));
+  __ add(sp, sp, r11);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(r3);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the caller.
+  Label skip;
+  __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+  __ bne(&skip);
+  __ Ret();
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the
+  // actual JavaScript frame. This is the case when OSR is triggered from
+  // bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
+
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+    __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+
+    if (FLAG_enable_embedded_constant_pool) {
+      __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
+    }
+
+    // Load the OSR entrypoint offset from the deoptimization data.
+    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+    __ LoadP(r4, FieldMemOperand(
+                     r4, FixedArray::OffsetOfElementAt(
+                             DeoptimizationInputData::kOsrPcOffsetIndex)));
+    __ SmiUntag(r4);
+
+    // Compute the target address = code start + osr_offset
+    __ add(r0, r3, r4);
+
+    // And "return" to the OSR entry point of the function.
+    __ mtlr(r0);
+    __ blr();
+  }
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- r3    : number of arguments
+  //  -- r4    : function
+  //  -- cp    : context
+  //  -- lr    : return address
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into r3 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(r3);
+    __ JumpIfSmi(r3, &receiver_not_date);
+    __ CompareObjectType(r3, r5, r6, JS_DATE_TYPE);
+    __ bne(&receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
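+  // Cached date fields are valid only while the isolate-wide date cache
+  // stamp still matches the stamp recorded in this JSDate; on a mismatch
+  // the slow path calls the C++ date field helper to recompute the value.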
+  if (field_index == JSDate::kDateValue) {
+    __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ LoadP(r4, MemOperand(r4));
+      __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
+      __ cmp(r4, ip);
+      __ bne(&stamp_mismatch);
+      __ LoadP(r3, FieldMemOperand(
+                       r3, JSDate::kValueOffset + field_index * kPointerSize));
+      __ Ret();
+      __ bind(&stamp_mismatch);
+    }
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, r4);
+    __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ push(r3);
+    __ LoadSmiLiteral(r3, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, r4, r3);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into r4, argArray into r3 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
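+  // The single compare against kPointerSize below acts as a three-way
+  // dispatch on argc: blt (argc == 0) keeps undefined for both thisArg and
+  // argArray, beq (argc == 1) loads only thisArg, and fall-through
+  // (argc >= 2) loads argArray as well.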
+  {
+    Label skip;
+    Register arg_size = r5;
+    Register new_sp = r6;
+    Register scratch = r7;
+    __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+    __ add(new_sp, sp, arg_size);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ mr(scratch, r3);
+    __ LoadP(r4, MemOperand(new_sp, 0));  // receiver
+    __ cmpi(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize));  // thisArg
+    __ beq(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize));  // argArray
+    __ bind(&skip);
+    __ mr(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r3    : argArray
+  //  -- r4    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(r4, &receiver_not_callable);
+  __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsCallable, r0);
+  __ beq(&receiver_not_callable, cr0);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ li(r3, Operand::Zero());
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ StoreP(r4, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
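+  // Implements Function.prototype.call at the JS level, e.g.:
+  //   function f(x) { return this.v + x; }
+  //   f.call({v: 1}, 2);  // 3
+  // The callable is the stack receiver; the first argument (if any) becomes
+  // the new receiver and the remaining arguments shift down one slot.
+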
+  // 1. Make sure we have at least one argument.
+  // r3: actual number of arguments
+  {
+    Label done;
+    __ cmpi(r3, Operand::Zero());
+    __ bne(&done);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ addi(r3, r3, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  // r3: actual number of arguments
+  __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+  __ LoadPX(r4, MemOperand(sp, r5));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r3: actual number of arguments
+  // r4: callable
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ add(r5, sp, r5);
+
+    __ mtctr(r3);
+    __ bind(&loop);
+    __ LoadP(ip, MemOperand(r5, -kPointerSize));
+    __ StoreP(ip, MemOperand(r5));
+    __ subi(r5, r5, Operand(kPointerSize));
+    __ bdnz(&loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ subi(r3, r3, Operand(1));
+    __ pop();
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
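+
+  // Implements Reflect.apply(target, thisArgument, argumentsList), e.g.:
+  //   Reflect.apply(Math.max, undefined, [1, 3, 2]);  // 3
+  // Unlike Function.prototype.apply there is no null/undefined shortcut for
+  // argumentsList; it is always handed on to the Apply builtin.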
+
+  // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r5;
+    Register new_sp = r6;
+    Register scratch = r7;
+    __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+    __ add(new_sp, sp, arg_size);
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mr(scratch, r4);
+    __ mr(r3, r4);
+    __ cmpi(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ beq(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize));  // thisArgument
+    __ cmpi(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize));  // argumentsList
+    __ bind(&skip);
+    __ mr(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r3    : argumentsList
+  //  -- r4    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(r4, &target_not_callable);
+  __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsCallable, r0);
+  __ beq(&target_not_callable, cr0);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ StoreP(r4, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
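+
+  // Implements Reflect.construct(target, argumentsList[, newTarget]), e.g.:
+  //   Reflect.construct(Date, [2016, 0]);     // like new Date(2016, 0)
+  //   Reflect.construct(Date, [], class {});  // new.target is the class
+  // When newTarget is absent it defaults to target (see step 1 below).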
+
+  // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+  // new.target into r6 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push thisArgument
+  // (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r5;
+    Register new_sp = r7;
+    __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+    __ add(new_sp, sp, arg_size);
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mr(r3, r4);
+    __ mr(r6, r4);
+    __ StoreP(r4, MemOperand(new_sp, 0));  // receiver (undefined)
+    __ cmpi(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ mr(r6, r4);  // new.target defaults to target
+    __ beq(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize));  // argumentsList
+    __ cmpi(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize));  // new.target
+    __ bind(&skip);
+    __ mr(sp, new_sp);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r3    : argumentsList
+  //  -- r6    : new.target
+  //  -- r4    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(r4, &target_not_constructor);
+  __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsConstructor, r0);
+  __ beq(&target_not_constructor, cr0);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(r6, &new_target_not_constructor);
+  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsConstructor, r0);
+  __ beq(&new_target_not_constructor, cr0);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ StoreP(r4, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ StoreP(r6, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r3 : actual number of arguments
+  //  -- r4 : function (passed through to callee)
+  //  -- r5 : expected number of arguments
+  //  -- r6 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
+  // Make r8 the space we have left. The stack might already be overflowed
+  // here which will cause r8 to become negative.
+  __ sub(r8, sp, r8);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+  __ cmp(r8, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(r3);
+  __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ mflr(r0);
+  __ push(r0);
+  if (FLAG_enable_embedded_constant_pool) {
+    __ Push(fp, kConstantPoolRegister, r7, r4, r3);
+  } else {
+    __ Push(fp, r7, r4, r3);
+  }
+  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                          kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                                kPointerSize)));
+  int stack_adjustment = kPointerSize;  // adjust for receiver
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
+  __ SmiToPtrArrayOffset(r0, r4);
+  __ add(sp, sp, r0);
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3    : argumentsList
+  //  -- r4    : target
+  //  -- r6    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
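+
+  // The argumentsList is materialized as a FixedArray in one of three ways:
+  // unmodified sloppy/strict arguments objects and fast JSArrays have their
+  // elements used directly, while anything else (e.g. an array-like
+  // {length: 1, 0: 'x'}) goes through Runtime::kCreateListFromArrayLike.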
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(r3, &create_runtime);
+
+    // Load the map of argumentsList into r5.
+    __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+
+    // Load native context into r7.
+    __ LoadP(r7, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ cmp(ip, r5);
+    __ beq(&create_arguments);
+    __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ cmp(ip, r5);
+    __ beq(&create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
+    __ beq(&create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r4, r6, r3);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(r4, r6);
+      __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+      __ SmiUntag(r5);
+    }
+    __ b(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
+    __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
+    __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
+    __ cmp(r5, ip);
+    __ bne(&create_runtime);
+    __ SmiUntag(r5);
+    __ mr(r3, r7);
+    __ b(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r5);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmpi(r5, Operand(FAST_ELEMENTS));
+    __ bgt(&create_runtime);
+    __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ beq(&create_runtime);
+    __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+    __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ SmiUntag(r5);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+    // Make ip the space we have left. The stack might already be overflowed
+    // here which will cause ip to become negative.
+    __ sub(ip, sp, ip);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+    __ cmp(ip, r0);  // Signed comparison.
+    __ bgt(&done);
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r4    : target
+  //  -- r3    : args (a FixedArray built from argumentsList)
+  //  -- r5    : len (number of elements to push from args)
+  //  -- r6    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    Label loop, no_args;
+    __ cmpi(r5, Operand::Zero());
+    __ beq(&no_args);
+    __ addi(r3, r3,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    __ mtctr(r5);
+    __ bind(&loop);
+    __ LoadPU(r0, MemOperand(r3, kPointerSize));
+    __ push(r0);
+    __ bdnz(&loop);
+    __ bind(&no_args);
+    __ mr(r3, r5);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
+  __ lbz(scratch1, MemOperand(scratch1));
+  __ cmpi(scratch1, Operand::Zero());
+  __ beq(&done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ LoadP(scratch3,
+             MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ bne(&no_interpreter_frame);
+    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(
+      scratch3,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mr(fp, scratch2);
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ LoadP(scratch1,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ LoadP(scratch1,
+           FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      caller_args_count_reg,
+      FieldMemOperand(scratch1,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+  __ SmiUntag(caller_args_count_reg);
+#endif
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+  __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorBits, r0);
+  __ bne(&class_constructor, cr0);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
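+  // At the JS level, e.g.:
+  //   function f() { return this; }
+  //   f.call(null);  // sloppy mode: the global proxy; strict mode: null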
+  Label done_convert;
+  __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+                          (1 << SharedFunctionInfo::kNativeBit)));
+  __ bne(&done_convert, cr0);
+  {
+    // ----------- S t a t e -------------
+    //  -- r3 : the number of arguments (not including the receiver)
+    //  -- r4 : the function to call (checked to be a JSFunction)
+    //  -- r5 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(r6);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+      __ LoadPX(r6, MemOperand(sp, r6));
+      __ JumpIfSmi(r6, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(r6);
+        }
+        __ b(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(r3);
+        __ Push(r3, r4);
+        __ mr(r3, r6);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ mr(r6, r3);
+        __ Pop(r3, r4);
+        __ SmiUntag(r3);
+      }
+      __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+    __ StorePX(r6, MemOperand(sp, r7));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the function to call (checked to be a JSFunction)
+  //  -- r5 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
+  __ LoadWordArith(
+      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+  __ SmiUntag(r5);
+#endif
+  ParameterCount actual(r3);
+  ParameterCount expected(r5);
+  __ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+    __ push(r4);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : target (checked to be a JSBoundFunction)
+  //  -- r6 : new.target (only in case of [[Construct]])
+  // -----------------------------------
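+
+  // [[BoundArguments]] are inserted between the receiver and the free
+  // arguments, e.g.:
+  //   const b = f.bind(thisArg, 1, 2);
+  //   b(3);  // the stack ends up as for f(1, 2, 3)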
+
+  // Load [[BoundArguments]] into r5 and length of that into r7.
+  Label no_bound_arguments;
+  __ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
+  __ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
+  __ SmiUntag(r7, SetRC);
+  __ beq(&no_bound_arguments, cr0);
+  {
+    // ----------- S t a t e -------------
+    //  -- r3 : the number of arguments (not including the receiver)
+    //  -- r4 : target (checked to be a JSBoundFunction)
+    //  -- r5 : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- r6 : new.target (only in case of [[Construct]])
+    //  -- r7 : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ mr(r9, sp);  // preserve previous stack pointer
+      __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
+      __ sub(sp, sp, r10);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+      __ bgt(&done);  // Signed comparison.
+      // Restore the stack pointer.
+      __ mr(sp, r9);
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Relocate arguments down the stack.
+    //  -- r3 : the number of arguments (not including the receiver)
+    //  -- r9 : the previous stack pointer
+    //  -- r10: the size of the [[BoundArguments]]
+    {
+      Label skip, loop;
+      __ li(r8, Operand::Zero());
+      __ cmpi(r3, Operand::Zero());
+      __ beq(&skip);
+      __ mtctr(r3);
+      __ bind(&loop);
+      __ LoadPX(r0, MemOperand(r9, r8));
+      __ StorePX(r0, MemOperand(sp, r8));
+      __ addi(r8, r8, Operand(kPointerSize));
+      __ bdnz(&loop);
+      __ bind(&skip);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ add(r5, r5, r10);
+      __ mtctr(r7);
+      __ bind(&loop);
+      __ LoadPU(r0, MemOperand(r5, -kPointerSize));
+      __ StorePX(r0, MemOperand(sp, r8));
+      __ addi(r8, r8, Operand(kPointerSize));
+      __ bdnz(&loop);
+      __ add(r3, r3, r7);
+    }
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(r4);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+  __ StorePX(ip, MemOperand(sp, r0));
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ LoadP(r4,
+           FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                       masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the target to call (can be any Object).
+  // -----------------------------------
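+
+  // Dispatch on the target's instance type:
+  //   JSFunction      -> CallFunction builtin
+  //   JSBoundFunction -> CallBoundFunction builtin
+  //   JSProxy         -> runtime fallback for the [[Call]] trap
+  //   other callables (e.g. API objects with call handlers) -> delegate
+  //   everything else -> TypeError via Runtime::kThrowCalledNonCallable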
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(r4, &non_callable);
+  __ bind(&non_smi);
+  __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+  __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsCallable, r0);
+  __ beq(&non_callable, cr0);
+
+  __ cmpi(r8, Operand(JS_PROXY_TYPE));
+  __ bne(&non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(r4);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ addi(r3, r3, Operand(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+  __ StorePX(r4, MemOperand(sp, r8));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the constructor to call (checked to be a JSFunction)
+  //  -- r6 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // r5 to contain either an AllocationSite or undefined.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+  __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the function to call (checked to be a JSBoundFunction)
+  //  -- r6 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(r4);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  Label skip;
+  __ cmp(r4, r6);
+  __ bne(&skip);
+  __ LoadP(r6,
+           FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ bind(&skip);
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ LoadP(r4,
+           FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the constructor to call (checked to be a JSProxy)
+  //  -- r6 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(r4, r6);
+  // Include the pushed new_target, constructor and the receiver.
+  __ addi(r3, r3, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the constructor to call (can be any Object)
+  //  -- r6 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
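+
+  // Dispatch mirrors Generate_Call, but on [[Construct]]: JSFunction,
+  // JSBoundFunction and JSProxy get their own Construct builtins, other
+  // constructables go through the delegate, and everything else ends up
+  // in ConstructedNonConstructable.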
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(r4, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Construct]] internal method.
+  __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r5, Map::kIsConstructor, r0);
+  __ beq(&non_constructor, cr0);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ cmpi(r8, Operand(JS_PROXY_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+    __ StorePX(r4, MemOperand(sp, r8));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r4);
+  __ Push(r4);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r4);
+  __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r4, r5);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : message_id as Smi
+  //  -- lr : return address
+  // -----------------------------------
+  __ push(r4);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r3.
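+  // Fast paths: Smis and HeapNumbers are returned as-is; everything else
+  // (e.g. strings, oddballs) is handled by NonNumberToNumber.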
+  STATIC_ASSERT(kSmiTag == 0);
+  __ TestIfSmi(r3, r0);
+  __ Ret(eq, cr0);
+
+  __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+  // r3: receiver
+  // r4: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : actual number of arguments
+  //  -- r4 : function (passed through to callee)
+  //  -- r5 : expected number of arguments
+  //  -- r6 : new target (passed through to callee)
+  // -----------------------------------
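+
+  // Reconciles the actual argument count with the callee's expected count,
+  // e.g.:
+  //   function f(a, b, c) {}
+  //   f(1);           // too_few: b and c are filled with undefined
+  //   f(1, 2, 3, 4);  // enough: all four values are copied to the new frame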
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  __ cmp(r3, r5);
+  __ blt(&too_few);
+  __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ beq(&dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r3 and copy end address into r7.
+    // r3: actual number of arguments as a smi
+    // r4: function
+    // r5: expected number of arguments
+    // r6: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r3, r3);
+    __ add(r3, r3, fp);
+    // adjust for return address and receiver
+    __ addi(r3, r3, Operand(2 * kPointerSize));
+    __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+    __ sub(r7, r3, r7);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r3: copy start address
+    // r4: function
+    // r5: expected number of arguments
+    // r6: new target (passed through to callee)
+    // r7: copy end address
+    // ip: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ LoadP(r0, MemOperand(r3, 0));
+    __ push(r0);
+    __ cmp(r3, r7);  // Compare before moving to next argument.
+    __ subi(r3, r3, Operand(kPointerSize));
+    __ bne(&copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: actual < expected
+    __ bind(&too_few);
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r3; copy end address is fp.
+    // r3: actual number of arguments as a smi
+    // r4: function
+    // r5: expected number of arguments
+    // r6: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r3, r3);
+    __ add(r3, r3, fp);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r3: copy start address
+    // r4: function
+    // r5: expected number of arguments
+    // r6: new target (passed through to callee)
+    // ip: code entry to call
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
+    __ push(r0);
+    __ cmp(r3, fp);  // Compare before moving to next argument.
+    __ subi(r3, r3, Operand(kPointerSize));
+    __ bne(&copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r4: function
+    // r5: expected number of arguments
+    // r6: new target (passed through to callee)
+    // ip: code entry to call
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+    __ sub(r7, fp, r7);
+    // Adjust for frame.
+    __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ push(r0);
+    __ cmp(sp, r7);
+    __ bne(&fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ mr(r3, r5);
+  // r3 : expected number of arguments
+  // r4 : function (passed through to callee)
+  // r6 : new target (passed through to callee)
+  __ CallJSEntry(ip);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ blr();
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ JumpToJSEntry(ip);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bkpt(0);
+  }
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/src/builtins/s390/OWNERS b/src/builtins/s390/OWNERS
new file mode 100644
index 0000000..752e8e3
--- /dev/null
+++ b/src/builtins/s390/OWNERS
@@ -0,0 +1,6 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
new file mode 100644
index 0000000..c68fcc3
--- /dev/null
+++ b/src/builtins/s390/builtins-s390.cc
@@ -0,0 +1,3030 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- r2                 : number of arguments excluding receiver
+  //  -- r3                 : target
+  //  -- r5                 : new.target
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects r2 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ AddP(r2, r2, Operand(num_extra_args + 1));
+
+  // Insert extra arguments.
+  __ SmiTag(r2);
+  __ Push(r2, r3, r5);
+  __ SmiUntag(r2);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the InternalArray function from the current native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
+}
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the Array function from the current native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r3);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r4);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // tail call a stub
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r3);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r4);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  __ LoadRR(r5, r3);
+  // Run the native code for the Array function called as a normal function.
+  // tail call a stub
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
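+  // ECMAScript semantics, e.g. for Math.max:
+  //   Math.max()           // -Infinity (the accumulator's start value)
+  //   Math.max(1, NaN, 3)  // NaN (remaining arguments are still visited)
+  //   Math.max(-0, +0)     // +0 (the -0 vs. +0 check in the loop below)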
+  Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r7 and the double value in d1.
+  __ LoadRoot(r7, root_index);
+  __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
+
+  // Set up state for the loop.
+  // r6: loop index, counting down over the parameters
+  // r7: tagged value of the current accumulator (result candidate)
+  __ AddP(r6, r2, Operand(1));
+
+  Label done_loop, loop;
+  __ LoadRR(r6, r2);
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ SubP(r6, Operand(1));
+    __ blt(&done_loop);
+
+    // Load the next parameter tagged value into r2.
+    __ ShiftLeftP(r1, r6, Operand(kPointerSizeLog2));
+    __ LoadP(r4, MemOperand(sp, r1));
+
+    // Load the double value of the parameter into d2, maybe converting the
+    // parameter to a number first using the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r4, &convert_smi);
+    __ LoadP(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
+    __ JumpIfRoot(r5, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      DCHECK(!FLAG_enable_embedded_constant_pool);
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r2);
+      __ SmiTag(r6);
+      __ EnterBuiltinFrame(cp, r3, r2);
+      __ Push(r6, r7);
+      __ LoadRR(r2, r4);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ LoadRR(r4, r2);
+      __ Pop(r6, r7);
+      __ LeaveBuiltinFrame(cp, r3, r2);
+      __ SmiUntag(r6);
+      __ SmiUntag(r2);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r7);
+        __ JumpIfSmi(r7, &done_restore);
+        __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ LoadDouble(d2, FieldMemOperand(r4, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r4);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (d1) and the next parameter value on the right hand side (d2).
+    Label compare_nan, compare_swap;
+    __ cdbr(d1, d2);
+    __ bunordered(&compare_nan);
+    __ b(cond_done, &loop);
+    __ b(CommuteCondition(cond_done), &compare_swap);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ TestDoubleIsMinusZero(reg, r1, r0);
+    __ bne(&loop);
+
+    // Update accumulator. Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ ldr(d1, d2);
+    __ LoadRR(r7, r4);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    // We still need to visit the rest of the arguments.
+    __ bind(&compare_nan);
+    __ LoadRoot(r7, Heap::kNanValueRootIndex);
+    __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  // Drop all slots, including the receiver.
+  __ AddP(r2, Operand(1));
+  __ Drop(r2);
+  __ LoadRR(r2, r7);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
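+
+  // Called as a function (rather than as a constructor) Number returns a
+  // primitive, e.g. Number("42") == 42 and Number() == +0 (path 2b below).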
+
+  // 1. Load the first argument into r2.
+  Label no_arguments;
+  {
+    __ LoadRR(r4, r2);  // Store argc in r4.
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r2, MemOperand(sp, r2));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(r4);
+    __ EnterBuiltinFrame(cp, r3, r4);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(cp, r3, r4);
+    __ SmiUntag(r4);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ Drop(r4);
+    __ Ret(1);
+  }
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ LoadSmiLiteral(r2, Smi::FromInt(0));
+  __ Ret(1);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- r5                     : new target
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r4.
+  {
+    Label no_arguments, done;
+    __ LoadRR(r8, r2);  // Store argc in r8.
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r4, MemOperand(sp, r4));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadSmiLiteral(r4, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure r4 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(r4, &done_convert);
+    __ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
+    __ beq(&done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(r8);
+      __ EnterBuiltinFrame(cp, r3, r8);
+      __ Push(r5);
+      __ LoadRR(r2, r4);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ LoadRR(r4, r2);
+      __ Pop(r5);
+      __ LeaveBuiltinFrame(cp, r3, r8);
+      __ SmiUntag(r8);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ CmpP(r3, r5);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(r8);
+    __ EnterBuiltinFrame(cp, r3, r8);
+    __ Push(r4);  // first argument
+    __ CallStub(&stub);
+    __ Pop(r4);
+    __ LeaveBuiltinFrame(cp, r3, r8);
+    __ SmiUntag(r8);
+  }
+  __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r8);
+    __ Ret(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
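+  // Called as a function, String returns a primitive string; symbols get
+  // their descriptive string instead of throwing, e.g.:
+  //   String(Symbol("a"));  // "Symbol(a)" (symbol_descriptive_string below)
+  //   "" + Symbol("a");     // TypeError (implicit conversion, not this path)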
+  // 1. Load the first argument into r2.
+  Label no_arguments;
+  {
+    __ LoadRR(r4, r2);  // Store argc in r4
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r2, MemOperand(sp, r2));
+  }
+
+  // 2a. At least one argument, return r2 if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(r2, &to_string);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CompareObjectType(r2, r5, r5, FIRST_NONSTRING_TYPE);
+    __ bgt(&to_string);
+    __ beq(&symbol_descriptive_string);
+    __ b(&drop_frame_and_ret);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(r2, Heap::kempty_stringRootIndex);
+    __ Ret(1);
+  }
+
+  // 3a. Convert r2 to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(r4);
+    __ EnterBuiltinFrame(cp, r3, r4);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(cp, r3, r4);
+    __ SmiUntag(r4);
+  }
+  __ b(&drop_frame_and_ret);
+  // 3b. Convert symbol in r2 to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ Drop(r4);
+    __ Drop(1);
+    __ Push(r2);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r4);
+    __ Ret(1);
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : number of arguments
+  //  -- r3                     : constructor function
+  //  -- r5                     : new target
+  //  -- cp                     : context
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into r4.
+  {
+    Label no_arguments, done;
+    __ LoadRR(r8, r2);  // Store argc in r8.
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&no_arguments);
+    __ SubP(r2, r2, Operand(1));
+    __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r4, MemOperand(sp, r4));
+    __ b(&done);
+    __ bind(&no_arguments);
+    __ LoadRoot(r4, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure r4 is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(r4, &convert);
+    __ CompareObjectType(r4, r6, r6, FIRST_NONSTRING_TYPE);
+    __ blt(&done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(r8);
+      __ EnterBuiltinFrame(cp, r3, r8);
+      __ Push(r5);
+      __ LoadRR(r2, r4);
+      __ CallStub(&stub);
+      __ LoadRR(r4, r2);
+      __ Pop(r5);
+      __ LeaveBuiltinFrame(cp, r3, r8);
+      __ SmiUntag(r8);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ CmpP(r3, r5);
+  __ bne(&new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
+  __ b(&drop_frame_and_ret);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    FastNewObjectStub stub(masm->isolate());
+    __ SmiTag(r8);
+    __ EnterBuiltinFrame(cp, r3, r8);
+    __ Push(r4);  // first argument
+    __ CallStub(&stub);
+    __ Pop(r4);
+    __ LeaveBuiltinFrame(cp, r3, r8);
+    __ SmiUntag(r8);
+  }
+  __ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
+
+  __ bind(&drop_frame_and_ret);
+  {
+    __ Drop(r8);
+    __ Ret(1);
+  }
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  // -----------------------------------
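+  // Calls the given Runtime function with the target function as argument and
+  // tail-calls the code object it returns (e.g. freshly compiled code for
+  // Runtime::kCompileLazy below).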
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(r2);
+    __ Push(r2, r3, r5, r3);
+
+    __ CallRuntime(function_id, 1);
+    __ LoadRR(r4, r2);
+
+    // Restore target function and new target.
+    __ Pop(r2, r3, r5);
+    __ SmiUntag(r2);
+  }
+  __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+  __ bge(&ok, Label::kNear);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- r2     : number of arguments
+  //  -- r3     : constructor function
+  //  -- r4     : allocation site or undefined
+  //  -- r5     : new target
+  //  -- cp     : context
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(r4, r6);
+
+    if (!create_implicit_receiver) {
+      __ SmiTag(r6, r2);
+      __ LoadAndTestP(r6, r6);
+      __ Push(cp, r4, r6);
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    } else {
+      __ SmiTag(r2);
+      __ Push(cp, r4, r2);
+
+      // Allocate the new receiver object.
+      __ Push(r3, r5);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ LoadRR(r6, r2);
+      __ Pop(r3, r5);
+
+      // ----------- S t a t e -------------
+      //  -- r3: constructor function
+      //  -- r5: new target
+      //  -- r6: newly allocated object
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ LoadP(r2, MemOperand(sp));
+      __ SmiUntag(r2);
+      __ LoadAndTestP(r2, r2);
+
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(r6, r6);
+    }
+
+    // Set up pointer to last argument.
+    __ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    // r2: number of arguments
+    // r3: constructor function
+    // r4: address of last argument (caller sp)
+    // r5: new target
+    // CC: condition code indicating whether r2 is zero
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: number of arguments (smi-tagged)
+    Label loop, no_args;
+    __ beq(&no_args);
+    __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
+    __ SubP(sp, sp, ip);
+    __ LoadRR(r1, r2);
+    __ bind(&loop);
+    __ lay(ip, MemOperand(ip, -kPointerSize));
+    __ LoadP(r0, MemOperand(ip, r4));
+    __ StoreP(r0, MemOperand(ip, sp));
+    __ BranchOnCount(r1, &loop);
+    __ bind(&no_args);
+
+    // Call the function.
+    // r2: number of arguments
+    // r3: constructor function
+    // r5: new target
+
+    ParameterCount actual(r2);
+    __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    // r2: result
+    // sp[0]: receiver
+    // sp[1]: number of arguments (smi-tagged)
+    __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // r2: result
+      // sp[0]: receiver
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(r2, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CompareObjectType(r2, r3, r5, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ LoadP(r2, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // r2: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ LoadP(r3, MemOperand(sp));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi; a Smi here would mean the constructor
+  // of a derived class returned something that is neither undefined nor an
+  // Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r2, &dont_throw);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  __ SmiToPtrArrayOffset(r3, r3);
+  __ AddP(sp, sp, r3);
+  __ AddP(sp, sp, Operand(kPointerSize));
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the value to pass to the generator
+  //  -- r3 : the JSGeneratorObject to resume
+  //  -- r4 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r3);
+
+  // Store input value into generator object.
+  __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
+            r0);
+  __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+  __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ mov(ip, Operand(last_step_action));
+  __ LoadB(ip, MemOperand(ip));
+  __ CmpP(ip, Operand(StepIn));
+  __ bge(&prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ LoadP(ip, MemOperand(ip));
+  __ CmpP(ip, r3);
+  __ beq(&prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Push receiver.
+  __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r3    : the JSGeneratorObject to resume
+  //  -- r4    : the resume mode (tagged)
+  //  -- r6    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(
+      r2, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label loop, done_loop;
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_S390X
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&done_loop);
+#else
+    __ SmiUntag(r2);
+    __ LoadAndTestP(r2, r2);
+    __ beq(&done_loop);
+#endif
+    __ LoadRR(r1, r2);
+    __ bind(&loop);
+    __ push(ip);
+    __ BranchOnCount(r1, &loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+  __ bne(&old_generator, Label::kNear);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ LoadRR(r5, r3);
+    __ LoadRR(r3, r6);
+    __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+    __ JumpToJSEntry(ip);
+  }
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushStandardFrame(r6);
+
+    // Restore the operand stack.
+    __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+    __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
+    __ AddP(r2, r2,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
+      Label loop, done_loop;
+      __ SmiUntag(r5);
+      __ LoadAndTestP(r5, r5);
+      __ beq(&done_loop);
+      __ LoadRR(r1, r5);
+      __ bind(&loop);
+      __ LoadP(ip, MemOperand(r2, kPointerSize));
+      __ la(r2, MemOperand(r2, kPointerSize));
+      __ Push(ip);
+      __ BranchOnCount(r1, &loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
+              r0);
+
+    // Resume the generator function at the continuation.
+    __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+    __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+      __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+      __ SmiUntag(r4);
+      __ AddP(r5, r5, r4);
+      __ LoadSmiLiteral(r4,
+                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ LoadRR(r2, r3);  // Continuation expects generator object in r2.
+      __ Jump(r5);
+    }
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r4, r6);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r3, r4);
+    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r3, r4);
+    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+  __ push(r3);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+
+// Clobbers r4; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+                                        IsTagged argc_is_tagged) {
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
+  // Make r4 the space we have left. The stack might already be overflowed
+  // here which will cause r4 to become negative.
+  __ SubP(r4, sp, r4);
+  // Check if the arguments will overflow the stack.
+  if (argc_is_tagged == kArgcIsSmiTagged) {
+    __ SmiToPtrArrayOffset(r0, argc);
+  } else {
+    DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
+    __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
+  }
+  __ CmpP(r4, r0);
+  __ bgt(&okay);  // Signed comparison.
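+  // (In effect: continue only if sp - real_stack_limit > argc * kPointerSize,
+  // i.e. there is strictly more headroom than the arguments need.)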
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r2: new.target
+  // r3: function
+  // r4: receiver
+  // r5: argc
+  // r6: argv
+  // r0,r7-r9, cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Enter an internal frame.
+  {
+    // FrameScope ends up calling MacroAssembler::EnterFrame here
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Setup the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(cp, Operand(context_address));
+    __ LoadP(cp, MemOperand(cp));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ Push(r3, r4);
+
+    // Check if we have enough stack space to push all arguments.
+    // Clobbers r4.
+    Generate_CheckStackOverflow(masm, r5, kArgcIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop from argv to sp.
+    // The arguments end up on the stack in reverse order relative to argv
+    // (i.e. arg1, the first argument, sits at the highest stack address).
+    // r3: function
+    // r5: argc
+    // r6: argv, i.e. points to first arg
+    // r7: scratch reg to hold scaled argc
+    // r8: scratch reg to hold arg handle
+    // r9: scratch reg to hold index into argv
+    Label argLoop, argExit;
+    intptr_t zero = 0;
+    __ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
+    __ SubRR(sp, r7);                // Buy the stack frame to fit args
+    __ LoadImmP(r9, Operand(zero));  // Initialize argv index
+    __ bind(&argLoop);
+    __ CmpPH(r7, Operand(zero));
+    __ beq(&argExit, Label::kNear);
+    __ lay(r7, MemOperand(r7, -kPointerSize));
+    __ LoadP(r8, MemOperand(r9, r6));         // read next parameter
+    __ la(r9, MemOperand(r9, kPointerSize));  // r9++;
+    __ LoadP(r0, MemOperand(r8));             // dereference handle
+    __ StoreP(r0, MemOperand(r7, sp));        // push parameter
+    __ b(&argLoop);
+    __ bind(&argExit);
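+    // (e.g. with argc == 2: argv[0] is stored at sp + kPointerSize and
+    // argv[1] at sp, so the first argument sits at the highest address.)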
+
+    // Setup new.target and argc.
+    __ LoadRR(r6, r2);
+    __ LoadRR(r2, r5);
+    __ LoadRR(r5, r6);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(r7, r6);
+    __ LoadRR(r8, r6);
+    __ LoadRR(r9, r6);
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the JS frame, remove the parameters (except the function), and
+    // return.
+  }
+  __ b(r14);
+
+  // r2: result
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver size in bytes (the parameter size field
+  // stores a byte count so it can be added to sp directly below).
+  __ LoadP(args_count,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ LoadlW(args_count,
+            FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  __ AddP(sp, sp, args_count);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o r3: the JS function object being called.
+//   o r5: the new target
+//   o cp: our context
+//   o pp: the caller's constant pool pointer (if enabled)
+//   o fp: the caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ PushStandardFrame(r3);
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  Label array_done;
+  Register debug_info = r4;
+  DCHECK(!debug_info.is(r2));
+  __ LoadP(debug_info,
+           FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+  // Load original bytecode array or the debug copy.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+  __ beq(&array_done);
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ bind(&array_done);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ LoadP(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+  __ CmpP(r2, Operand(masm->CodeObject()));  // Self-reference to this code.
+  __ bne(&switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Load the initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
+  __ Push(r5, kInterpreterBytecodeArrayRegister, r4);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size (word) from the BytecodeArray object.
+    __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                  BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ SubP(r5, sp, r4);
+    __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+    __ CmpLogicalP(r5, r0);
+    __ bge(&ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    Label loop, no_args;
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
+    __ LoadAndTestP(r4, r4);
+    __ beq(&no_args);
+    __ LoadRR(r1, r4);
+    __ bind(&loop);
+    __ push(r5);
+    __ SubP(r1, Operand(1));
+    __ bne(&loop);
+    __ bind(&no_args);
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+                           kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+  __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+  __ Call(ip);
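+  // (i.e. ip = dispatch_table[bytecode]: the current bytecode selects a
+  // pointer-sized handler entry, which is entered with the accumulator,
+  // bytecode array and offset registers set up above.)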
+
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in r2.
+  LeaveInterpreterFrame(masm, r4);
+  __ Ret();
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+  __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(r3, r6, r7);
+  __ JumpToJSEntry(r6);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ LoadP(kContextRegister,
+           MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r4);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r2);
+
+    // Push function as argument and compile for baseline.
+    __ push(r3);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r2);
+  }
+  __ Ret();
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+                                         Register count, Register scratch) {
+  Label loop;
+  __ AddP(index, index, Operand(kPointerSize));  // Bias up for pre-decrement
+  __ LoadRR(r0, count);
+  __ bind(&loop);
+  __ LoadP(scratch, MemOperand(index, -kPointerSize));
+  __ lay(index, MemOperand(index, -kPointerSize));
+  __ push(scratch);
+  __ SubP(r0, Operand(1));
+  __ bne(&loop);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r4 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- r3 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Calculate the number of arguments (add one for the receiver).
+  __ AddP(r5, r2, Operand(1));
+
+  // Push the arguments.
+  Generate_InterpreterPushArgs(masm, r4, r5, r6);
+
+  // Call the target.
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- r2 : argument count (not including receiver)
+  // -- r5 : new target
+  // -- r3 : constructor to call
+  // -- r4 : address of the first argument
+  // -----------------------------------
+
+  // Push a slot for the receiver to be constructed.
+  __ LoadImmP(r0, Operand::Zero());
+  __ push(r0);
+
+  // Push the arguments (skip if none).
+  Label skip;
+  __ CmpP(r2, Operand::Zero());
+  __ beq(&skip);
+  Generate_InterpreterPushArgs(masm, r4, r2, r6);
+  __ bind(&skip);
+
+  // Call the constructor with r2, r3, and r5 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r4, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
+                           Code::kHeaderSize - kHeapObjectTag));
+
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ TestIfSmi(kInterpreterBytecodeArrayRegister);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+                         BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ LoadP(kInterpreterBytecodeOffsetRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+                           kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
+  __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+  __ Jump(ip);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
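+  // Lookup order (sketch): the per-native-context entry in the optimized
+  // code map (literals + code), then the map's context-independent shared
+  // code entry, then the SharedFunctionInfo's code unless it is a builtin,
+  // and finally the runtime.
+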
+  Register closure = r3;
+  Register map = r8;
+  Register index = r4;
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&gotta_call_runtime);
+
+  // Find literals.
+  // r9 : native context
+  // r4  : length / index
+  // r8  : optimized code map
+  // r5  : new target
+  // r3  : closure
+  Register native_context = r9;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r7;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ AddP(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ CmpP(temp, native_context);
+  __ bne(&loop_bottom, Label::kNear);
+  // OSR id set to none?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+  __ bne(&loop_bottom, Label::kNear);
+  // Literals available?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = r6;
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  // Store code entry in the closure.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r7);
+
+  // Link the closure into the optimized function list.
+  // r6 : code entry
+  // r9: native context
+  // r3 : closure
+  __ LoadP(
+      r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+            r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(
+      closure,
+      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+  // Save closure before the write barrier.
+  __ LoadRR(r7, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found neither literals nor code.
+  __ b(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ LoadP(entry,
+           FieldMemOperand(map, FixedArray::kHeaderSize +
+                                    SharedFunctionInfo::kSharedCodeIndex));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ b(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ LoadP(entry,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(r7);
+  __ CmpP(r7, Operand(Code::BUILTIN));
+  __ beq(&gotta_call_runtime);
+  // Yes, install the full code.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r7);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&gotta_call_runtime);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  //  -- r5 : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ Move(r6, r2);
+    // Push a copy of the target function and the new target.
+    __ SmiTag(r2);
+    // Push another copy as a parameter to the runtime call.
+    __ Push(r2, r3, r5, r3);
+
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ CmpP(r6, Operand(j));
+        __ b(ne, &over);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+                                        i * kPointerSize));
+        __ push(r6);
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
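+    // (The unrolled loop above pushes the caller's stdlib, foreign and heap
+    // arguments when present and pads with undefined, so the runtime call
+    // below always receives the function plus exactly three arguments.)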
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(r2, &failed);
+
+    __ Drop(2);
+    __ pop(r6);
+    __ SmiUntag(r6);
+    scope.GenerateLeaveFrame();
+
+    __ AddP(r6, r6, Operand(1));
+    __ Drop(r6);
+    __ Ret();
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ Pop(r2, r3, r5);
+    __ SmiUntag(r2);
+  }
+  // On failure, tail call back to regular js.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r2 at the start of the PlatformCodeAge sequence.
+  __ CleanseP(r14);
+  __ SubP(r14, Operand(kCodeAgingSequenceLength));
+  __ LoadRR(r2, r14);
+
+  __ pop(r14);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r2 - contains return address (beginning of patch sequence)
+  //   r3 - isolate
+  //   r5 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r4);
+  __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ LoadRR(ip, r2);
+  __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r2 at the start of the PlatformCodeAge sequence.
+  __ CleanseP(r14);
+  __ SubP(r14, Operand(kCodeAgingSequenceLength));
+  __ LoadRR(r2, r14);
+
+  __ pop(r14);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r2 - contains return address (beginning of patch sequence)
+  //   r3 - isolate
+  //   r5 - new target
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r4);
+  __ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
+  __ LoadRR(ip, r2);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushStandardFrame(r3);
+
+  // Jump to point after the code-age stub.
+  __ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(r2);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ la(sp, MemOperand(sp, kPointerSize));  // Ignore state
+  __ Ret();                                 // Jump to miss handler
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r2, Smi::FromInt(static_cast<int>(type)));
+    __ push(r2);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+  }
+
+  // Get the full codegen state from the stack and untag it -> r8.
+  __ LoadP(r8, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r8);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ CmpP(
+      r8,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ bne(&with_tos_register);
+  __ la(sp, MemOperand(sp, 1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
+  __ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
+  __ CmpP(
+      r8,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
+  __ bne(&unknown_state);
+  __ la(sp, MemOperand(sp, 2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// Clobbers registers {r6, r7, r8, r9}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                             Register function_template_info,
+                             Label* receiver_check_failed) {
+  Register signature = r6;
+  Register map = r7;
+  Register constructor = r8;
+  Register scratch = r9;
+
+  // If there is no signature, return the holder.
+  __ LoadP(signature, FieldMemOperand(function_template_info,
+                                      FunctionTemplateInfo::kSignatureOffset));
+  Label receiver_check_passed;
+  __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+                &receiver_check_passed);
+
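+  // (Sketch of the walk: for each map on the receiver's hidden-prototype
+  // chain, the signature is compared against the constructor's function
+  // template and all of its parent templates; the check fails once a
+  // non-hidden prototype is reached.)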
+  // Walk the prototype chain.
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, scratch, scratch);
+  __ CmpP(scratch, Operand(JS_FUNCTION_TYPE));
+  Label next_prototype;
+  __ bne(&next_prototype);
+  Register type = constructor;
+  __ LoadP(type,
+           FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(type,
+           FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ CmpP(signature, type);
+  __ beq(&receiver_check_passed);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype);
+  __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+  __ bne(&next_prototype);
+
+  // Otherwise load the parent function template and iterate.
+  __ LoadP(type,
+           FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ b(&function_template_loop);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ LoadlW(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
+  __ beq(receiver_check_failed);
+
+  __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ b(&prototype_loop_start);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2                 : number of arguments excluding receiver
+  //  -- r3                 : callee
+  //  -- lr                 : return address
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ LoadP(r4, MemOperand(sp, r1));
+  CompatibleReceiverCheck(masm, r4, r5, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ LoadP(r6, FieldMemOperand(r5, FunctionTemplateInfo::kCallCodeOffset));
+  __ LoadP(r6, FieldMemOperand(r6, CallHandlerInfo::kFastHandlerOffset));
+  __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+
+  // Compatible receiver check failed: throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  // Drop the arguments (including the receiver).
+  __ AddP(r1, r1, Operand(kPointerSize));
+  __ AddP(sp, sp, r1);
+  __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(r2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  // If the code object is null, just return to the caller.
+  Label skip;
+  __ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
+  __ bne(&skip);
+  __ Ret();
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+  if (has_handler_frame) {
+    __ LeaveFrame(StackFrame::STUB);
+  }
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ LoadP(
+      r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+                                  DeoptimizationInputData::kOsrPcOffsetIndex)));
+  __ SmiUntag(r3);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ AddP(r2, r3);
+  __ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ LoadRR(r14, r0);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- r2    : number of arguments
+  //  -- r3    : function
+  //  -- cp    : context
+  //  -- lr    : return address
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // 1. Pop receiver into r2 and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ Pop(r2);
+    __ JumpIfSmi(r2, &receiver_not_date);
+    __ CompareObjectType(r2, r4, r5, JS_DATE_TYPE);
+    __ bne(&receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
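+  // (e.g. getFullYear() reads the cached year field while the date cache
+  // stamp matches; on a stamp mismatch it falls back to the C++ date field
+  // function.)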
+  if (field_index == JSDate::kDateValue) {
+    __ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+      __ LoadP(r3, MemOperand(r3));
+      __ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
+      __ CmpP(r3, ip);
+      __ bne(&stamp_mismatch);
+      __ LoadP(r2, FieldMemOperand(
+                       r2, JSDate::kValueOffset + field_index * kPointerSize));
+      __ Ret();
+      __ bind(&stamp_mismatch);
+    }
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, r3);
+    __ LoadSmiLiteral(r3, Smi::FromInt(field_index));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ Ret();
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ push(r2);
+    __ LoadSmiLiteral(r2, Smi::FromInt(0));
+    __ EnterBuiltinFrame(cp, r3, r2);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : argc
+  //  -- sp[0] : argArray
+  //  -- sp[4] : thisArg
+  //  -- sp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into r3, argArray into r2 (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r5;
+    Register scratch = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(scratch, r2);
+    __ LoadP(r3, MemOperand(new_sp, 0));  // receiver
+    __ CmpP(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize));  // thisArg
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize));  // argArray
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
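+
+  // (e.g. for f.apply(t, a): argc == 2, so r3 = f, the thisArg slot gets t,
+  // and r2 = a; missing arguments default to undefined.)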
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argArray
+  //  -- r3    : receiver
+  //  -- sp[0] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(r3, &receiver_not_callable);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&receiver_not_callable);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
+  __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ LoadImmP(r2, Operand::Zero());
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // r2: actual number of arguments
+  {
+    Label done;
+    __ CmpP(r2, Operand::Zero());
+    __ bne(&done, Label::kNear);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ AddP(r2, Operand(1));
+    __ bind(&done);
+  }
+
+  // r2: actual number of arguments
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
+  __ LoadP(r3, MemOperand(sp, r4));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r2: actual number of arguments
+  // r3: callable
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ AddP(r4, sp, r4);
+
+    __ bind(&loop);
+    __ LoadP(ip, MemOperand(r4, -kPointerSize));
+    __ StoreP(ip, MemOperand(r4));
+    __ SubP(r4, Operand(kPointerSize));
+    __ CmpP(r4, sp);
+    __ bne(&loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ SubP(r2, Operand(1));
+    __ pop();
+  }
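+
+  // (e.g. for f.call(t, x): the stack was [x, t, f] with argc == 2; after the
+  // shift it is [x, t] with r3 = f and argc == 1, so t becomes the receiver.)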
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : argc
+  //  -- sp[0]  : argumentsList
+  //  -- sp[4]  : thisArgument
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r5;
+    Register scratch = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(scratch, r3);
+    __ LoadRR(r2, r3);
+    __ CmpP(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ beq(&skip);
+    __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize));  // thisArgument
+    __ CmpP(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize));  // argumentsList
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+    __ StoreP(scratch, MemOperand(sp, 0));
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r3    : target
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(r3, &target_not_callable);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&target_not_callable);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2     : argc
+  //  -- sp[0]  : new.target (optional)
+  //  -- sp[4]  : argumentsList
+  //  -- sp[8]  : target
+  //  -- sp[12] : receiver
+  // -----------------------------------
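+  // Informally this implements Reflect.construct(target, argumentsList,
+  // newTarget = target): both target and new.target must be constructors,
+  // and the construction is delegated to the Apply builtin, which
+  // dispatches to Construct because new.target is not undefined.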
+
+  // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+  // new.target into r5 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push thisArgument
+  // (if present) instead.
+  {
+    Label skip;
+    Register arg_size = r4;
+    Register new_sp = r6;
+    __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
+    __ AddP(new_sp, sp, arg_size);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ LoadRR(r2, r3);
+    __ LoadRR(r5, r3);
+    __ StoreP(r3, MemOperand(new_sp, 0));  // receiver (undefined)
+    __ CmpP(arg_size, Operand(kPointerSize));
+    __ blt(&skip);
+    __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
+    __ LoadRR(r5, r3);  // new.target defaults to target
+    __ beq(&skip);
+    __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize));  // argumentsList
+    __ CmpP(arg_size, Operand(2 * kPointerSize));
+    __ beq(&skip);
+    __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize));  // new.target
+    __ bind(&skip);
+    __ LoadRR(sp, new_sp);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r5    : new.target
+  //  -- r3    : target
+  //  -- sp[0] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(r3, &target_not_constructor);
+  __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsConstructor);
+  __ beq(&target_not_constructor);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(r5, &new_target_not_constructor);
+  __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsConstructor);
+  __ beq(&new_target_not_constructor);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ StoreP(r3, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ StoreP(r5, MemOperand(sp, 0));
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r2 : actual number of arguments
+  //  -- r3 : function (passed through to callee)
+  //  -- r4 : expected number of arguments
+  //  -- r5 : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
+  // Make r7 the space we have left. The stack might already be overflowed
+  // here which will cause r7 to become negative.
+  __ SubP(r7, sp, r7);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+  __ CmpP(r7, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(r2);
+  __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  // The stack is updated as follows:
+  //    old SP --->
+  //                 R14 Return Addr
+  //                 Old FP                     <--- New FP
+  //                 Argument Adapter SMI
+  //                 Function
+  //                 ArgC as SMI                <--- New SP
+  __ lay(sp, MemOperand(sp, -5 * kPointerSize));
+
+  // Cleanse the top nibble of 31-bit pointers.
+  __ CleanseP(r14);
+  __ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
+  __ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
+  __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+  __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
+  __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
+  __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
+                               kPointerSize));
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                                kPointerSize)));
+  int stack_adjustment = kPointerSize;  // adjust for receiver
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
+  __ SmiToPtrArrayOffset(r3, r3);
+  __ lay(sp, MemOperand(sp, r3));
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2    : argumentsList
+  //  -- r3    : target
+  //  -- r5    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
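+  // Informally (rough sketch): an unmodified (sloppy or strict) arguments
+  // object or a fast JSArray supplies its elements directly; anything
+  // else goes through Runtime::kCreateListFromArrayLike.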
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(r2, &create_runtime);
+
+    // Load the map of argumentsList into r4.
+    __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+
+    // Load native context into r6.
+    __ LoadP(r6, NativeContextMemOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ CmpP(ip, r4);
+    __ beq(&create_arguments);
+    __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ CmpP(ip, r4);
+    __ beq(&create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
+    __ beq(&create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r3, r5, r2);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(r3, r5);
+      __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+      __ SmiUntag(r4);
+    }
+    __ b(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
+    __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+    __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
+    __ CmpP(r4, ip);
+    __ bne(&create_runtime);
+    __ SmiUntag(r4);
+    __ LoadRR(r2, r6);
+    __ b(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r4);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ CmpP(r4, Operand(FAST_ELEMENTS));
+    __ bgt(&create_runtime);
+    __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ beq(&create_runtime);
+    __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+    __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+    __ SmiUntag(r4);
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+    // Make ip the space we have left. The stack might already be overflowed
+    // here which will cause ip to become negative.
+    __ SubP(ip, sp, ip);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+    __ CmpP(ip, r0);  // Signed comparison.
+    __ bgt(&done);
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- r3    : target
+  //  -- r2    : args (a FixedArray built from argumentsList)
+  //  -- r4    : len (number of elements to push from args)
+  //  -- r5    : new.target (checked to be constructor or undefined)
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    Label loop, no_args;
+    __ CmpP(r4, Operand::Zero());
+    __ beq(&no_args);
+    __ AddP(r2, r2,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    __ LoadRR(r1, r4);
+    __ bind(&loop);
+    __ LoadP(r0, MemOperand(r2, kPointerSize));
+    __ la(r2, MemOperand(r2, kPointerSize));
+    __ push(r0);
+    __ BranchOnCount(r1, &loop);
+    __ bind(&no_args);
+    __ LoadRR(r2, r4);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current
+// call. Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
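+// After this runs, g()'s frame (and any arguments adaptor frame beneath
+// it) is gone and f()'s arguments occupy that space, so the eventual call
+// to f() returns directly to g()'s caller.
+//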
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is active.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
+  __ LoadlB(scratch1, MemOperand(scratch1));
+  __ CmpP(scratch1, Operand::Zero());
+  __ beq(&done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ LoadP(scratch3,
+             MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ bne(&no_interpreter_frame);
+    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(
+      scratch3,
+      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ LoadRR(fp, scratch2);
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ LoadP(scratch1,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  __ LoadP(scratch1,
+           FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(caller_args_count_reg,
+           FieldMemOperand(scratch1,
+                           SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_S390X
+  __ SmiUntag(caller_args_count_reg);
+#endif
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+  __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
+  __ bne(&class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
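+  // Informally (OrdinaryCallBindThis): in sloppy mode a null or undefined
+  // receiver becomes the global proxy and other primitives are boxed via
+  // ToObject; strict-mode and native functions take the receiver as-is.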
+  Label done_convert;
+  __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+                          (1 << SharedFunctionInfo::kNativeBit)));
+  __ bne(&done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r3 : the function to call (checked to be a JSFunction)
+    //  -- r4 : the shared function info.
+    //  -- cp : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(r5);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
+      __ LoadP(r5, MemOperand(sp, r5));
+      __ JumpIfSmi(r5, &convert_to_object);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
+      __ bge(&done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy);
+        __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(r5);
+        }
+        __ b(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(r2);
+        __ Push(r2, r3);
+        __ LoadRR(r2, r5);
+        __ Push(cp);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(cp);
+        __ LoadRR(r5, r2);
+        __ Pop(r2, r3);
+        __ SmiUntag(r2);
+      }
+      __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
+    __ StoreP(r5, MemOperand(sp, r6));
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSFunction)
+  //  -- r4 : the shared function info.
+  //  -- cp : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  __ LoadW(
+      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_S390X
+  __ SmiUntag(r4);
+#endif
+  ParameterCount actual(r2);
+  ParameterCount expected(r4);
+  __ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : target (checked to be a JSBoundFunction)
+  //  -- r5 : new.target (only in case of [[Construct]])
+  // -----------------------------------
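+  // Informally, for f = g.bind(thisArg, b0, b1) invoked as f(a0, a1), the
+  // bound arguments are spliced in front of the call-time arguments so
+  // that the eventual call sees (b0, b1, a0, a1); r2 grows by the number
+  // of bound arguments.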
+
+  // Load [[BoundArguments]] into r4 and length of that into r6.
+  Label no_bound_arguments;
+  __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
+  __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  __ SmiUntag(r6);
+  __ LoadAndTestP(r6, r6);
+  __ beq(&no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r3 : target (checked to be a JSBoundFunction)
+    //  -- r4 : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- r5 : new.target (only in case of [[Construct]])
+    //  -- r6 : the number of [[BoundArguments]]
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ LoadRR(r8, sp);  // preserve previous stack pointer
+      __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
+      __ SubP(sp, sp, r9);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+      __ bgt(&done);  // Signed comparison.
+      // Restore the stack pointer.
+      __ LoadRR(sp, r8);
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Relocate arguments down the stack.
+    //  -- r2 : the number of arguments (not including the receiver)
+    //  -- r8 : the previous stack pointer
+    //  -- r9: the size of the [[BoundArguments]]
+    {
+      Label skip, loop;
+      __ LoadImmP(r7, Operand::Zero());
+      __ CmpP(r2, Operand::Zero());
+      __ beq(&skip);
+      __ LoadRR(r1, r2);
+      __ bind(&loop);
+      __ LoadP(r0, MemOperand(r8, r7));
+      __ StoreP(r0, MemOperand(sp, r7));
+      __ AddP(r7, r7, Operand(kPointerSize));
+      __ BranchOnCount(r1, &loop);
+      __ bind(&skip);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ AddP(r4, r4, r9);
+      __ LoadRR(r1, r6);
+      __ bind(&loop);
+      __ LoadP(r0, MemOperand(r4, -kPointerSize));
+      __ lay(r4, MemOperand(r4, -kPointerSize));
+      __ StoreP(r0, MemOperand(sp, r7));
+      __ AddP(r7, r7, Operand(kPointerSize));
+      __ BranchOnCount(r1, &loop);
+      __ AddP(r2, r2, r6);
+    }
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(r3);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+  __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+  __ StoreP(ip, MemOperand(sp, r1));
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ LoadP(r3,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+                                       masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the target to call (can be any Object).
+  // -----------------------------------
+
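+  // Dispatch: JSFunction -> CallFunction, JSBoundFunction ->
+  // CallBoundFunction, JSProxy -> the runtime's [[Call]] fallback, any
+  // other callable -> the CALL_AS_FUNCTION_DELEGATE; everything else
+  // throws via Runtime::kThrowCalledNonCallable.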
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(r3, &non_callable);
+  __ bind(&non_smi);
+  __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+  __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r6, Map::kIsCallable);
+  __ beq(&non_callable);
+
+  __ CmpP(r7, Operand(JS_PROXY_TYPE));
+  __ bne(&non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r2, r5, r6, r7);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ Push(r3);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ AddP(r2, r2, Operand(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+  __ StoreP(r3, MemOperand(sp, r7));
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (checked to be a JSFunction)
+  //  -- r5 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertFunction(r3);
+
+  // Calling convention for function specific ConstructStubs require
+  // r4 to contain either an AllocationSite or undefined.
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
+  __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the function to call (checked to be a JSBoundFunction)
+  //  -- r5 : the new target (checked to be a constructor)
+  // -----------------------------------
+  __ AssertBoundFunction(r3);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  Label skip;
+  __ CmpP(r3, r5);
+  __ bne(&skip);
+  __ LoadP(r5,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ bind(&skip);
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ LoadP(r3,
+           FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ LoadP(ip, MemOperand(ip));
+  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (checked to be a JSProxy)
+  //  -- r5 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ Push(r3, r5);
+  // Include the pushed new_target, constructor and the receiver.
+  __ AddP(r2, r2, Operand(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (can be any Object)
+  //  -- r5 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
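+  // Dispatch mirrors Generate_Call: JSFunction -> ConstructFunction,
+  // JSBoundFunction -> ConstructBoundFunction, JSProxy -> ConstructProxy,
+  // any other constructor -> the CALL_AS_CONSTRUCTOR_DELEGATE; everything
+  // else lands in ConstructedNonConstructable.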
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(r3, &non_constructor);
+
+  // Dispatch based on instance type.
+  __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
+  __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Construct]] internal method.
+  __ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ TestBit(r4, Map::kIsConstructor);
+  __ beq(&non_constructor);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+          RelocInfo::CODE_TARGET, eq);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpP(r7, Operand(JS_PROXY_TYPE));
+  __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+          eq);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+    __ StoreP(r3, MemOperand(sp, r7));
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r3);
+  __ Push(r3);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r3);
+  __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r3, r4);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : message_id as Smi
+  //  -- lr : return address
+  // -----------------------------------
+  __ push(r3);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r2.
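+  // Informally: Smis and HeapNumbers are returned as-is; everything else
+  // is delegated to the NonNumberToNumber builtin.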
+  STATIC_ASSERT(kSmiTag == 0);
+  __ TestIfSmi(r2);
+  __ Ret(eq);
+
+  __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
+  // r2: receiver
+  // r3: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : actual number of arguments
+  //  -- r3 : function (passed through to callee)
+  //  -- r4 : expected number of arguments
+  //  -- r5 : new target (passed through to callee)
+  // -----------------------------------
+
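+  // Informally: when actual < expected, the receiver and the actual
+  // arguments are re-pushed and the missing parameters are filled with
+  // undefined; when actual >= expected, the receiver and the first
+  // `expected` arguments are re-pushed. The actual count is recorded in
+  // the adaptor frame so the frame can be torn down correctly.
+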
+  Label invoke, dont_adapt_arguments, stack_overflow;
+
+  Label enough, too_few;
+  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+  __ CmpP(r2, r4);
+  __ blt(&too_few);
+  __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ beq(&dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate copy start address into r2 and copy end address into r6.
+    // r2: actual number of arguments as a smi
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r2, r2);
+    __ AddP(r2, fp);
+    // adjust for return address and receiver
+    __ AddP(r2, r2, Operand(2 * kPointerSize));
+    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ SubP(r6, r2, r6);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r2: copy start address
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // r6: copy end address
+    // ip: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ LoadP(r0, MemOperand(r2, 0));
+    __ push(r0);
+    __ CmpP(r2, r6);  // Compare before moving to next argument.
+    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ bne(&copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected
+    __ bind(&too_few);
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentAdaptorStackCheck(masm, &stack_overflow);
+
+    // Calculate the copy start address into r2; the copy end address is fp.
+    // r2: actual number of arguments as a smi
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r2, r2);
+    __ lay(r2, MemOperand(r2, fp));
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r2: copy start address
+    // r3: function
+    // r4: expected number of arguments
+    // r5: new target (passed through to callee)
+    // ip: code entry to call
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+    __ push(r0);
+    __ CmpP(r2, fp);  // Compare before moving to next argument.
+    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ bne(&copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r3: function
+    // r4: expected number of arguments
+    // ip: code entry to call
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ SubP(r6, fp, r6);
+    // Adjust for frame.
+    __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ push(r0);
+    __ CmpP(sp, r6);
+    __ bne(&fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ LoadRR(r2, r4);
+  // r2 : expected number of arguments
+  // r3 : function (passed through to callee)
+  // r5 : new target (passed through to callee)
+  __ CallJSEntry(ip);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ JumpToJSEntry(ip);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bkpt(0);
+  }
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_S390
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
new file mode 100644
index 0000000..1536604
--- /dev/null
+++ b/src/builtins/x64/builtins-x64.cc
@@ -0,0 +1,3087 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/code-factory.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments excluding receiver
+  //  -- rdi                 : target
+  //  -- rdx                 : new.target
+  //  -- rsp[0]              : return address
+  //  -- rsp[8]              : last argument
+  //  -- ...
+  //  -- rsp[8 * argc]       : first argument
+  //  -- rsp[8 * (argc + 1)] : receiver
+  // -----------------------------------
+  __ AssertFunction(rdi);
+
+  // The logic contained here is mirrored for TurboFan inlining in
+  // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects rax to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ addp(rax, Immediate(num_extra_args + 1));
+
+  // Unconditionally insert argc, target and new target as extra arguments. They
+  // will be used by stack frame iterators when constructing the stack trace.
+  __ PopReturnAddressTo(kScratchRegister);
+  __ Integer32ToSmi(rax, rax);
+  __ Push(rax);
+  __ SmiToInteger32(rax, rax);
+  __ Push(rdi);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(kScratchRegister);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ movp(kScratchRegister,
+          FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(kScratchRegister,
+          FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
+  __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+  __ jmp(kScratchRegister);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ Integer32ToSmi(rax, rax);
+    __ Push(rax);
+    // Push a copy of the target function and the new target.
+    __ Push(rdi);
+    __ Push(rdx);
+    // Function is also the parameter to the runtime call.
+    __ Push(rdi);
+
+    __ CallRuntime(function_id, 1);
+    __ movp(rbx, rax);
+
+    // Restore target function and new target.
+    __ Pop(rdx);
+    __ Pop(rdi);
+    __ Pop(rax);
+    __ SmiToInteger32(rax, rax);
+  }
+  __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ jmp(rbx);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- rax: number of arguments
+  //  -- rsi: context
+  //  -- rdi: constructor function
+  //  -- rbx: allocation site or undefined
+  //  -- rdx: new target
+  // -----------------------------------
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(rbx);
+    __ Push(rsi);
+    __ Push(rbx);
+    __ Integer32ToSmi(rcx, rax);
+    __ Push(rcx);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(rdi);
+      __ Push(rdx);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ movp(rbx, rax);
+      __ Pop(rdx);
+      __ Pop(rdi);
+
+      // ----------- S t a t e -------------
+      //  -- rdi: constructor function
+      //  -- rbx: newly allocated object
+      //  -- rdx: new target
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
+    }
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(rbx);
+      __ Push(rbx);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ movp(rcx, rax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ Push(Operand(rbx, rcx, times_pointer_size, 0));
+    __ bind(&entry);
+    __ decp(rcx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    ParameterCount actual(rax);
+    __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      __ JumpIfSmi(rax, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+      __ j(above_equal, &exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ movp(rax, Operand(rsp, 0));
+
+      // Restore the arguments count and leave the construct frame. The
+      // arguments count is stored below the receiver.
+      __ bind(&exit);
+      __ movp(rbx, Operand(rsp, 1 * kPointerSize));
+    } else {
+      __ movp(rbx, Operand(rsp, 0));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(rax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  // Remove caller arguments from the stack and return.
+  __ PopReturnAddressTo(rcx);
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+  __ PushReturnAddressFrom(rcx);
+  if (create_implicit_receiver) {
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(counters->constructed_objects(), 1);
+  }
+  __ ret(0);
+}
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ Push(rdi);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kRaxIsSmiTagged, kRaxIsUntaggedInt };
+
+// Clobbers rcx, r11, kScratchRegister; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+                                        IsTagged rax_is_tagged) {
+  // rax   : the number of items to be pushed to the stack
+  //
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+  __ movp(rcx, rsp);
+  // Make rcx the space we have left. The stack might already be overflowed
+  // here which will cause rcx to become negative.
+  __ subp(rcx, kScratchRegister);
+  // Make r11 the space we need for the array when it is unrolled onto the
+  // stack.
+  if (rax_is_tagged == kRaxIsSmiTagged) {
+    __ PositiveSmiTimesPowerOfTwoToInteger64(r11, rax, kPointerSizeLog2);
+  } else {
+    DCHECK(rax_is_tagged == kRaxIsUntaggedInt);
+    __ movp(r11, rax);
+    __ shlq(r11, Immediate(kPointerSizeLog2));
+  }
+  // Check if the arguments will overflow the stack.
+  __ cmpp(rcx, r11);
+  __ j(greater, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Expects five C++ function parameters.
+  // - Object* new_target
+  // - JSFunction* function
+  // - Object* receiver
+  // - int argc
+  // - Object*** argv
+  // (see Handle::Invoke in execution.cc).
+
+  // Open a C++ scope for the FrameScope.
+  {
+// Platform specific argument handling. After this, the stack contains
+// an internal frame and the pushed function and receiver, and
+// registers rax and rbx hold the argument count and argument array,
+// while rdi holds the function pointer, rsi the context, and rdx the
+// new.target.
+
+#ifdef _WIN64
+    // MSVC parameters in:
+    // rcx        : new_target
+    // rdx        : function
+    // r8         : receiver
+    // r9         : argc
+    // [rsp+0x20] : argv
+
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ movp(rsi, masm->ExternalOperand(context_address));
+
+    // Push the function and the receiver onto the stack.
+    __ Push(rdx);
+    __ Push(r8);
+
+    // Load the number of arguments and set up the pointer to the arguments.
+    __ movp(rax, r9);
+    // Load the previous frame pointer to access the C arguments on the stack.
+    __ movp(kScratchRegister, Operand(rbp, 0));
+    __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+    // Load the function pointer into rdi.
+    __ movp(rdi, rdx);
+    // Load the new.target into rdx.
+    __ movp(rdx, rcx);
+#else   // _WIN64
+    // GCC parameters in:
+    // rdi : new_target
+    // rsi : function
+    // rdx : receiver
+    // rcx : argc
+    // r8  : argv
+
+    __ movp(r11, rdi);
+    __ movp(rdi, rsi);
+    // rdi : function
+    // r11 : new_target
+
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ movp(rsi, masm->ExternalOperand(context_address));
+
+    // Push the function and receiver onto the stack.
+    __ Push(rdi);
+    __ Push(rdx);
+
+    // Load the number of arguments and set up the pointer to the arguments.
+    __ movp(rax, rcx);
+    __ movp(rbx, r8);
+
+    // Load the new.target into rdx.
+    __ movp(rdx, r11);
+#endif  // _WIN64
+
+    // Current stack contents:
+    // [rsp + 2 * kPointerSize ... ] : Internal frame
+    // [rsp + kPointerSize]          : function
+    // [rsp]                         : receiver
+    // Current register contents:
+    // rax : argc
+    // rbx : argv
+    // rsi : context
+    // rdi : function
+    // rdx : new.target
+
+    // Check if we have enough stack space to push all arguments.
+    // Expects argument count in rax. Clobbers rcx, r11.
+    Generate_CheckStackOverflow(masm, kRaxIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop.
+    // Register rbx points to array of pointers to handle locations.
+    // Push the values of these handles.
+    Label loop, entry;
+    __ Set(rcx, 0);  // Set loop variable to 0.
+    __ jmp(&entry, Label::kNear);
+    __ bind(&loop);
+    __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+    __ Push(Operand(kScratchRegister, 0));  // dereference handle
+    __ addp(rcx, Immediate(1));
+    __ bind(&entry);
+    __ cmpp(rcx, rax);
+    __ j(not_equal, &loop);
+
+    // Invoke the builtin code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
+  }
+
+  // TODO(X64): Is argument correct? Is there a receiver to remove?
+  __ ret(1 * kPointerSize);  // Remove receiver.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : the value to pass to the generator
+  //  -- rbx    : the JSGeneratorObject to resume
+  //  -- rdx    : the resume mode (tagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
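+  // Informally: record the sent value and resume mode in the generator,
+  // reload its function and context, then resume either through the
+  // function's code entry (new-style bytecode generators) or by
+  // rebuilding the suspended full-codegen frame and jumping to the saved
+  // continuation offset.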
+  __ AssertGeneratorObject(rbx);
+
+  // Store input value into generator object.
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
+  __ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
+
+  // Load suspended function and context.
+  __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+  __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(last_step_action_operand, Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  Operand debug_suspended_generator_operand =
+      masm->ExternalOperand(debug_suspended_generator);
+  __ cmpp(rbx, debug_suspended_generator_operand);
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Pop return address.
+  __ PopReturnAddressTo(rax);
+
+  // Push receiver.
+  __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- rax    : return address
+  //  -- rbx    : the JSGeneratorObject to resume
+  //  -- rdx    : the resume mode (tagged)
+  //  -- rdi    : generator function
+  //  -- rsi    : generator context
+  //  -- rsp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ subl(rcx, Immediate(1));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ PushReturnAddressFrom(rax);
+    __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadSharedFunctionInfoSpecialField(
+        rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ movp(rdx, rbx);
+    __ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object.
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(rax);  // Return address.
+    __ Push(rbp);                   // Caller's frame pointer.
+    __ Move(rbp, rsp);
+    __ Push(rsi);  // Callee's context.
+    __ Push(rdi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+    __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
+    {
+      Label done_loop, loop;
+      __ Set(rcx, 0);
+      __ bind(&loop);
+      __ cmpl(rcx, rax);
+      __ j(equal, &done_loop, Label::kNear);
+      __ Push(
+          FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+      __ addl(rcx, Immediate(1));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
+                Heap::kEmptyFixedArrayRootIndex);
+
+    // Restore context.
+    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+
+    // Resume the generator function at the continuation.
+    __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
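+    // The continuation is a Smi-encoded byte offset into the code object's
+    // instruction area; adding it past the Code header below yields the
+    // address at which to resume.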
+    __ SmiToInteger64(
+        rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
+    __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
+    __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+            Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ movp(rax, rbx);  // Continuation expects generator object in rax.
+    __ jmp(rdx);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rbx);
+    __ Push(rdx);
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rbx);
+    __ Push(rdx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+  __ movp(args_count,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ movl(args_count,
+          FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
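+  // Note that the parameter size is stored in bytes and already includes the
+  // receiver, so it can be added to rsp directly below.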
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ PopReturnAddressTo(return_pc);
+  __ addp(rsp, args_count);
+  __ PushReturnAddressFrom(return_pc);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o rdi: the JS function object being called
+//   o rdx: the new target
+//   o rsi: our context
+//   o rbp: the caller's frame pointer
+//   o rsp: stack pointer (pointing to return address)
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ pushq(rbp);  // Caller's frame pointer.
+  __ movp(rbp, rsp);
+  __ Push(rsi);  // Callee's context.
+  __ Push(rdi);  // Callee's JS function.
+  __ Push(rdx);  // Callee's new target.
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
+  __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+          Immediate(0));
+  __ j(not_equal, &load_debug_bytecode_array);
+  __ movp(kInterpreterBytecodeArrayRegister,
+          FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ Move(rcx, masm->CodeObject());  // Self-reference to this code.
+  __ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset));
+  __ j(not_equal, &switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     rax);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Load initial bytecode offset.
+  __ movp(kInterpreterBytecodeOffsetRegister,
+          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push bytecode array and Smi tagged bytecode offset.
+  __ Push(kInterpreterBytecodeArrayRegister);
+  __ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
+  __ Push(rcx);
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ movp(rdx, rsp);
+    __ subp(rdx, rcx);
+    __ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
+    __ j(above_equal, &ok, Label::kNear);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+    __ j(always, &loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ Push(rdx);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ subp(rcx, Immediate(kPointerSize));
+    __ j(greater_equal, &loop_header, Label::kNear);
+  }
+
+  // Load accumulator and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ Move(
+      kInterpreterDispatchTableRegister,
+      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
+                       times_pointer_size, 0));
+  __ call(rbx);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+  // The return value is in rax.
+  LeaveInterpreterFrame(masm, rbx, rcx);
+  __ ret(0);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+  __ movp(kInterpreterBytecodeArrayRegister,
+          FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ jmp(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ leave();  // Leave the frame so we can tail call.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
+  __ RecordWriteCodeEntryField(rdi, rcx, r15);
+  __ jmp(rcx);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ movp(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+  __ movp(kContextRegister,
+          Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, rbx, rcx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ Push(rax);
+
+    // Push function as argument and compile for baseline.
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ Pop(rax);
+  }
+  __ ret(0);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         bool push_receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rbx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ movp(rcx, rax);
+  if (push_receiver) {
+    __ addp(rcx, Immediate(1));  // Add one for receiver.
+  }
+
+  __ shlp(rcx, Immediate(kPointerSizeLog2));
+  __ negp(rcx);
+  __ addp(rcx, rbx);
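+  // rcx now points one slot below the last argument to be pushed; the loop
+  // below walks rbx downward toward that limit.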
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ j(always, &loop_check);
+  __ bind(&loop_header);
+  __ Push(Operand(rbx, 0));
+  __ subp(rbx, Immediate(kPointerSize));
+  __ bind(&loop_check);
+  __ cmpp(rbx, rcx);
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rbx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  //  -- rdi : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ PopReturnAddressTo(kScratchRegister);
+
+  Generate_InterpreterPushArgs(masm, true);
+
+  // Call the target.
+  __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
+
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- rdi : the constructor to call (can be any Object)
+  //  -- rbx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ PopReturnAddressTo(kScratchRegister);
+
+  // Push slot for the receiver to be constructed.
+  __ Push(Immediate(0));
+
+  Generate_InterpreterPushArgs(masm, false);
+
+  // Push return address in preparation for the tail-call.
+  __ PushReturnAddressFrom(kScratchRegister);
+
+  // Call the constructor (rax, rdx, rdi passed on).
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
+  __ Push(rbx);
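+  // The pushed address points just past the handler call inside the
+  // InterpreterEntryTrampoline, so a returning bytecode handler lands in the
+  // trampoline's exit sequence.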
+
+  // Initialize dispatch table register.
+  __ Move(
+      kInterpreterDispatchTableRegister,
+      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+  // Get the bytecode array pointer from the frame.
+  __ movp(kInterpreterBytecodeArrayRegister,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     rbx);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ movp(kInterpreterBytecodeOffsetRegister,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
+                    kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
+                       times_pointer_size, 0));
+  __ jmp(rbx);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = rdi;
+  Register map = r8;
+  Register index = r9;
+  __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmpl(index, Immediate(2));
+  __ j(less, &gotta_call_runtime);
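+  // Slot 0 of the optimized code map is the context-independent entry
+  // (checked under &maybe_call_runtime below); the slots after it form
+  // per-native-context entries of kEntryLength fields each, which the loop
+  // below walks from the end of the array.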
+
+  // Find literals.
+  // r14 : native context
+  // r9  : length / index
+  // r8  : optimized code map
+  // rdx : new target
+  // rdi : closure
+  Register native_context = r14;
+  __ movp(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  // Native context match?
+  Register temp = r11;
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousContext));
+  __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmpp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  __ SmiToInteger32(temp, temp);
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmpl(temp, Immediate(bailout_id));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
+  __ movp(r15, index);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = rcx;
+  __ movp(entry, FieldOperand(map, index, times_pointer_size,
+                              SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, r15);
+
+  // Link the closure into the optimized function list.
+  // rcx : code entry (entry)
+  // r14 : native context
+  // rdx : new target
+  // rdi : closure
+  __ movp(rbx,
+          ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+          closure);
+  // Save closure before the write barrier.
+  __ movp(rbx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
+                            kDontSaveFPRegs);
+  __ movp(closure, rbx);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
+  __ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
+  __ cmpl(index, Immediate(1));
+  __ j(greater, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                       SharedFunctionInfo::kSharedCodeIndex));
+  __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
+  __ andl(rbx, Immediate(Code::KindField::kMask));
+  __ shrl(rbx, Immediate(Code::KindField::kShift));
+  __ cmpl(rbx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime);
+  // Yes, install the full code.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, r15);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ movp(kScratchRegister, rax);
+    // Push the number of arguments to the callee.
+    __ Integer32ToSmi(rax, rax);
+    __ Push(rax);
+    // Push a copy of the target function and the new target.
+    __ Push(rdi);
+    __ Push(rdx);
+
+    // The function.
+    __ Push(rdi);
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
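+    // For each possible argument count j (0..3; larger counts use only the
+    // first three), push the provided arguments in order and pad with
+    // undefined so the runtime always receives stdlib, foreign and heap.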
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ cmpp(kScratchRegister, Immediate(j));
+        __ j(not_equal, &over, Label::kNear);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ Push(Operand(
+            rbp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done, Label::kNear);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call the runtime; on success, unwind this frame and the parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(rax, &failed, Label::kNear);
+
+    __ Drop(2);
+    __ Pop(kScratchRegister);
+    __ SmiToInteger32(kScratchRegister, kScratchRegister);
+    scope.GenerateLeaveFrame();
+
+    __ PopReturnAddressTo(rbx);
+    __ incp(kScratchRegister);
+    __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+    __ PushReturnAddressFrom(rbx);
+    __ ret(0);
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ Pop(rdx);
+    __ Pop(rdi);
+    __ Pop(rax);
+    __ SmiToInteger32(rax, rax);
+  }
+  // On failure, tail call back to regular JS.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Re-execute the code that was patched back to the young age when
+  // the stub returns.
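+  // Rewinding the return address by 5 bytes (the length of the short call
+  // instruction that invoked this stub, see kShortCallInstructionLength)
+  // makes the return re-execute that call.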
+  __ subp(Operand(rsp, 0), Immediate(5));
+  __ Pushad();
+  __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+  __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2);
+    __ CallCFunction(
+        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  }
+  __ Popad();
+  __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+  __ Pushad();
+  __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+  __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+  __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2);
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        2);
+  }
+  __ Popad();
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PopReturnAddressTo(kScratchRegister);
+  __ pushq(rbp);  // Caller's frame pointer.
+  __ movp(rbp, rsp);
+  __ Push(rsi);  // Callee's context.
+  __ Push(rdi);  // Callee's JS Function.
+  __ PushReturnAddressFrom(kScratchRegister);
+
+  // Jump to the point after the code-age stub.
+  __ ret(0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across the notification; this is important for
+    // compiled stubs that tail call the runtime on deopts, passing their
+    // parameters in registers.
+    __ Pushad();
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ Popad();
+    // Tear down internal frame.
+  }
+
+  __ DropUnderReturnAddress(1);  // Ignore state offset
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
+
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Get the full codegen state from the stack and untag it.
+  __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
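+  // The deoptimizer left the bailout state just below the return address,
+  // with the TOS value (if any) in the slot below that.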
+
+  // Switch on the state.
+  Label not_no_registers, not_tos_rax;
+  __ cmpp(kScratchRegister,
+          Immediate(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
+  __ j(not_equal, &not_no_registers, Label::kNear);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
+  __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+  __ cmpp(kScratchRegister,
+          Immediate(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
+  __ j(not_equal, &not_tos_rax, Label::kNear);
+  __ ret(2 * kPointerSize);  // Remove state, rax.
+
+  __ bind(&not_tos_rax);
+  __ Abort(kNoCasesLeft);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- rax    : number of arguments
+  //  -- rdi    : function
+  //  -- rsi    : context
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into rax and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    StackArgumentsAccessor args(rsp, 0);
+    __ movp(rax, args.GetReceiverOperand());
+    __ JumpIfSmi(rax, &receiver_not_date);
+    __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
+    __ j(not_equal, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ movp(rax, FieldOperand(rax, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
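+      // Cached date fields are only valid while the isolate-wide date cache
+      // stamp matches the stamp stored in this JSDate; on a mismatch they
+      // must be recomputed by the C function below.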
+      __ Load(rdx, ExternalReference::date_cache_stamp(masm->isolate()));
+      __ cmpp(rdx, FieldOperand(rax, JSDate::kCacheStampOffset));
+      __ j(not_equal, &stamp_mismatch, Label::kNear);
+      __ movp(rax, FieldOperand(
+                       rax, JSDate::kValueOffset + field_index * kPointerSize));
+      __ ret(1 * kPointerSize);
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2);
+    __ Move(arg_reg_1, rax);
+    __ Move(arg_reg_2, Smi::FromInt(field_index));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ ret(1 * kPointerSize);
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Move(rbx, Smi::FromInt(0));
+    __ EnterBuiltinFrame(rsi, rdi, rbx);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : argc
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : argArray
+  //  -- rsp[16] : thisArg
+  //  -- rsp[24] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into rdi, argArray into rax (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg_array, no_this_arg;
+    StackArgumentsAccessor args(rsp, rax);
+    __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+    __ movp(rbx, rdx);
+    __ movp(rdi, args.GetReceiverOperand());
+    __ testp(rax, rax);
+    __ j(zero, &no_this_arg, Label::kNear);
+    {
+      __ movp(rdx, args.GetArgumentOperand(1));
+      __ cmpp(rax, Immediate(1));
+      __ j(equal, &no_arg_array, Label::kNear);
+      __ movp(rbx, args.GetArgumentOperand(2));
+      __ bind(&no_arg_array);
+    }
+    __ bind(&no_this_arg);
+    __ PopReturnAddressTo(rcx);
+    __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+    __ Push(rdx);
+    __ PushReturnAddressFrom(rcx);
+    __ movp(rax, rbx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- rax     : argArray
+  //  -- rdi     : receiver
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(rdi, &receiver_not_callable, Label::kNear);
+  __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsCallable));
+  __ j(zero, &receiver_not_callable, Label::kNear);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(rax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+  __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &no_arguments,
+                Label::kNear);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver. Since we did not create a frame for
+  // Function.prototype.apply() yet, we use a normal Call builtin here.
+  __ bind(&no_arguments);
+  {
+    __ Set(rax, 0);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    StackArgumentsAccessor args(rsp, 0);
+    __ movp(args.GetReceiverOperand(), rdi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // rsp[0]           : Return address
+  // rsp[8]           : Argument n
+  // rsp[16]          : Argument n-1
+  //  ...
+  // rsp[8 * n]       : Argument 1
+  // rsp[8 * (n + 1)] : Receiver (callable to call)
+  //
+  // rax contains the number of arguments, n, not counting the receiver.
+  //
+  // 1. Make sure we have at least one argument.
+  {
+    Label done;
+    __ testp(rax, rax);
+    __ j(not_zero, &done, Label::kNear);
+    __ PopReturnAddressTo(rbx);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(rbx);
+    __ incp(rax);
+    __ bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  {
+    StackArgumentsAccessor args(rsp, rax);
+    __ movp(rdi, args.GetReceiverOperand());
+  }
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  {
+    Label loop;
+    __ movp(rcx, rax);
+    StackArgumentsAccessor args(rsp, rcx);
+    __ bind(&loop);
+    __ movp(rbx, args.GetArgumentOperand(1));
+    __ movp(args.GetArgumentOperand(0), rbx);
+    __ decp(rcx);
+    __ j(not_zero, &loop);              // While non-zero.
+    __ DropUnderReturnAddress(1, rbx);  // Drop one slot under return address.
+    __ decp(rax);  // One fewer argument (first argument is new receiver).
+  }
+
+  // 4. Call the callable.
+  // Since we did not create a frame for Function.prototype.call() yet,
+  // we use a normal Call builtin here.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : argc
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : argumentsList
+  //  -- rsp[16] : thisArgument
+  //  -- rsp[24] : target
+  //  -- rsp[32] : receiver
+  // -----------------------------------
+
+  // 1. Load target into rdi (if present), argumentsList into rax (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label done;
+    StackArgumentsAccessor args(rsp, rax);
+    __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+    __ movp(rdx, rdi);
+    __ movp(rbx, rdi);
+    __ cmpp(rax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ movp(rdi, args.GetArgumentOperand(1));  // target
+    __ j(equal, &done, Label::kNear);
+    __ movp(rdx, args.GetArgumentOperand(2));  // thisArgument
+    __ cmpp(rax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ movp(rbx, args.GetArgumentOperand(3));  // argumentsList
+    __ bind(&done);
+    __ PopReturnAddressTo(rcx);
+    __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+    __ Push(rdx);
+    __ PushReturnAddressFrom(rcx);
+    __ movp(rax, rbx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- rax     : argumentsList
+  //  -- rdi     : target
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(rdi, &target_not_callable, Label::kNear);
+  __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsCallable));
+  __ j(zero, &target_not_callable, Label::kNear);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    StackArgumentsAccessor args(rsp, 0);
+    __ movp(args.GetReceiverOperand(), rdi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax     : argc
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : new.target (optional)
+  //  -- rsp[16] : argumentsList
+  //  -- rsp[24] : target
+  //  -- rsp[32] : receiver
+  // -----------------------------------
+
+  // 1. Load target into rdi (if present), argumentsList into rax (if present),
+  // new.target into rdx (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined as
+  // the receiver instead.
+  {
+    Label done;
+    StackArgumentsAccessor args(rsp, rax);
+    __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+    __ movp(rdx, rdi);
+    __ movp(rbx, rdi);
+    __ cmpp(rax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ movp(rdi, args.GetArgumentOperand(1));  // target
+    __ movp(rdx, rdi);                         // new.target defaults to target
+    __ j(equal, &done, Label::kNear);
+    __ movp(rbx, args.GetArgumentOperand(2));  // argumentsList
+    __ cmpp(rax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ movp(rdx, args.GetArgumentOperand(3));  // new.target
+    __ bind(&done);
+    __ PopReturnAddressTo(rcx);
+    __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(rcx);
+    __ movp(rax, rbx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- rax     : argumentsList
+  //  -- rdx     : new.target
+  //  -- rdi     : target
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(rdi, &target_not_constructor, Label::kNear);
+  __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &target_not_constructor, Label::kNear);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
+  __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &new_target_not_constructor, Label::kNear);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    StackArgumentsAccessor args(rsp, 0);
+    __ movp(args.GetReceiverOperand(), rdi);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    StackArgumentsAccessor args(rsp, 0);
+    __ movp(args.GetReceiverOperand(), rdx);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both NULL and a Smi (kSmiTag is 0).
+    STATIC_ASSERT(kSmiTag == 0);
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function, by tail calling a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both NULL and a Smi (kSmiTag is 0).
+    STATIC_ASSERT(kSmiTag == 0);
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  __ movp(rdx, rdi);
+  // Run the native code for the Array function called as a normal function,
+  // by tail calling a stub.
+  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : function
+  //  -- rsi                 : context
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in rdx and the double value in xmm0.
+  __ LoadRoot(rdx, root_index);
+  __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  __ Move(rcx, rax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters are done.
+    __ testp(rcx, rcx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into rbx.
+    __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into xmm1, first converting the
+    // parameter to a number with the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(rbx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ Integer32ToSmi(rax, rax);
+      __ Integer32ToSmi(rcx, rcx);
+      __ EnterBuiltinFrame(rsi, rdi, rax);
+      __ Push(rcx);
+      __ Push(rdx);
+      __ movp(rax, rbx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ movp(rbx, rax);
+      __ Pop(rdx);
+      __ Pop(rcx);
+      __ LeaveBuiltinFrame(rsi, rdi, rax);
+      __ SmiToInteger32(rcx, rcx);
+      __ SmiToInteger32(rax, rax);
+      {
+        // Restore the double accumulator value (xmm0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(rdx, &restore_smi, Label::kNear);
+        __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiToDouble(xmm0, rdx);
+        __ bind(&done_restore);
+      }
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ Movsd(xmm1, FieldOperand(rbx, HeapNumber::kValueOffset));
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiToDouble(xmm1, rbx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (xmm0) and the next parameter value on the right hand side (xmm1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+    __ Ucomisd(xmm0, xmm1);
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ Movaps(xmm0, xmm1);
+    __ Move(rdx, rbx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
+    __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
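+    // Min must prefer -0 and max must prefer +0, so test the sign bit of the
+    // incoming value (xmm1) for min, or of the accumulator (xmm0) for max,
+    // and swap when it is set.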
+    __ Movmskpd(kScratchRegister, reg);
+    __ testl(kScratchRegister, Immediate(1));
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    __ decp(rcx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(rcx);
+  __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(rcx);
+  __ movp(rax, rdx);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : constructor function
+  //  -- rsi                 : context
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into rbx.
+  Label no_arguments;
+  {
+    StackArgumentsAccessor args(rsp, rax);
+    __ testp(rax, rax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ movp(rbx, args.GetArgumentOperand(1));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Integer32ToSmi(rax, rax);
+    __ EnterBuiltinFrame(rsi, rdi, rax);
+    __ movp(rax, rbx);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(rsi, rdi, rbx);  // Argc popped to rbx.
+    __ SmiToInteger32(rbx, rbx);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(rcx);
+    __ leap(rsp, Operand(rsp, rbx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(rcx);
+    __ Ret();
+  }
+
+  // 2b. No arguments, return +0 (already in rax).
+  __ bind(&no_arguments);
+  __ ret(1 * kPointerSize);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : constructor function
+  //  -- rdx                 : new target
+  //  -- rsi                 : context
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Store argc in r8.
+  __ Integer32ToSmi(r8, rax);
+
+  // 2. Load the first argument into rbx.
+  {
+    StackArgumentsAccessor args(rsp, rax);
+    Label no_arguments, done;
+    __ testp(rax, rax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ movp(rbx, args.GetArgumentOperand(1));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ Move(rbx, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure rbx is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(rbx, &done_convert);
+    __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(equal, &done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterBuiltinFrame(rsi, rdi, r8);
+      __ Push(rdx);
+      __ Move(rax, rbx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(rbx, rax);
+      __ Pop(rdx);
+      __ LeaveBuiltinFrame(rsi, rdi, r8);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmpp(rdx, rdi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+  __ jmp(&drop_frame_and_ret, Label::kNear);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ EnterBuiltinFrame(rsi, rdi, r8);
+    __ Push(rbx);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
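+    // Pop the saved first argument directly into the value slot of the
+    // newly allocated JSValue.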
+    __ Pop(FieldOperand(rax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(rsi, rdi, r8);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(rcx);
+    __ SmiToInteger32(r8, r8);
+    __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(rcx);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : constructor function
+  //  -- rsi                 : context
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into rax.
+  Label no_arguments;
+  {
+    StackArgumentsAccessor args(rsp, rax);
+    __ Integer32ToSmi(r8, rax);  // Store argc in r8.
+    __ testp(rax, rax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ movp(rax, args.GetArgumentOperand(1));
+  }
+
+  // 2a. At least one argument, return rax if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(rax, &to_string, Label::kNear);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+    __ j(above, &to_string, Label::kNear);
+    __ j(equal, &symbol_descriptive_string, Label::kNear);
+    __ jmp(&drop_frame_and_ret, Label::kNear);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(rax, Heap::kempty_stringRootIndex);
+    __ ret(1 * kPointerSize);
+  }
+
+  // 3a. Convert rax to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ EnterBuiltinFrame(rsi, rdi, r8);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(rsi, rdi, r8);
+  }
+  __ jmp(&drop_frame_and_ret, Label::kNear);
+
+  // 3b. Convert symbol in rax to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ PopReturnAddressTo(rcx);
+    __ SmiToInteger32(r8, r8);
+    __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+    __ Push(rax);
+    __ PushReturnAddressFrom(rcx);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(rcx);
+    __ SmiToInteger32(r8, r8);
+    __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(rcx);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rdi                 : constructor function
+  //  -- rdx                 : new target
+  //  -- rsi                 : context
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Store argc in r8.
+  __ Integer32ToSmi(r8, rax);
+
+  // 2. Load the first argument into rbx.
+  {
+    StackArgumentsAccessor args(rsp, rax);
+    Label no_arguments, done;
+    __ testp(rax, rax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ movp(rbx, args.GetArgumentOperand(1));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure rbx is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(rbx, &convert, Label::kNear);
+    __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rcx);
+    __ j(below, &done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ EnterBuiltinFrame(rsi, rdi, r8);
+      __ Push(rdx);
+      __ Move(rax, rbx);
+      __ CallStub(&stub);
+      __ Move(rbx, rax);
+      __ Pop(rdx);
+      __ LeaveBuiltinFrame(rsi, rdi, r8);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, new_object;
+  __ cmpp(rdx, rdi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+  __ jmp(&drop_frame_and_ret, Label::kNear);
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ EnterBuiltinFrame(rsi, rdi, r8);
+    __ Push(rbx);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(FieldOperand(rax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(rsi, rdi, r8);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(rcx);
+    __ SmiToInteger32(r8, r8);
+    __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(rcx);
+    __ Ret();
+  }
+}
+
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+                                       Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- rax : actual number of arguments
+  //  -- rbx : expected number of arguments
+  //  -- rdx : new target (passed through to callee)
+  //  -- rdi : function (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
+  __ movp(rcx, rsp);
+  // Make rcx the amount of space we have left. The stack might already have
+  // overflowed here, which will cause rcx to become negative.
+  __ subp(rcx, r8);
+  // Make r8 the space we need for the array when it is unrolled onto the
+  // stack.
+  __ movp(r8, rbx);
+  __ shlp(r8, Immediate(kPointerSizeLog2));
+  // Check if the arguments will overflow the stack.
+  __ cmpp(rcx, r8);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ pushq(rbp);
+  __ movp(rbp, rsp);
+
+  // Store the arguments adaptor context sentinel.
+  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+
+  // Push the function on the stack.
+  __ Push(rdi);
+
+  // Preserve the number of arguments on the stack. Must preserve rax,
+  // rbx and rcx because these registers are used when copying the
+  // arguments and the receiver.
+  __ Integer32ToSmi(r8, rax);
+  __ Push(r8);
+}
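+
+// Layout of the frame built above (a sketch; the sentinel occupies the
+// standard frame's context slot):
+//
+// |  caller pc          |
+// |  caller fp          | <- rbp
+// |  ARGUMENTS_ADAPTOR  |    (Smi sentinel in the context slot)
+// |  function (rdi)     |
+// |  argc (Smi)         | <- rsp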
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack. The number is a Smi.
+  __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ movp(rsp, rbp);
+  __ popq(rbp);
+
+  // Remove caller arguments from the stack.
+  __ PopReturnAddressTo(rcx);
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+  __ PushReturnAddressFrom(rcx);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdx    : requested object size (untagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ Integer32ToSmi(rdx, rdx);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(rcx);
+  __ Move(rsi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdx    : requested object size (untagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ Integer32ToSmi(rdx, rdx);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(rcx);
+  __ Move(rsi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdx    : message_id as Smi
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(rcx);
+  __ Move(rsi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in rax.
+  Label not_smi;
+  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
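+
+// A minimal JS-level sketch of inputs hitting the paths above (illustrative
+// only):
+//
+//   Number(42);    // Smi fast path: the input is returned unchanged.
+//   Number(4.25);  // HeapNumber fast path: returned unchanged.
+//   Number("42");  // Anything else tail-calls NonNumberToNumber.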
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : actual number of arguments
+  //  -- rbx : expected number of arguments
+  //  -- rdx : new target (passed through to callee)
+  //  -- rdi : function (passed through to callee)
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments, stack_overflow;
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->arguments_adaptors(), 1);
+
+  Label enough, too_few;
+  __ cmpp(rax, rbx);
+  __ j(less, &too_few);
+  __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
+    __ Set(r8, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incp(r8);
+    __ Push(Operand(rax, 0));
+    __ subp(rax, Immediate(kPointerSize));
+    __ cmpp(r8, rbx);
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
+    __ Set(r8, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incp(r8);
+    __ Push(Operand(rdi, 0));
+    __ subp(rdi, Immediate(kPointerSize));
+    __ cmpp(r8, rax);
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ bind(&fill);
+    __ incp(r8);
+    __ Push(kScratchRegister);
+    __ cmpp(r8, rbx);
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ movp(rdi, Operand(rbp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ movp(rax, rbx);
+  // rax : expected number of arguments
+  // rdx : new target (passed through to callee)
+  // rdi : function (passed through to callee)
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ call(rcx);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ jmp(rcx);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ int3();
+  }
+}
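+
+// JS-level sketch of calls that go through the adaptor (illustrative only):
+//
+//   function f(a, b, c) { return [a, b, c]; }
+//   f(1);           // actual < expected: b and c are filled with undefined.
+//   f(1, 2, 3, 4);  // actual > expected: a, b, c are bound normally and the
+//                   // extra value stays reachable via arguments[3].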
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : argumentsList
+  //  -- rdi    : target
+  //  -- rdx    : new.target (checked to be constructor or undefined)
+  //  -- rsp[0] : return address.
+  //  -- rsp[8] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(rax, &create_runtime);
+
+    // Load the map of argumentsList into rcx.
+    __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+
+    // Load native context into rbx.
+    __ movp(rbx, NativeContextOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ cmpp(rcx, ContextOperand(rbx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+    __ cmpp(rcx, ContextOperand(rbx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+    __ j(equal, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(rdi);
+      __ Push(rdx);
+      __ Push(rax);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(rdx);
+      __ Pop(rdi);
+      __ SmiToInteger32(rbx, FieldOperand(rax, FixedArray::kLengthOffset));
+    }
+    __ jmp(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ movp(rbx, FieldOperand(rax, JSArgumentsObject::kLengthOffset));
+    __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
+    __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+    __ j(not_equal, &create_runtime);
+    __ SmiToInteger32(rbx, rbx);
+    __ movp(rax, rcx);
+    __ jmp(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(rcx);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+    __ j(above, &create_runtime);
+    __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+    __ j(equal, &create_runtime);
+    __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+    __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+
+    __ bind(&done_create);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+    __ movp(rcx, rsp);
+    // Make rcx the amount of space we have left. The stack might already have
+    // overflowed here, which will cause rcx to become negative.
+    __ subp(rcx, kScratchRegister);
+    __ sarp(rcx, Immediate(kPointerSizeLog2));
+    // Check if the arguments will overflow the stack.
+    __ cmpp(rcx, rbx);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- rdi    : target
+  //  -- rax    : args (a FixedArray built from argumentsList)
+  //  -- rbx    : len (number of elements to push from args)
+  //  -- rdx    : new.target (checked to be constructor or undefined)
+  //  -- rsp[0] : return address.
+  //  -- rsp[8] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    __ PopReturnAddressTo(r8);
+    __ Set(rcx, 0);
+    Label done, loop;
+    __ bind(&loop);
+    __ cmpl(rcx, rbx);
+    __ j(equal, &done, Label::kNear);
+    __ Push(
+        FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    __ incl(rcx);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(r8);
+    __ Move(rax, rcx);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
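+
+// JS-level sketch of callers that funnel into this builtin (illustrative
+// only):
+//
+//   f.apply(thisArg, [1, 2, 3]);      // new.target undefined -> Call.
+//   Reflect.apply(f, thisArg, args);  // likewise dispatches to Call.
+//   Reflect.construct(C, args);       // new.target is C -> Construct.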
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is active.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ Move(kScratchRegister, is_tail_call_elimination_enabled);
+  __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
+  __ j(equal, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ Cmp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+           Smi::FromInt(StackFrame::STUB));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ Cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ movp(rbp, scratch2);
+  __ SmiToInteger32(
+      caller_args_count_reg,
+      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ movp(scratch1, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movp(scratch1,
+          FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      caller_args_count_reg, scratch1,
+      SharedFunctionInfo::kFormalParameterCountOffset);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack);
+  __ bind(&done);
+}
+}  // namespace
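+
+// JS-level sketch (illustrative only; assumes ES2015 tail call elimination
+// is enabled, which in this version sits behind a harmony flag): a call in
+// tail position of strict-mode code replaces the caller's frame:
+//
+//   "use strict";
+//   function f(n) { return n; }
+//   function g(n) { return f(n + 1); }  // g's frame is dropped before
+//                                       // f is entered.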
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdi : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  StackArgumentsAccessor args(rsp, rax);
+  __ AssertFunction(rdi);
+
+  // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kFunctionKindByteOffset),
+           Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ j(not_zero, &class_constructor);
+
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the shared function info.
+  //  -- rdi : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
+  Label done_convert;
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+           Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                     (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ j(not_zero, &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- rax : the number of arguments (not including the receiver)
+    //  -- rdx : the shared function info.
+    //  -- rdi : the function to call (checked to be a JSFunction)
+    //  -- rsi : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(rcx);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ movp(rcx, args.GetReceiverOperand());
+      __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
+      __ j(above_equal, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy, Label::kNear);
+        __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
+                         Label::kNear);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(rcx);
+        }
+        __ jmp(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ Integer32ToSmi(rax, rax);
+        __ Push(rax);
+        __ Push(rdi);
+        __ movp(rax, rcx);
+        __ Push(rsi);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(rsi);
+        __ movp(rcx, rax);
+        __ Pop(rdi);
+        __ Pop(rax);
+        __ SmiToInteger32(rax, rax);
+      }
+      __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ movp(args.GetReceiverOperand(), rcx);
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the shared function info.
+  //  -- rdi : the function to call (checked to be a JSFunction)
+  //  -- rsi : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
+  __ LoadSharedFunctionInfoSpecialField(
+      rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+  ParameterCount actual(rax);
+  ParameterCount expected(rbx);
+
+  __ InvokeFunctionCode(rdi, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
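+
+// JS-level sketch of the receiver conversion above (illustrative only):
+//
+//   function sloppy() { return typeof this; }
+//   function strict() { "use strict"; return typeof this; }
+//   sloppy.call(42);    // "object": 42 is boxed via ToObject.
+//   sloppy.call(null);  // "object": patched to the global proxy.
+//   strict.call(42);    // "number": passed through unconverted.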
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : new.target (only in case of [[Construct]])
+  //  -- rdi : target (checked to be a JSBoundFunction)
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into rcx and length of that into rbx.
+  Label no_bound_arguments;
+  __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ testl(rbx, rbx);
+  __ j(zero, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- rax : the number of arguments (not including the receiver)
+    //  -- rdx : new.target (only in case of [[Construct]])
+    //  -- rdi : target (checked to be a JSBoundFunction)
+    //  -- rcx : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
+    // -----------------------------------
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ leap(kScratchRegister, Operand(rbx, times_pointer_size, 0));
+      __ subp(rsp, kScratchRegister);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
+      __ j(greater, &done, Label::kNear);  // Signed comparison.
+      // Restore the stack pointer.
+      __ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Adjust effective number of arguments to include return address.
+    __ incl(rax);
+
+    // Relocate arguments and return address down the stack.
+    {
+      Label loop;
+      __ Set(rcx, 0);
+      __ leap(rbx, Operand(rsp, rbx, times_pointer_size, 0));
+      __ bind(&loop);
+      __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+      __ movp(Operand(rsp, rcx, times_pointer_size, 0), kScratchRegister);
+      __ incl(rcx);
+      __ cmpl(rcx, rax);
+      __ j(less, &loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+      __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+      __ bind(&loop);
+      __ decl(rbx);
+      __ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
+                                             FixedArray::kHeaderSize));
+      __ movp(Operand(rsp, rax, times_pointer_size, 0), kScratchRegister);
+      // Note: leal (unlike incl) leaves the flags set by the decl above
+      // intact; the loop condition below still tests the decremented rbx.
+      __ leal(rax, Operand(rax, 1));
+      __ j(greater, &loop);
+    }
+
+    // Adjust effective number of arguments (rax contains the number of
+    // arguments from the call plus return address plus the number of
+    // [[BoundArguments]]), so we need to subtract one for the return address.
+    __ decl(rax);
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
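+
+// JS-level sketch of the stack effect above (illustrative only):
+//
+//   function f(a, b, c) { return [a, b, c]; }
+//   var bound = f.bind("receiver", 1, 2);
+//   bound(3);  // the two bound arguments are spliced in below the call's
+//              // own argument, so f sees a = 1, b = 2, c = 3.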
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdi : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(rdi);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  StackArgumentsAccessor args(rsp, rax);
+  __ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+  __ movp(args.GetReceiverOperand(), rbx);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ Load(rcx,
+          ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdi : the target to call (can be any Object)
+  // -----------------------------------
+  StackArgumentsAccessor args(rsp, rax);
+
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(rdi, &non_callable);
+  __ bind(&non_smi);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+       RelocInfo::CODE_TARGET);
+  __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsCallable));
+  __ j(zero, &non_callable);
+
+  __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ PopReturnAddressTo(kScratchRegister);
+  __ Push(rdi);
+  __ PushReturnAddressFrom(kScratchRegister);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ addp(rax, Immediate(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not we raise an exception).
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ movp(args.GetReceiverOperand(), rdi);
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
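+
+// JS-level sketch of the dispatch order above (illustrative only):
+//
+//   f();                 // JSFunction      -> CallFunction.
+//   f.bind(null)();      // JSBoundFunction -> CallBoundFunction.
+//   new Proxy(f, {})();  // JSProxy         -> runtime %JSProxyCall.
+//   ({})();              // not callable    -> TypeError via runtime.
+//
+// Callable exotic objects provided by the embedder take the
+// call_as_function_delegate path instead.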
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the new target (checked to be a constructor)
+  //  -- rdi : the constructor to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(rdi);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // rbx to contain either an AllocationSite or undefined.
+  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the new target (checked to be a constructor)
+  //  -- rdi : the constructor to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(rdi);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label done;
+    __ cmpp(rdi, rdx);
+    __ j(not_equal, &done, Label::kNear);
+    __ movp(rdx,
+            FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&done);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ Load(rcx, ExternalReference(Builtins::kConstruct, masm->isolate()));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdi : the constructor to call (checked to be a JSProxy)
+  //  -- rdx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ PopReturnAddressTo(kScratchRegister);
+  __ Push(rdi);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(kScratchRegister);
+  // Include the pushed new_target, constructor and the receiver.
+  __ addp(rax, Immediate(3));
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- rdi : the constructor to call (can be any Object)
+  // -----------------------------------
+  StackArgumentsAccessor args(rsp, rax);
+
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(rdi, &non_constructor, Label::kNear);
+
+  // Dispatch based on instance type.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Construct]] internal method.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &non_constructor, Label::kNear);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+       RelocInfo::CODE_TARGET);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ movp(args.GetReceiverOperand(), rdi);
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
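+
+// JS-level sketch of the dispatch above (illustrative only):
+//
+//   new f();                   // JSFunction      -> ConstructFunction.
+//   new (f.bind(null, 1))();   // JSBoundFunction -> ConstructBoundFunction.
+//   new (new Proxy(f, {}))();  // JSProxy         -> ConstructProxy.
+//   new Math.max();            // no [[Construct]] -> TypeError.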
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Register scratch2,
+                                    Label* receiver_check_failed) {
+  Register signature = scratch0;
+  Register map = scratch1;
+  Register constructor = scratch2;
+
+  // If there is no signature, return the holder.
+  __ movp(signature, FieldOperand(function_template_info,
+                                  FunctionTemplateInfo::kSignatureOffset));
+  __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // Walk the prototype chain.
+  __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(constructor, map, kScratchRegister);
+  __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
+  Label next_prototype;
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Get the constructor's signature.
+  Register type = constructor;
+  __ movp(type,
+          FieldOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(type, FieldOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmpp(signature, type);
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(type, &next_prototype, Label::kNear);
+  __ CmpObjectType(type, FUNCTION_TEMPLATE_INFO_TYPE, kScratchRegister);
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Otherwise load the parent function template and iterate.
+  __ movp(type,
+          FieldOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+  __ jmp(&function_template_loop, Label::kNear);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ testq(FieldOperand(map, Map::kBitField3Offset),
+           Immediate(Map::HasHiddenPrototype::kMask));
+  __ j(zero, receiver_check_failed);
+  __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
+  __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ jmp(&prototype_loop_start, Label::kNear);
+
+  __ bind(&receiver_check_passed);
+}
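+
+// JS-level sketch of a failing check (illustrative; assumes the function was
+// created from an API FunctionTemplate carrying a receiver signature):
+//
+//   var method = apiObject.method;
+//   method.call({});  // plain object does not satisfy the signature
+//                     // -> Illegal Invocation.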
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax                : number of arguments (not including the receiver)
+  //  -- rdi                : callee
+  //  -- rsi                : context
+  //  -- rsp[0]             : return address
+  //  -- rsp[8]             : last argument
+  //  -- ...
+  //  -- rsp[rax * 8]       : first argument
+  //  -- rsp[(rax + 1) * 8] : receiver
+  // -----------------------------------
+
+  StackArgumentsAccessor args(rsp, rax);
+
+  // Load the FunctionTemplateInfo.
+  __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ movp(rcx, args.GetReceiverOperand());
+  CompatibleReceiverCheck(masm, rcx, rbx, rdx, r8, r9, &receiver_check_failed);
+
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ movp(rdx, FieldOperand(rbx, FunctionTemplateInfo::kCallCodeOffset));
+  __ movp(rdx, FieldOperand(rdx, CallHandlerInfo::kFastHandlerOffset));
+  __ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(rdx);
+
+  // Compatible receiver check failed: pop the return address, the arguments,
+  // and the receiver, then throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  __ PopReturnAddressTo(rbx);
+  __ leap(rax, Operand(rax, times_pointer_size, 1 * kPointerSize));
+  __ addp(rsp, rax);
+  __ PushReturnAddressFrom(rbx);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+  }
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ Push(rax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  Label skip;
+  // If the code object is null, just return to the caller.
+  __ cmpp(rax, Immediate(0));
+  __ j(not_equal, &skip, Label::kNear);
+  __ ret(0);
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+  if (has_handler_frame) {
+    __ leave();
+  }
+
+  // Load deoptimization data from the code object.
+  __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  __ SmiToInteger32(
+      rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+                            DeoptimizationInputData::kOsrPcOffsetIndex) -
+                            kHeapObjectTag));
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+  // Overwrite the return address on the stack.
+  __ movq(StackOperandForReturnAddress(0), rax);
+
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
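+
+// Sketch of the entry-point computation above:
+//
+//   target = code_obj + Code::kHeaderSize - kHeapObjectTag + osr_pc_offset
+//
+// i.e. the untagged start of the optimized code's instruction stream plus
+// the OSR offset recorded in its deoptimization data.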
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/builtins/x87/OWNERS b/src/builtins/x87/OWNERS
new file mode 100644
index 0000000..dd9998b
--- /dev/null
+++ b/src/builtins/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/src/builtins/x87/builtins-x87.cc b/src/builtins/x87/builtins-x87.cc
new file mode 100644
index 0000000..9c46f20
--- /dev/null
+++ b/src/builtins/x87/builtins-x87.cc
@@ -0,0 +1,3064 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/code-factory.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/x87/frames-x87.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+                                ExitFrameType exit_frame_type) {
+  // ----------- S t a t e -------------
+  //  -- eax                : number of arguments excluding receiver
+  //  -- edi                : target
+  //  -- edx                : new.target
+  //  -- esp[0]             : return address
+  //  -- esp[4]             : last argument
+  //  -- ...
+  //  -- esp[4 * argc]      : first argument
+  //  -- esp[4 * (argc + 1)] : receiver
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // Make sure we operate in the context of the called function (for example
+  // ConstructStubs implemented in C++ will be run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // JumpToExternalReference expects eax to contain the number of arguments
+  // including the receiver and the extra arguments.
+  const int num_extra_args = 3;
+  __ add(eax, Immediate(num_extra_args + 1));
+
+  // Insert extra arguments.
+  __ PopReturnAddressTo(ecx);
+  __ SmiTag(eax);
+  __ Push(eax);
+  __ SmiUntag(eax);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+
+  __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+                             exit_frame_type == BUILTIN_EXIT);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+    // Function is also the parameter to the runtime call.
+    __ push(edi);
+
+    __ CallRuntime(function_id, 1);
+    __ mov(ebx, eax);
+
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
+}
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
+}
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // the stack limit as a cue for an interrupt signal.
+  Label ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm->isolate());
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, Label::kNear);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- esi: context
+  //  -- edi: constructor function
+  //  -- ebx: allocation site or undefined
+  //  -- edx: new target
+  // -----------------------------------
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    // Preserve the incoming parameters on the stack.
+    __ AssertUndefinedOrAllocationSite(ebx);
+    __ push(esi);
+    __ push(ebx);
+    __ SmiTag(eax);
+    __ push(eax);
+
+    if (create_implicit_receiver) {
+      // Allocate the new receiver object.
+      __ Push(edi);
+      __ Push(edx);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
+
+      // ----------- S t a t e -------------
+      //  -- edi: constructor function
+      //  -- ebx: newly allocated object
+      //  -- edx: new target
+      // -----------------------------------
+
+      // Retrieve smi-tagged arguments count from the stack.
+      __ mov(eax, Operand(esp, 0));
+    }
+
+    __ SmiUntag(eax);
+
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ push(ebx);
+      __ push(ebx);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
+
+    // Set up pointer to last argument.
+    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ mov(ecx, eax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(ebx, ecx, times_4, 0));
+    __ bind(&entry);
+    __ dec(ecx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
+
+    // Store offset of return address for deoptimizer.
+    if (create_implicit_receiver && !is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      __ JumpIfSmi(eax, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+      __ j(above_equal, &exit);
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ mov(eax, Operand(esp, 0));
+
+      // Restore the arguments count and leave the construct frame. The
+      // arguments count is stored below the receiver.
+      __ bind(&exit);
+      __ mov(ebx, Operand(esp, 1 * kPointerSize));
+    } else {
+      __ mov(ebx, Operand(esp, 0));
+    }
+
+    // Leave construct frame.
+  }
+
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(eax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  // Remove caller arguments from the stack and return.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+  }
+  __ ret(0);
+}
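+
+// JS-level sketch of the "use the result only if it is a receiver" rule
+// implemented above (illustrative only):
+//
+//   function A() { return 42; }      // primitive result is ignored.
+//   function B() { return {x: 1}; }  // object result replaces the receiver.
+//   (new A()) instanceof A;  // true
+//   (new B()).x;             // 1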
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ push(edi);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
+
+// Clobbers ecx, edx, edi; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+                                        IsTagged eax_is_tagged) {
+  // eax   : the number of items to be pushed to the stack
+  //
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the amount of space we have left. The stack might already have
+  // overflowed here, which will cause ecx to become negative.
+  __ mov(ecx, esp);
+  __ sub(ecx, edi);
+  // Make edx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edx, eax);
+  int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
+  __ shl(edx, kPointerSizeLog2 - smi_tag);
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, edx);
+  __ j(greater, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ CallRuntime(Runtime::kThrowStackOverflow);
+
+  __ bind(&okay);
+}
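+
+// Sketch of the arithmetic above: with eax holding n untagged values to
+// push, the check passes only if
+//
+//   esp - real_stack_limit > n << kPointerSizeLog2
+//
+// i.e. strictly more than n * kPointerSize bytes of headroom remain.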
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+    ExternalReference context_address(Isolate::kContextAddress,
+                                      masm->isolate());
+    __ mov(esi, Operand::StaticVariable(context_address));
+
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
+
+    // Push the function and the receiver onto the stack.
+    __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+
+    // Check if we have enough stack space to push all arguments.
+    // Expects argument count in eax. Clobbers ecx, edx, edi.
+    Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
+
+    // Copy arguments to the stack in a loop.
+    Label loop, entry;
+    __ Move(ecx, Immediate(0));
+    __ jmp(&entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+    __ push(Operand(edx, 0));                    // dereference handle
+    __ inc(ecx);
+    __ bind(&entry);
+    __ cmp(ecx, eax);
+    __ j(not_equal, &loop);
+
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
+
+    // Get the new.target and function from the frame.
+    __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
+    __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+
+    // Invoke the code.
+    Handle<Code> builtin = is_construct
+                               ? masm->isolate()->builtins()->Construct()
+                               : masm->isolate()->builtins()->Call();
+    __ Call(builtin, RelocInfo::CODE_TARGET);
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code invocation.
+  }
+  __ ret(kPointerSize);  // Remove receiver.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : the value to pass to the generator
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+  // Load suspended function and context.
+  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+  __ j(greater_equal, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+  __ j(equal, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
+
+  // Pop return address.
+  __ PopReturnAddressTo(eax);
+
+  // Push receiver.
+  __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : return address
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- edi    : generator function
+  //  -- esi    : generator context
+  //  -- esp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(ecx, Immediate(Smi::FromInt(1)));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ PushReturnAddressFrom(eax);
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mov(edx, ebx);
+    __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(eax);  // Return address.
+    __ Push(ebp);                   // Caller's frame pointer.
+    __ Move(ebp, esp);
+    __ Push(esi);  // Callee's context.
+    __ Push(edi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+    {
+      Label done_loop, loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ j(equal, &done_loop, Label::kNear);
+      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+           Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+    // Resume the generator function at the continuation.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(ecx);
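+    // Resume address = code object + Code::kHeaderSize + continuation offset.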
+    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ mov(eax, ebx);  // Continuation expects generator object in eax.
+    __ jmp(edx);
+  }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+                                  Register scratch2) {
+  Register args_count = scratch1;
+  Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+  __ mov(args_count,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(args_count,
+         FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments.
+  __ pop(return_pc);
+  __ add(esp, args_count);
+  __ push(return_pc);
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.  The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+//   o edi: the JS function object being called
+//   o edx: the new target
+//   o esi: our context
+//   o ebp: the caller's frame pointer
+//   o esp: stack pointer (pointing to return address)
+//
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+  __ push(edx);  // Callee's new target.
+
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
+  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+         Immediate(DebugInfo::uninitialized()));
+  __ j(not_equal, &load_debug_bytecode_array);
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
+
+  // Check whether we should continue to use the interpreter.
+  Label switch_to_different_code_kind;
+  __ Move(ecx, masm->CodeObject());  // Self-reference to this code.
+  __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+  __ j(not_equal, &switch_to_different_code_kind);
+
+  // Check function data field is actually a BytecodeArray object.
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     eax);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Push bytecode array.
+  __ push(kInterpreterBytecodeArrayRegister);
+  // Push Smi tagged initial bytecode array offset.
+  __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
+
+  // Allocate the local and temporary register file on the stack.
+  {
+    // Load frame size from the BytecodeArray object.
+    __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+                             BytecodeArray::kFrameSizeOffset));
+
+    // Do a stack check to ensure we don't go over the limit.
+    Label ok;
+    __ mov(ecx, esp);
+    __ sub(ecx, ebx);
+    ExternalReference stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ cmp(ecx, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &ok);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&ok);
+
+    // If ok, push undefined as the initial value for all register file entries.
+    Label loop_header;
+    Label loop_check;
+    __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+    __ jmp(&loop_check);
+    __ bind(&loop_header);
+    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+    __ push(eax);
+    // Continue loop if not done.
+    __ bind(&loop_check);
+    __ sub(ebx, Immediate(kPointerSize));
+    __ j(greater_equal, &loop_header);
+  }
+
+  // Load accumulator, bytecode offset and dispatch table into registers.
+  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Dispatch to the first bytecode handler for the function.
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
+  __ call(ebx);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
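+  // Record this return PC; InterpreterEnterBytecodeDispatch uses the offset
+  // to reconstruct the return address into this trampoline.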
+
+  // The return value is in eax.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+  __ ret(0);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+  __ jmp(&bytecode_array_loaded);
+
+  // If the shared code is no longer this entry trampoline, then the underlying
+  // function has been switched to a different kind of code and we heal the
+  // closure by switching the code entry field over to the new code as well.
+  __ bind(&switch_to_different_code_kind);
+  __ pop(edx);  // Callee's new target.
+  __ pop(edi);  // Callee's JS function.
+  __ pop(esi);  // Callee's context.
+  __ leave();   // Leave the frame so we can tail call.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+  __ RecordWriteCodeEntryField(edi, ecx, ebx);
+  __ jmp(ecx);
+}
+
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+  __ mov(kContextRegister,
+         Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, ebx, ecx);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(eax);
+
+    // Push function as argument and compile for baseline.
+    __ push(edi);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(eax);
+  }
+  __ ret(0);
+}
+
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+                                         Register array_limit) {
+  // ----------- S t a t e -------------
+  //  -- ebx : Pointer to the last argument in the args array.
+  //  -- array_limit : Pointer to one before the first argument in the
+  //                   args array.
+  // -----------------------------------
+  Label loop_header, loop_check;
+  __ jmp(&loop_check);
+  __ bind(&loop_header);
+  __ Push(Operand(ebx, 0));
+  __ sub(ebx, Immediate(kPointerSize));
+  __ bind(&loop_check);
+  __ cmp(ebx, array_limit);
+  __ j(greater, &loop_header, Label::kNear);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode,
+    CallableType function_type) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(edx);
+
+  // Find the address of the last argument.
+  __ mov(ecx, eax);
+  __ add(ecx, Immediate(1));  // Add one for receiver.
+  __ shl(ecx, kPointerSizeLog2);
+  __ neg(ecx);
+  __ add(ecx, ebx);
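+  // ecx = ebx - (eax + 1) * kPointerSize; the +1 accounts for the receiver.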
+
+  Generate_InterpreterPushArgs(masm, ecx);
+
+  // Call the target.
+  __ Push(edx);  // Re-push return address.
+
+  if (function_type == CallableType::kJSFunction) {
+    __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+                                                      tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK_EQ(function_type, CallableType::kAny);
+    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                              tail_call_mode),
+            RelocInfo::CODE_TARGET);
+  }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target
+  //  -- edi : the constructor
+  //  -- ebx : the address of the first argument to be pushed. Subsequent
+  //           arguments should be consecutive above this, in the same order as
+  //           they are to be pushed onto the stack.
+  // -----------------------------------
+
+  // Pop return address to allow tail-call after pushing arguments.
+  __ Pop(ecx);
+
+  // Push edi in the slot meant for receiver. We need an extra register
+  // so store edi temporarily on stack.
+  __ Push(edi);
+
+  // Find the address of the last argument.
+  __ mov(edi, eax);
+  __ neg(edi);
+  __ shl(edi, kPointerSizeLog2);
+  __ add(edi, ebx);
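+  // edi = ebx - eax * kPointerSize; no receiver slot here, since the
+  // constructor pushed above stands in for it.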
+
+  Generate_InterpreterPushArgs(masm, edi);
+
+  // Restore the constructor from the stack; it was pushed into the slot
+  // meant for the receiver.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
+
+  // Re-push return address.
+  __ Push(ecx);
+
+  // Call the constructor with unmodified eax, edi, ebx values.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadHeapObject(ebx,
+                    masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+                        Code::kHeaderSize - kHeapObjectTag));
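+  // ebx now points just past the dispatch call inside the trampoline, so
+  // bytecode handlers will return there.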
+  __ push(ebx);
+
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
+
+  // Get the bytecode array pointer from the frame.
+  __ mov(kInterpreterBytecodeArrayRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+    __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+    __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+                     ebx);
+    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+  }
+
+  // Get the target bytecode offset from the frame.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+  // Dispatch to the target bytecode.
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
+                          kInterpreterBytecodeOffsetRegister, times_1, 0));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
+  __ jmp(ebx);
+}
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = edi;
+  Register new_target = edx;
+  Register argument_count = eax;
+
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = ebx;
+  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Immediate(Smi::FromInt(2)));
+  __ j(less, &gotta_call_runtime);
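+  // Fewer than two elements means the optimized code map holds only the
+  // context-independent code slot and no per-context entries.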
+
+  // Find literals.
+  // edx : native context
+  // ebx : length / index
+  // eax : optimized code map
+  // stack[0] : closure
+  // stack[4] : new target
+  // stack[8] : argument count
+  Register native_context = edx;
+  __ mov(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  Register temp = edi;
+
+  // Does the native context match?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousContext));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ mov(ecx, Operand(esp, 0));
+  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+  __ push(index);
+  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = ecx;
+  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, eax);
+
+  // Link the closure into the optimized function list.
+  // ecx : code entry
+  // edx : native context
+  // edi : closure
+  __ mov(ebx,
+         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+         closure);
+  // Save closure before the write barrier.
+  __ mov(ebx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+                            kDontSaveFPRegs);
+  __ mov(closure, ebx);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
+  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Immediate(Smi::FromInt(1)));
+  __ j(greater, &loop_top);
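+  // Step back one entry (SharedFunctionInfo::kEntryLength slots) at a time;
+  // stop once only the header remains.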
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                      SharedFunctionInfo::kSharedCodeIndex));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+  __ and_(ebx, Code::KindField::kMask);
+  __ shr(ebx, Code::KindField::kShift);
+  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, ebx);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
+}
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+}
+
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  Label failed;
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve argument count for later compare.
+    __ mov(ecx, eax);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+
+    // The function.
+    __ push(edi);
+    // Copy arguments from caller (stdlib, foreign, heap).
+    Label args_done;
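+    // Based on the actual argc in ecx, push the provided arguments and pad
+    // the rest of (stdlib, foreign, heap) with undefined.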
+    for (int j = 0; j < 4; ++j) {
+      Label over;
+      if (j < 3) {
+        __ cmp(ecx, Immediate(j));
+        __ j(not_equal, &over, Label::kNear);
+      }
+      for (int i = j - 1; i >= 0; --i) {
+        __ Push(Operand(
+            ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+      }
+      for (int i = 0; i < 3 - j; ++i) {
+        __ PushRoot(Heap::kUndefinedValueRootIndex);
+      }
+      if (j < 3) {
+        __ jmp(&args_done, Label::kNear);
+        __ bind(&over);
+      }
+    }
+    __ bind(&args_done);
+
+    // Call runtime, on success unwind frame, and parent frame.
+    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+    // A smi 0 is returned on failure, an object on success.
+    __ JumpIfSmi(eax, &failed, Label::kNear);
+
+    __ Drop(2);
+    __ Pop(ecx);
+    __ SmiUntag(ecx);
+    scope.GenerateLeaveFrame();
+
+    __ PopReturnAddressTo(ebx);
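+    // Drop the original arguments plus the receiver (ecx + 1 slots).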
+    __ inc(ecx);
+    __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
+    __ PushReturnAddressFrom(ebx);
+    __ ret(0);
+
+    __ bind(&failed);
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+  // On failure, tail call back to regular JS.
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Re-execute the code that was patched back to the young age when
+  // the stub returns.
+  __ sub(Operand(esp, 0), Immediate(5));
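+  // Move the return address back by the length of the call instruction
+  // (5 bytes) so the patched sequence is re-executed on return.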
+  __ pushad();
+  __ mov(eax, Operand(esp, 8 * kPointerSize));
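+  // eax <- the adjusted return address, stored above the eight registers
+  // saved by pushad.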
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ mov(Operand(esp, 0), eax);
+    __ CallCFunction(
+        ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  }
+  __ popad();
+  __ ret(0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+  __ pushad();
+  __ mov(eax, Operand(esp, 8 * kPointerSize));
+  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+  {  // NOLINT
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(masm->isolate())));
+    __ mov(Operand(esp, 0), eax);
+    __ CallCFunction(
+        ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+        2);
+  }
+  __ popad();
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ pop(eax);   // Pop return address into scratch register.
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+  __ push(eax);  // Push return address after frame prologue.
+
+  // Jump to point after the code-age stub.
+  __ ret(0);
+}
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
+  Generate_MarkCodeAsExecutedOnce(masm);
+}
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ pushad();
+    __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
+    __ popad();
+    // Tear down internal frame.
+  }
+
+  __ pop(MemOperand(esp, 0));  // Ignore state offset
+  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+}
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass deoptimization type to the runtime system.
+    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+    // Tear down internal frame.
+  }
+
+  // Get the full codegen state from the stack and untag it.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ SmiUntag(ecx);
+
+  // Switch on the state.
+  Label not_no_registers, not_tos_eax;
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
+  __ j(not_equal, &not_no_registers, Label::kNear);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
+  __ j(not_equal, &not_tos_eax, Label::kNear);
+  __ ret(2 * kPointerSize);  // Remove state, eax.
+
+  __ bind(&not_tos_eax);
+  __ Abort(kNoCasesLeft);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+                                               int field_index) {
+  // ----------- S t a t e -------------
+  //  -- eax    : number of arguments
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into eax and check that it's actually a JSDate object.
+  Label receiver_not_date;
+  {
+    __ mov(eax, Operand(esp, kPointerSize));
+    __ JumpIfSmi(eax, &receiver_not_date);
+    __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+    __ j(not_equal, &receiver_not_date);
+  }
+
+  // 2. Load the specified date field, falling back to the runtime as necessary.
+  if (field_index == JSDate::kDateValue) {
+    __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+  } else {
+    if (field_index < JSDate::kFirstUncachedField) {
+      Label stamp_mismatch;
+      __ mov(edx, Operand::StaticVariable(
+                      ExternalReference::date_cache_stamp(masm->isolate())));
+      __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+      __ j(not_equal, &stamp_mismatch, Label::kNear);
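+      // The stamp matches, so the cached field value is still valid.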
+      __ mov(eax, FieldOperand(
+                      eax, JSDate::kValueOffset + field_index * kPointerSize));
+      __ ret(1 * kPointerSize);
+      __ bind(&stamp_mismatch);
+    }
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ PrepareCallCFunction(2, ebx);
+    __ mov(Operand(esp, 0), eax);
+    __ mov(Operand(esp, 1 * kPointerSize),
+           Immediate(Smi::FromInt(field_index)));
+    __ CallCFunction(
+        ExternalReference::get_date_field_function(masm->isolate()), 2);
+  }
+  __ ret(1 * kPointerSize);
+
+  // 3. Raise a TypeError if the receiver is not a date.
+  __ bind(&receiver_not_date);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Move(ebx, Immediate(0));
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argArray
+  //  -- esp[8]  : thisArg
+  //  -- esp[12] : receiver
+  // -----------------------------------
+
+  // 1. Load receiver into edi, argArray into eax (if present), remove all
+  // arguments from the stack (including the receiver), and push thisArg (if
+  // present) instead.
+  {
+    Label no_arg_array, no_this_arg;
+    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ mov(ebx, edx);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ test(eax, eax);
+    __ j(zero, &no_this_arg, Label::kNear);
+    {
+      __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+      __ cmp(eax, Immediate(1));
+      __ j(equal, &no_arg_array, Label::kNear);
+      __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+      __ bind(&no_arg_array);
+    }
+    __ bind(&no_this_arg);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argArray
+  //  -- edi    : receiver
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArg
+  // -----------------------------------
+
+  // 2. Make sure the receiver is actually callable.
+  Label receiver_not_callable;
+  __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &receiver_not_callable, Label::kNear);
+
+  // 3. Tail call with no arguments if argArray is null or undefined.
+  Label no_arguments;
+  __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+  __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+                Label::kNear);
+
+  // 4a. Apply the receiver to the given argArray (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The argArray is either null or undefined, so we tail call without any
+  // arguments to the receiver.
+  __ bind(&no_arguments);
+  {
+    __ Set(eax, 0);
+    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  }
+
+  // 4c. The receiver is not callable, throw an appropriate TypeError.
+  __ bind(&receiver_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // esp[0]           : Return address
+  // esp[4]           : Argument n
+  // esp[8]           : Argument n-1
+  //  ...
+  // esp[4 * n]       : Argument 1
+  // esp[4 * (n + 1)] : Receiver (callable to call)
+  //
+  // eax contains the number of arguments, n, not counting the receiver.
+  //
+  // 1. Make sure we have at least one argument.
+  {
+    Label done;
+    __ test(eax, eax);
+    __ j(not_zero, &done, Label::kNear);
+    __ PopReturnAddressTo(ebx);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ebx);
+    __ inc(eax);
+    __ bind(&done);
+  }
+
+  // 2. Get the callable to call (passed as receiver) from the stack.
+  __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+  // 3. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  {
+    Label loop;
+    __ mov(ecx, eax);
+    __ bind(&loop);
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+    __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
+    __ dec(ecx);
+    __ j(not_sign, &loop);  // While non-negative (to copy return address).
+    __ pop(ebx);            // Discard copy of return address.
+    __ dec(eax);  // One fewer argument (first argument is new receiver).
+  }
+
+  // 4. Call the callable.
+  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : argumentsList
+  //  -- esp[8]  : thisArgument
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // remove all arguments from the stack (including the receiver), and push
+  // thisArgument (if present) instead.
+  {
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ j(equal, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ Push(edx);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // 2. Make sure the target is actually callable.
+  Label target_not_callable;
+  __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &target_not_callable, Label::kNear);
+
+  // 3a. Apply the target to the given argumentsList (passing undefined for
+  // new.target).
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 3b. The target is not callable, throw an appropriate TypeError.
+  __ bind(&target_not_callable);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+  }
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax     : argc
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : new.target (optional)
+  //  -- esp[8]  : argumentsList
+  //  -- esp[12] : target
+  //  -- esp[16] : receiver
+  // -----------------------------------
+
+  // 1. Load target into edi (if present), argumentsList into eax (if present),
+  // new.target into edx (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push thisArgument
+  // (if present) instead.
+  {
+    Label done;
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ mov(edx, edi);
+    __ mov(ebx, edi);
+    __ cmp(eax, Immediate(1));
+    __ j(below, &done, Label::kNear);
+    __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+    __ mov(edx, edi);
+    __ j(equal, &done, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+    __ cmp(eax, Immediate(3));
+    __ j(below, &done, Label::kNear);
+    __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+    __ bind(&done);
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+    __ PushReturnAddressFrom(ecx);
+    __ Move(eax, ebx);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edx    : new.target
+  //  -- edi    : target
+  //  -- esp[0] : return address
+  //  -- esp[4] : receiver (undefined)
+  // -----------------------------------
+
+  // 2. Make sure the target is actually a constructor.
+  Label target_not_constructor;
+  __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &target_not_constructor, Label::kNear);
+
+  // 3. Make sure the new.target is actually a constructor.
+  Label new_target_not_constructor;
+  __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &new_target_not_constructor, Label::kNear);
+
+  // 4a. Construct the target with the given new.target and argumentsList.
+  __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+  // 4b. The target is not a constructor, throw an appropriate TypeError.
+  __ bind(&target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edi);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+
+  // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+  __ bind(&new_target_not_constructor);
+  {
+    __ mov(Operand(esp, kPointerSize), edx);
+    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the InternalArray function.
+  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi tag check catches both NULL and Smis.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function by tail calling a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argc
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
+  __ mov(edx, edi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The Smi tag check catches both NULL and Smis.
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+    __ CmpObjectType(ebx, MAP_TYPE, ecx);
+    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Run the native code for the Array function called as a normal function
+  // by tail calling a stub.
+  __ mov(ebx, masm->isolate()->factory()->undefined_value());
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  const int reg_sel = (kind == MathMaxMinKind::kMin) ? 1 : 0;
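+  // reg_sel selects which x87 stack slot's sign is inspected when the two
+  // values compare equal (the -0 vs. +0 case).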
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in edx and the double value in stx_0.
+  __ LoadRoot(edx, root_index);
+  __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+  __ Move(ecx, eax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ test(ecx, ecx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into ebx.
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into stx_1, maybe converting the
+    // parameter to a number first using the ToNumber builtin if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(ebx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ SmiTag(eax);
+      __ SmiTag(ecx);
+      __ EnterBuiltinFrame(esi, edi, eax);
+      __ Push(ecx);
+      __ Push(edx);
+      __ mov(eax, ebx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(ecx);
+      __ LeaveBuiltinFrame(esi, edi, eax);
+      __ SmiUntag(ecx);
+      __ SmiUntag(eax);
+      {
+        // Restore the double accumulator value (stX_0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+        __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiUntag(edx);
+        __ push(edx);
+        __ fild_s(Operand(esp, 0));
+        __ pop(edx);
+        __ SmiTag(edx);
+        __ bind(&done_restore);
+      }
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    // Load the parameter's double value into stx_1.
+    __ fld_d(FieldOperand(ebx, HeapNumber::kValueOffset));
+    __ fxch();
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiUntag(ebx);
+    __ push(ebx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ebx);
+    __ fxch();
+    __ SmiTag(ebx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (stx_0) and the next parameter value on the right hand side (stx_1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+
+    // Duplicates the 2 float data for FCmp
+    __ fld(1);
+    __ fld(1);
+    __ FCmp();
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side (stx_0).
+    __ bind(&compare_swap);
+    __ fxch();
+    __ mov(edx, ebx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    // Set the result on the right hand side (stx_0) to NaN.
+    __ fstp(0);
+    __ LoadRoot(edx, Heap::kNanValueRootIndex);
+    __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    // Check the sign of the value selected by reg_sel.
+    __ fld(reg_sel);
+    __ FXamSign();
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    // The correct result is on the right hand side (stx_0),
+    // so the now-useless stx_1 can be removed.
+    __ fxch();
+    __ fstp(0);
+    __ dec(ecx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(ecx);
+  __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(ecx);
+  __ mov(eax, edx);
+  __ Ret();
+}
+
+// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into ebx.
+  Label no_arguments;
+  {
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+  }
+
+  // 2a. Convert the first argument to a number.
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(eax);
+    __ EnterBuiltinFrame(esi, edi, eax);
+    __ mov(eax, ebx);
+    __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+    __ LeaveBuiltinFrame(esi, edi, ebx);  // Argc popped to ebx.
+    __ SmiUntag(ebx);
+  }
+
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+
+  // 2b. No arguments, return +0 (already in eax).
+  __ bind(&no_arguments);
+  __ ret(1 * kPointerSize);
+}
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Store the Smi-tagged argc in ecx.
+  __ mov(ecx, eax);
+  __ SmiTag(ecx);
+
+  // 2. Load the first argument into ebx.
+  {
+    Label no_arguments, done;
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ Move(ebx, Smi::FromInt(0));
+    __ bind(&done);
+  }
+
+  // 3. Make sure ebx is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(ebx, &done_convert);
+    __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(equal, &done_convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      __ EnterBuiltinFrame(esi, edi, ecx);
+      __ Push(edx);
+      __ Move(eax, ebx);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+      __ Move(ebx, eax);
+      __ Pop(edx);
+      __ LeaveBuiltinFrame(esi, edi, ecx);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, done_alloc, new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
+  __ jmp(&drop_frame_and_ret);
+
+  __ bind(&done_alloc);
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));  // Restore esi.
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ EnterBuiltinFrame(esi, edi, ecx);
+    __ Push(ebx);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(esi, edi, ecx);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(esi);
+    __ SmiUntag(ecx);
+    __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(esi);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into eax.
+  Label no_arguments;
+  {
+    __ mov(ebx, eax);  // Store argc in ebx.
+    __ test(eax, eax);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
+  }
+
+  // 2a. At least one argument, return eax if it's a string, otherwise
+  // dispatch to appropriate conversion.
+  Label drop_frame_and_ret, to_string, symbol_descriptive_string;
+  {
+    __ JumpIfSmi(eax, &to_string, Label::kNear);
+    STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+    __ j(above, &to_string, Label::kNear);
+    __ j(equal, &symbol_descriptive_string, Label::kNear);
+    __ jmp(&drop_frame_and_ret, Label::kNear);
+  }
+
+  // 2b. No arguments, return the empty string (and pop the receiver).
+  __ bind(&no_arguments);
+  {
+    __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+    __ ret(1 * kPointerSize);
+  }
+
+  // 3a. Convert eax to a string.
+  __ bind(&to_string);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    ToStringStub stub(masm->isolate());
+    __ SmiTag(ebx);
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ CallStub(&stub);
+    __ LeaveBuiltinFrame(esi, edi, ebx);
+    __ SmiUntag(ebx);
+  }
+  __ jmp(&drop_frame_and_ret, Label::kNear);
+
+  // 3b. Convert symbol in eax to a string.
+  __ bind(&symbol_descriptive_string);
+  {
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ Push(eax);
+    __ PushReturnAddressFrom(ecx);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+}
+
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- edi                 : constructor function
+  //  -- edx                 : new target
+  //  -- esi                 : context
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  __ mov(ebx, eax);
+
+  // 2. Load the first argument into eax.
+  {
+    Label no_arguments, done;
+    __ test(ebx, ebx);
+    __ j(zero, &no_arguments, Label::kNear);
+    __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&no_arguments);
+    __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+    __ bind(&done);
+  }
+
+  // 3. Make sure eax is a string.
+  {
+    Label convert, done_convert;
+    __ JumpIfSmi(eax, &convert, Label::kNear);
+    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+    __ j(below, &done_convert);
+    __ bind(&convert);
+    {
+      FrameScope scope(masm, StackFrame::MANUAL);
+      ToStringStub stub(masm->isolate());
+      __ SmiTag(ebx);
+      __ EnterBuiltinFrame(esi, edi, ebx);
+      __ Push(edx);
+      __ CallStub(&stub);
+      __ Pop(edx);
+      __ LeaveBuiltinFrame(esi, edi, ebx);
+      __ SmiUntag(ebx);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label drop_frame_and_ret, done_alloc, new_object;
+  __ cmp(edx, edi);
+  __ j(not_equal, &new_object);
+
+  // 5. Allocate a JSValue wrapper for the string.
+  // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
+  // as needed after the call.
+  __ mov(esi, eax);
+  __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
+  __ jmp(&drop_frame_and_ret);
+
+  __ bind(&done_alloc);
+  {
+    // Restore eax to the first argument and esi to the context.
+    __ mov(eax, esi);
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  }
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ SmiTag(ebx);
+    __ EnterBuiltinFrame(esi, edi, ebx);
+    __ Push(eax);  // the first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
+    __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+    __ LeaveBuiltinFrame(esi, edi, ebx);
+    __ SmiUntag(ebx);
+  }
+
+  __ bind(&drop_frame_and_ret);
+  {
+    // Drop all arguments including the receiver.
+    __ PopReturnAddressTo(ecx);
+    __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+    __ PushReturnAddressFrom(ecx);
+    __ Ret();
+  }
+}
+
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+                                       Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : new target (passed through to callee)
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already be overflowed
+  // here which will cause ecx to become negative.
+  __ mov(ecx, esp);
+  __ sub(ecx, edi);
+  // Make edi the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edi, ebx);
+  __ shl(edi, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, edi);
+  __ j(less_equal, stack_overflow);  // Signed comparison.
+}
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(ebp);
+  __ mov(ebp, esp);
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Push the function on the stack.
+  __ push(edi);
+
+  // Preserve the number of arguments on the stack. Must preserve eax,
+  // ebx and ecx because these registers are used when copying the
+  // arguments and the receiver.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(edi, Operand(eax, eax, times_1, kSmiTag));  // edi = SmiTag(eax).
+  __ push(edi);
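+  // Resulting adaptor frame layout (a sketch; the stack grows downwards):
+  //   [ebp + 4]  caller pc
+  //   [ebp + 0]  caller fp                      <- ebp
+  //   [ebp - 4]  ARGUMENTS_ADAPTOR sentinel
+  //   [ebp - 8]  function
+  //   [ebp - 12] actual argument count (Smi)    <- esp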
+}
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack.
+  __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ leave();
+
+  // Remove caller arguments from the stack.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(ecx);
+}
+
+// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : argumentsList
+  //  -- edi    : target
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Create the list of arguments from the array-like argumentsList.
+  {
+    Label create_arguments, create_array, create_runtime, done_create;
+    __ JumpIfSmi(eax, &create_runtime);
+
+    // Load the map of argumentsList into ecx.
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+    // Load native context into ebx.
+    __ mov(ebx, NativeContextOperand());
+
+    // Check if argumentsList is an (unmodified) arguments object.
+    __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+    __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ j(equal, &create_arguments);
+
+    // Check if argumentsList is a fast JSArray.
+    __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+    __ j(equal, &create_array);
+
+    // Ask the runtime to create the list (actually a FixedArray).
+    __ bind(&create_runtime);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edi);
+      __ Push(edx);
+      __ Push(eax);
+      __ CallRuntime(Runtime::kCreateListFromArrayLike);
+      __ Pop(edx);
+      __ Pop(edi);
+      __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+    }
+    __ jmp(&done_create);
+
+    // Try to create the list from an arguments object.
+    __ bind(&create_arguments);
+    __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
+    __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+    __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+    __ j(not_equal, &create_runtime);
+    __ SmiUntag(ebx);
+    __ mov(eax, ecx);
+    __ jmp(&done_create);
+
+    // Try to create the list from a JSArray object.
+    __ bind(&create_array);
+    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(ecx);
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+    STATIC_ASSERT(FAST_ELEMENTS == 2);
+    __ cmp(ecx, Immediate(FAST_ELEMENTS));
+    __ j(above, &create_runtime);
+    __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+    __ j(equal, &create_runtime);
+    __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+    __ SmiUntag(ebx);
+    __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+    __ bind(&done_create);
+  }
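+
+  // The block above is, in effect (a sketch):
+  //
+  //   if (argumentsList is an unmodified sloppy/strict arguments object ||
+  //       argumentsList is a JSArray with packed SMI or packed elements) {
+  //     args = argumentsList.elements;  // used directly
+  //   } else {
+  //     args = %CreateListFromArrayLike(argumentsList);  // a FixedArray
+  //   }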
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (e.g. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here, which will cause ecx to become negative.
+    __ neg(ecx);
+    __ add(ecx, esp);
+    __ sar(ecx, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, ebx);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // ----------- S t a t e -------------
+  //  -- edi    : target
+  //  -- eax    : args (a FixedArray built from argumentsList)
+  //  -- ebx    : len (number of elements to push from args)
+  //  -- edx    : new.target (checked to be constructor or undefined)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Push arguments onto the stack (thisArgument is already on the stack).
+  {
+    // Stash edx (new.target) on the x87 FPU stack so that edx can be used as
+    // a scratch register while pushing the arguments.
+    __ push(edx);
+    __ fld_s(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, kFloatSize));
+
+    __ PopReturnAddressTo(edx);
+    __ Move(ecx, Immediate(0));
+    Label done, loop;
+    __ bind(&loop);
+    __ cmp(ecx, ebx);
+    __ j(equal, &done, Label::kNear);
+    __ Push(
+        FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    __ inc(ecx);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(edx);
+
+    // Restore edx (new.target) from the x87 FPU stack.
+    __ lea(esp, Operand(esp, -kFloatSize));
+    __ fstp_s(MemOperand(esp, 0));
+    __ pop(edx);
+
+    __ Move(eax, ebx);
+  }
+
+  // Dispatch to Call or Construct depending on whether new.target is undefined.
+  {
+    __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+    __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  }
+}
+
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if ES2015 tail call elimination is disabled (e.g. while the
+// debugger is active).
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if ES2015 tail call elimination is enabled.
+  Label done;
+  ExternalReference is_tail_call_elimination_enabled =
+      ExternalReference::is_tail_call_elimination_enabled_address(
+          masm->isolate());
+  __ movzx_b(scratch1,
+             Operand::StaticVariable(is_tail_call_elimination_enabled));
+  __ cmp(scratch1, Immediate(0));
+  __ j(equal, &done, Label::kNear);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Register caller_args_count_reg = scratch1;
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop current frame and load arguments count from arguments adaptor frame.
+  __ mov(ebp, scratch2);
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count.
+  __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(scratch1,
+         FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(
+      caller_args_count_reg,
+      FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, 0);
+  __ bind(&done);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  Label class_constructor;
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+            Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+  __ j(not_zero, &class_constructor);
+
+  // Enter the context of the function; ToObject has to run in the function
+  // context, and we also need to take the global proxy from the function
+  // context in case of conversion.
+  STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+                SharedFunctionInfo::kStrictModeByteOffset);
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  // We need to convert the receiver for non-native sloppy mode functions.
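+  // In JS terms the conversion below is roughly (a sketch):
+  //   if (receiver === null || receiver === undefined) receiver = globalProxy;
+  //   else if (!IsJSReceiver(receiver)) receiver = ToObject(receiver);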
+  Label done_convert;
+  __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
+            Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+                      (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+  __ j(not_zero, &done_convert);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : the shared function info.
+    //  -- edi : the function to call (checked to be a JSFunction)
+    //  -- esi : the function context.
+    // -----------------------------------
+
+    if (mode == ConvertReceiverMode::kNullOrUndefined) {
+      // Patch receiver to global proxy.
+      __ LoadGlobalProxy(ecx);
+    } else {
+      Label convert_to_object, convert_receiver;
+      __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+      __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+      __ j(above_equal, &done_convert);
+      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+        Label convert_global_proxy;
+        __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+                      &convert_global_proxy, Label::kNear);
+        __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+                         Label::kNear);
+        __ bind(&convert_global_proxy);
+        {
+          // Patch receiver to global proxy.
+          __ LoadGlobalProxy(ecx);
+        }
+        __ jmp(&convert_receiver);
+      }
+      __ bind(&convert_to_object);
+      {
+        // Convert receiver using ToObject.
+        // TODO(bmeurer): Inline the allocation here to avoid building the frame
+        // in the fast case? (fall back to AllocateInNewSpace?)
+        FrameScope scope(masm, StackFrame::INTERNAL);
+        __ SmiTag(eax);
+        __ Push(eax);
+        __ Push(edi);
+        __ mov(eax, ecx);
+        __ Push(esi);
+        ToObjectStub stub(masm->isolate());
+        __ CallStub(&stub);
+        __ Pop(esi);
+        __ mov(ecx, eax);
+        __ Pop(edi);
+        __ Pop(eax);
+        __ SmiUntag(eax);
+      }
+      __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ bind(&convert_receiver);
+    }
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+  }
+  __ bind(&done_convert);
+
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the shared function info.
+  //  -- edi : the function to call (checked to be a JSFunction)
+  //  -- esi : the function context.
+  // -----------------------------------
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+    // Reload shared function info.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  }
+
+  __ mov(ebx,
+         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(ebx);
+  ParameterCount actual(eax);
+  ParameterCount expected(ebx);
+  __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+                        CheckDebugStepCallWrapper());
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ push(edi);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
+}
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : new.target (only in case of [[Construct]])
+  //  -- edi : target (checked to be a JSBoundFunction)
+  // -----------------------------------
+
+  // Load [[BoundArguments]] into ecx and length of that into ebx.
+  Label no_bound_arguments;
+  __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+  __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ SmiUntag(ebx);
+  __ test(ebx, ebx);
+  __ j(zero, &no_bound_arguments);
+  {
+    // ----------- S t a t e -------------
+    //  -- eax : the number of arguments (not including the receiver)
+    //  -- edx : new.target (only in case of [[Construct]])
+    //  -- edi : target (checked to be a JSBoundFunction)
+    //  -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+    //  -- ebx : the number of [[BoundArguments]]
+    // -----------------------------------
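+
+    // Effect on the stack, sketched for f = g.bind(t, x, y) called as
+    // f(a, b) (deepest slot on the left, return address on top/right):
+    //
+    //   [receiver][a][b][ret]  ->  [receiver][x][y][a][b][ret]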
+
+    // Reserve stack space for the [[BoundArguments]].
+    {
+      Label done;
+      __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+      __ sub(esp, ecx);
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (e.g. debug break and preemption) here, so check the "real stack
+      // limit".
+      __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+      __ j(greater, &done, Label::kNear);  // Signed comparison.
+      // Restore the stack pointer.
+      __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+      {
+        FrameScope scope(masm, StackFrame::MANUAL);
+        __ EnterFrame(StackFrame::INTERNAL);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
+      }
+      __ bind(&done);
+    }
+
+    // Adjust effective number of arguments to include return address.
+    __ inc(eax);
+
+    // Relocate arguments and return address down the stack.
+    {
+      Label loop;
+      __ Set(ecx, 0);
+      __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+      __ bind(&loop);
+      __ fld_s(Operand(ebx, ecx, times_pointer_size, 0));
+      __ fstp_s(Operand(esp, ecx, times_pointer_size, 0));
+      __ inc(ecx);
+      __ cmp(ecx, eax);
+      __ j(less, &loop);
+    }
+
+    // Copy [[BoundArguments]] to the stack (below the arguments).
+    {
+      Label loop;
+      __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+      __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+      __ SmiUntag(ebx);
+      __ bind(&loop);
+      __ dec(ebx);
+      __ fld_s(
+          FieldOperand(ecx, ebx, times_pointer_size, FixedArray::kHeaderSize));
+      __ fstp_s(Operand(esp, eax, times_pointer_size, 0));
+      __ lea(eax, Operand(eax, 1));  // lea leaves the flags from dec intact.
+      __ j(greater, &loop);
+    }
+
+    // Adjust effective number of arguments (eax contains the number of
+    // arguments from the call plus return address plus the number of
+    // [[BoundArguments]]), so we need to subtract one for the return address.
+    __ dec(eax);
+  }
+  __ bind(&no_bound_arguments);
+}
+
+}  // namespace
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the function to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
+  // Patch the receiver to [[BoundThis]].
+  __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Call the [[BoundTargetFunction]] via the Call builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(ExternalReference(
+                  Builtins::kCall_ReceiverIsAny, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the target to call (can be any Object).
+  // -----------------------------------
+
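+  // Dispatch performed below (a sketch):
+  //   JSFunction       -> CallFunction builtin
+  //   JSBoundFunction  -> CallBoundFunction builtin
+  //   JSProxy          -> Runtime::kJSProxyCall
+  //   other callables  -> CALL_AS_FUNCTION_DELEGATE via CallFunction
+  //   non-callables    -> Runtime::kThrowCalledNonCallable
+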
+  Label non_callable, non_function, non_smi;
+  __ JumpIfSmi(edi, &non_callable);
+  __ bind(&non_smi);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+       RelocInfo::CODE_TARGET);
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsCallable));
+  __ j(zero, &non_callable);
+
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(not_equal, &non_function);
+
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
+  // 1. Runtime fallback for Proxy [[Call]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ PushReturnAddressFrom(ecx);
+  // Increase the arguments size to include the pushed function and the
+  // existing receiver on the stack.
+  __ add(eax, Immediate(2));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+
+  // 2. Call to something else that is callable (its [[Call]] internal method
+  // was verified above) but is neither a JSFunction nor a JSProxy.
+  __ bind(&non_function);
+  // Overwrite the original receiver with the (original) target.
+  __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+  // Let the "call_as_function_delegate" take care of the rest.
+  __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
+  __ Jump(masm->isolate()->builtins()->CallFunction(
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+          RelocInfo::CODE_TARGET);
+
+  // 3. Call to something that is not callable.
+  __ bind(&non_callable);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kThrowCalledNonCallable);
+  }
+}
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSFunction)
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // ebx to contain either an AllocationSite or undefined.
+  __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+
+  // Tail call to the function-specific construct stub (still in the caller
+  // context at this point).
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (checked to be a constructor)
+  //  -- edi : the constructor to call (checked to be a JSBoundFunction)
+  // -----------------------------------
+  __ AssertBoundFunction(edi);
+
+  // Push the [[BoundArguments]] onto the stack.
+  Generate_PushBoundArguments(masm);
+
+  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+  {
+    Label done;
+    __ cmp(edi, edx);
+    __ j(not_equal, &done, Label::kNear);
+    __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+    __ bind(&done);
+  }
+
+  // Construct the [[BoundTargetFunction]] via the Construct builtin.
+  __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+  __ mov(ecx, Operand::StaticVariable(
+                  ExternalReference(Builtins::kConstruct, masm->isolate())));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
+}
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the constructor to call (checked to be a JSProxy)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  // Call into the Runtime for Proxy [[Construct]].
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  // Include the pushed new_target, constructor and the receiver.
+  __ add(eax, Immediate(3));
+  // Tail-call to the runtime.
+  __ JumpToExternalReference(
+      ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- edi : the constructor to call (can be any Object)
+  // -----------------------------------
+
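+  // Dispatch performed below (a sketch):
+  //   JSFunction                   -> ConstructFunction builtin
+  //   constructable bound function -> ConstructBoundFunction builtin
+  //   constructable JSProxy        -> ConstructProxy builtin
+  //   other constructors           -> CALL_AS_CONSTRUCTOR_DELEGATE
+  //   non-constructors             -> ConstructedNonConstructable
+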
+  // Check if target is a Smi.
+  Label non_constructor;
+  __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+
+  // Dispatch based on instance type.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Construct]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+            Immediate(1 << Map::kIsConstructor));
+  __ j(zero, &non_constructor, Label::kNear);
+
+  // Only dispatch to bound functions after checking whether they are
+  // constructors.
+  __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+       RelocInfo::CODE_TARGET);
+
+  // Only dispatch to proxies after checking whether they are constructors.
+  __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+  __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+       RelocInfo::CODE_TARGET);
+
+  // Called Construct on an exotic Object with a [[Construct]] internal method.
+  {
+    // Overwrite the original receiver with the (original) target.
+    __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+    // Let the "call_as_constructor_delegate" take care of the rest.
+    __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
+    __ Jump(masm->isolate()->builtins()->CallFunction(),
+            RelocInfo::CODE_TARGET);
+  }
+
+  // Called Construct on an Object that doesn't have a [[Construct]] internal
+  // method.
+  __ bind(&non_constructor);
+  __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : message_id as Smi
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
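+  // Smis and HeapNumbers are already numbers and are returned unchanged;
+  // everything else is handled by the NonNumberToNumber builtin.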
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : actual number of arguments
+  //  -- ebx : expected number of arguments
+  //  -- edx : new target (passed through to callee)
+  //  -- edi : function (passed through to callee)
+  // -----------------------------------
+
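+  // Adaptation policy (a sketch of the cases handled below):
+  //   expected == kDontAdaptArgumentsSentinel -> jump straight to the code
+  //       entry, no adaptor frame;
+  //   actual >= expected -> push receiver + the first `expected` arguments;
+  //   actual <  expected -> push receiver + all `actual` arguments, then pad
+  //       with undefined up to `expected`.
+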
+  Label invoke, dont_adapt_arguments, stack_overflow;
+  __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
+
+  Label enough, too_few;
+  __ cmp(eax, ebx);
+  __ j(less, &too_few);
+  __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    __ mov(eax, -1);  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(eax);
+    __ push(Operand(edi, 0));
+    __ sub(edi, Immediate(kPointerSize));
+    __ cmp(eax, ebx);
+    __ j(less, &copy);
+    // eax now contains the expected number of arguments.
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+
+    EnterArgumentsAdaptorFrame(masm);
+    ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
+    // Remember expected arguments in ecx.
+    __ mov(ecx, ebx);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(edi, Operand(ebp, eax, times_4, offset));
+    // ebx = expected - actual.
+    __ sub(ebx, eax);
+    // eax = -actual - 1
+    __ neg(eax);
+    __ sub(eax, Immediate(1));
+
+    Label copy;
+    __ bind(&copy);
+    __ inc(eax);
+    __ push(Operand(edi, 0));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
+    __ j(not_zero, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ bind(&fill);
+    __ inc(eax);
+    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+    __ cmp(eax, ebx);
+    __ j(less, &fill);
+
+    // Restore expected arguments.
+    __ mov(eax, ecx);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  // Restore function pointer.
+  __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
+  // eax : expected number of arguments
+  // edx : new target (passed through to callee)
+  // edi : function (passed through to callee)
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ call(ecx);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  __ jmp(ecx);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
+    __ int3();
+  }
+}
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+                                    Register function_template_info,
+                                    Register scratch0, Register scratch1,
+                                    Label* receiver_check_failed) {
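+  // Sketch of the check below:
+  //
+  //   if (info.signature === undefined) return;      // no signature required
+  //   for (map = receiver.map; ; map = map.prototype.map) {
+  //     t = (map.constructor is a JSFunction)
+  //             ? map.constructor.shared.function_data : undefined;
+  //     while (t is a FunctionTemplateInfo) {
+  //       if (t === info.signature) return;           // compatible receiver
+  //       t = t.parent_template;
+  //     }
+  //     if (!map.has_hidden_prototype) goto receiver_check_failed;
+  //   }
+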
+  // If there is no signature, return the holder.
+  __ CompareRoot(FieldOperand(function_template_info,
+                              FunctionTemplateInfo::kSignatureOffset),
+                 Heap::kUndefinedValueRootIndex);
+  Label receiver_check_passed;
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // Walk the prototype chain.
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  Label prototype_loop_start;
+  __ bind(&prototype_loop_start);
+
+  // Get the constructor, if any.
+  __ GetMapConstructor(scratch0, scratch0, scratch1);
+  __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+  Label next_prototype;
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Get the constructor's signature.
+  __ mov(scratch0,
+         FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(scratch0,
+         FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Loop through the chain of inheriting function templates.
+  Label function_template_loop;
+  __ bind(&function_template_loop);
+
+  // If the signatures match, we have a compatible receiver.
+  __ cmp(scratch0, FieldOperand(function_template_info,
+                                FunctionTemplateInfo::kSignatureOffset));
+  __ j(equal, &receiver_check_passed, Label::kNear);
+
+  // If the current type is not a FunctionTemplateInfo, load the next prototype
+  // in the chain.
+  __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+  __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+  __ j(not_equal, &next_prototype, Label::kNear);
+
+  // Otherwise load the parent function template and iterate.
+  __ mov(scratch0,
+         FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+  __ jmp(&function_template_loop, Label::kNear);
+
+  // Load the next prototype.
+  __ bind(&next_prototype);
+  __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ test(FieldOperand(receiver, Map::kBitField3Offset),
+          Immediate(Map::HasHiddenPrototype::kMask));
+  __ j(zero, receiver_check_failed);
+
+  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Iterate.
+  __ jmp(&prototype_loop_start, Label::kNear);
+
+  __ bind(&receiver_check_passed);
+}
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax                : number of arguments (not including the receiver)
+  //  -- edi                : callee
+  //  -- esi                : context
+  //  -- esp[0]             : return address
+  //  -- esp[4]             : last argument
+  //  -- ...
+  //  -- esp[eax * 4]       : first argument
+  //  -- esp[(eax + 1) * 4] : receiver
+  // -----------------------------------
+
+  // Load the FunctionTemplateInfo.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Do the compatible receiver check.
+  Label receiver_check_failed;
+  __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+  __ Push(eax);
+  CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+  __ Pop(eax);
+  // Get the callback offset from the FunctionTemplateInfo, and jump to the
+  // beginning of the code.
+  __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+  __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(edx);
+
+  // Compatible receiver check failed: pop return address, arguments and
+  // receiver and throw an Illegal Invocation exception.
+  __ bind(&receiver_check_failed);
+  __ Pop(eax);
+  __ PopReturnAddressTo(ebx);
+  __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+  __ add(esp, eax);
+  __ PushReturnAddressFrom(ebx);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+  }
+}
+
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+                                              bool has_handler_frame) {
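+  // Overview (a sketch): ask the runtime for OSR-compiled code, then rewrite
+  // our return address so that the final `ret` lands at
+  //   code + Code::kHeaderSize + DeoptimizationInputData::OsrPcOffset().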
+  // Lookup the function in the JavaScript frame.
+  if (has_handler_frame) {
+    __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
+  } else {
+    __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(eax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+  }
+
+  Label skip;
+  // If the code object is null, just return to the caller.
+  __ cmp(eax, Immediate(0));
+  __ j(not_equal, &skip, Label::kNear);
+  __ ret(0);
+
+  __ bind(&skip);
+
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+  if (has_handler_frame) {
+    __ leave();
+  }
+
+  // Load deoptimization data from the code object.
+  __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+                               DeoptimizationInputData::kOsrPcOffsetIndex) -
+                               kHeapObjectTag));
+  __ SmiUntag(ebx);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+  // Overwrite the return address on the stack.
+  __ mov(Operand(esp, 0), eax);
+
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+  Generate_OnStackReplacementHelper(masm, true);
+}
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X87