Merge V8 5.2.361.47  DO NOT MERGE

https://chromium.googlesource.com/v8/v8/+/5.2.361.47

FPIIM-449

Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
new file mode 100644
index 0000000..3e26b52
--- /dev/null
+++ b/src/code-stub-assembler.cc
@@ -0,0 +1,1572 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     const CallInterfaceDescriptor& descriptor,
+                                     Code::Flags flags, const char* name,
+                                     size_t result_size)
+    : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
+                              result_size) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     int parameter_count, Code::Flags flags,
+                                     const char* name)
+    : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+  return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::EmptyStringConstant() {
+  return LoadRoot(Heap::kempty_stringRootIndex);
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+  return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NoContextConstant() {
+  return SmiConstant(Smi::FromInt(0));
+}
+
+Node* CodeStubAssembler::NullConstant() {
+  return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+  return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
+Node* CodeStubAssembler::StaleRegisterConstant() {
+  return LoadRoot(Heap::kStaleRegisterRootIndex);
+}
+
+Node* CodeStubAssembler::Float64Round(Node* x) {
+  Node* one = Float64Constant(1.0);
+  Node* one_half = Float64Constant(0.5);
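+
+  // Strategy: round half towards Infinity. Compute ceil(x) first; if
+  // ceil(x) - 0.5 still exceeds {x}, then {x} was closer to ceil(x) - 1 and
+  // we correct downwards. E.g. x = 2.5: 3.0 - 0.5 <= 2.5, so keep 3.0;
+  // x = 2.4: 3.0 - 0.5 > 2.4, so return 2.0.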
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this);
+
+  // Round up {x} towards Infinity.
+  var_x.Bind(Float64Ceil(x));
+
+  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+         &return_x);
+  var_x.Bind(Float64Sub(var_x.value(), one));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+  if (IsFloat64RoundUpSupported()) {
+    return Float64RoundUp(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
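+
+  // For doubles in [2^52, 2^53) the spacing between adjacent values is
+  // exactly 1.0, so for 0 < x < 2^52 the expression (2^52 + x) - 2^52
+  // yields {x} rounded to an integer by the round-to-nearest FP mode; the
+  // compare-and-adjust below then turns that into a proper ceiling.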
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round the negated {x} towards -Infinity (i.e. floor it) and return
+    // the result negated: -floor(-x) == ceil(x).
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+  if (IsFloat64RoundDownSupported()) {
+    return Float64RoundDown(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards -Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round the negated {x} towards Infinity (i.e. ceil it) and return the
+    // result negated: -ceil(-x) == floor(x).
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
+  if (IsFloat64RoundTruncateSupported()) {
+    return Float64RoundTruncate(x);
+  }
+
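+  // trunc(x) equals floor(x) for positive {x} and ceil(x) for negative {x},
+  // so each branch below uses the corresponding machine rounding instruction
+  // when available and falls back to the 2^52 trick otherwise.
+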
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than 0.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    if (IsFloat64RoundDownSupported()) {
+      var_x.Bind(Float64RoundDown(x));
+    } else {
+      // Just return {x} unless it's in the range ]0,2^52[.
+      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+      // Round positive {x} towards -Infinity.
+      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+    }
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    if (IsFloat64RoundUpSupported()) {
+      var_x.Bind(Float64RoundUp(x));
+      Goto(&return_x);
+    } else {
+      // Just return {x} unless it's in the range ]-2^52,0[.
+      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+      GotoUnless(Float64LessThan(x, zero), &return_x);
+
+      // Round the negated {x} towards -Infinity and return the result
+      // negated: for negative {x}, -floor(-x) == trunc(x).
+      Node* minus_x = Float64Neg(x);
+      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+      Goto(&return_minus_x);
+    }
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
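+// A Smi ("small integer") stores an integer directly in a tagged word: the
+// value is shifted left by kSmiShiftSize + kSmiTagSize (1 bit on 32-bit
+// targets, 32 bits on 64-bit targets), leaving the low tag bit 0. That
+// zero tag bit is what distinguishes Smis from heap object pointers, which
+// carry kHeapObjectTag (1) in their low bit.
+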
+Node* CodeStubAssembler::SmiFromWord32(Node* value) {
+  value = ChangeInt32ToIntPtr(value);
+  return WordShl(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiTag(Node* value) {
+  int32_t constant_value;
+  if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
+    return SmiConstant(Smi::FromInt(constant_value));
+  }
+  return WordShl(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+  return WordSar(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+  Node* result = WordSar(value, SmiShiftBitsConstant());
+  if (Is64()) {
+    result = TruncateInt64ToInt32(result);
+  }
+  return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+  return ChangeInt32ToFloat64(SmiToWord32(value));
+}
+
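+// Because the Smi tag bits are all zero, tagged Smis can be added,
+// subtracted and compared directly as word-sized integers; the result of an
+// add or sub is again a correctly tagged Smi (overflow aside, which the
+// *WithOverflow variants detect).
+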
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+  return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+  return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
+  return UintPtrGreaterThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+  return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+  return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+  // TODO(bmeurer): Consider using Select once available.
+  Variable min(this, MachineRepresentation::kTagged);
+  Label if_a(this), if_b(this), join(this);
+  BranchIfSmiLessThan(a, b, &if_a, &if_b);
+  Bind(&if_a);
+  min.Bind(a);
+  Goto(&join);
+  Bind(&if_b);
+  min.Bind(b);
+  Goto(&join);
+  Bind(&join);
+  return min.value();
+}
+
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
+  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+}
+
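+// A positive Smi has both the tag bit(s) and the sign bit of the tagged
+// word clear, so a single masked compare checks "is a Smi" and "is >= 0" at
+// once.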
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+                   IntPtrConstant(0));
+}
+
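+// Bump-pointer allocation: advance the space's top pointer by
+// {size_in_bytes} and return the old top; if the new top would cross the
+// limit, defer to a runtime allocation call instead.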
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+
+  // If there's not enough space, call the runtime.
+  Variable result(this, MachineRepresentation::kTagged);
+  Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
+  Label merge_runtime(this, &result);
+
+  Node* new_top = IntPtrAdd(top, size_in_bytes);
+  Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
+         &no_runtime_call);
+
+  Bind(&runtime_call);
+  // AllocateInTargetSpace does not use the context.
+  Node* context = SmiConstant(Smi::FromInt(0));
+
+  Node* runtime_result;
+  if (flags & kPretenured) {
+    Node* runtime_flags = SmiConstant(
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                     AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
+    runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+                                 SmiTag(size_in_bytes), runtime_flags);
+  } else {
+    runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
+                                 SmiTag(size_in_bytes));
+  }
+  result.Bind(runtime_result);
+  Goto(&merge_runtime);
+
+  // When there is enough space, return {top} and bump it up.
+  Bind(&no_runtime_call);
+  Node* no_runtime_result = top;
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      new_top);
+  no_runtime_result = BitcastWordToTagged(
+      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
+  result.Bind(no_runtime_result);
+  Goto(&merge_runtime);
+
+  Bind(&merge_runtime);
+  return result.value();
+}
+
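+// Double alignment (needed on 32-bit targets only): over-allocate by one
+// word when the current top is misaligned, and plug the unused word with a
+// one-word filler map so the heap remains iterable.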
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+                                            AllocationFlags flags,
+                                            Node* top_address,
+                                            Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+  Variable adjusted_size(this, MachineType::PointerRepresentation());
+  adjusted_size.Bind(size_in_bytes);
+  if (flags & kDoubleAlignment) {
+    // TODO(epertoso): Simd128 alignment.
+    Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
+    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
+           &aligned);
+
+    Bind(&not_aligned);
+    Node* not_aligned_size =
+        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+    adjusted_size.Bind(not_aligned_size);
+    Goto(&merge);
+
+    Bind(&aligned);
+    Goto(&merge);
+
+    Bind(&merge);
+  }
+
+  Variable address(this, MachineRepresentation::kTagged);
+  address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
+
+  Label needs_filler(this), doesnt_need_filler(this),
+      merge_address(this, &address);
+  Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &doesnt_need_filler,
+         &needs_filler);
+
+  Bind(&needs_filler);
+  // Store a filler and increase the address by kPointerSize.
+  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+  // it when Simd128 alignment is supported.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+  address.Bind(BitcastWordToTagged(
+      IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
+  Goto(&merge_address);
+
+  Bind(&doesnt_need_filler);
+  Goto(&merge_address);
+
+  Bind(&merge_address);
+  // Update the top.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, adjusted_size.value()));
+  return address.value();
+}
+
+Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+  bool const new_space = !(flags & kPretenured);
+  Node* top_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_top_address(isolate())
+          : ExternalReference::old_space_allocation_top_address(isolate()));
+  Node* limit_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_limit_address(isolate())
+          : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+  if (flags & kDoubleAlignment) {
+    return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
+  }
+#endif
+
+  return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
+}
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+  return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
+  return BitcastWordToTagged(IntPtrAdd(previous, offset));
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
+  return InnerAllocate(previous, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+                                          MachineType rep) {
+  return Load(rep, buffer, IntPtrConstant(offset));
+}
+
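+// Field offsets are given relative to the start of the object, but the
+// object pointer carries kHeapObjectTag (1), so the tag is subtracted to
+// form the untagged address of the field.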
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+                                         MachineType rep) {
+  return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+  return Load(MachineType::Float64(), object,
+              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMap(Node* object) {
+  return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+  return LoadMapInstanceType(LoadMap(object));
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+  return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
+  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+  return Load(MachineType::Uint32(), map,
+              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+  return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+  return LoadObjectField(map, Map::kPrototypeOffset);
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name) {
+  return Load(MachineType::Uint32(), name,
+              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
+  Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
+  Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
+  Node* total_size = IntPtrAdd(data_size, header_size);
+
+  Node* result = Allocate(total_size, kNone);
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+      SmiTag(length));
+
+  return result;
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
+                                               int additional_offset,
+                                               ParameterMode parameter_mode) {
+  int32_t header_size =
+      FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+  Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+                                        parameter_mode, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceSizeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadNativeContext(Node* context) {
+  return LoadFixedArrayElement(context,
+                               Int32Constant(Context::NATIVE_CONTEXT_INDEX));
+}
+
+Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
+                                                Node* native_context) {
+  return LoadFixedArrayElement(native_context,
+                               Int32Constant(Context::ArrayMapIndex(kind)));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kFloat64, object,
+      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectField(
+    Node* object, int offset, Node* value) {
+  return Store(MachineRepresentation::kTagged, object,
+               IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+    Node* object, int offset, Node* value, MachineRepresentation rep) {
+  return StoreNoWriteBarrier(rep, object,
+                             IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kTagged, object,
+      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
+                                                Node* value,
+                                                WriteBarrierMode barrier_mode,
+                                                ParameterMode parameter_mode) {
+  DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
+         barrier_mode == UPDATE_WRITE_BARRIER);
+  Node* offset =
+      ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, parameter_mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  MachineRepresentation rep = MachineRepresentation::kTagged;
+  if (barrier_mode == SKIP_WRITE_BARRIER) {
+    return StoreNoWriteBarrier(rep, object, offset, value);
+  } else {
+    return Store(rep, object, offset, value);
+  }
+}
+
+Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
+    Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+  Node* offset =
+      ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  MachineRepresentation rep = MachineRepresentation::kFloat64;
+  return StoreNoWriteBarrier(rep, object, offset, value);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+  Node* result = Allocate(HeapNumber::kSize, kNone);
+  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+  Node* result = AllocateHeapNumber();
+  StoreHeapNumberValue(result, value);
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
+  Node* result = Allocate(SeqOneByteString::SizeFor(length));
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+                                 SmiConstant(Smi::FromInt(length)));
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField));
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
+  Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+                                 SmiConstant(Smi::FromInt(length)));
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField));
+  return result;
+}
+
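+// Allocates the JSArray, the optional AllocationMemento and the elements
+// backing store as a single contiguous chunk,
+// [JSArray][AllocationMemento?][FixedArray(Base)], then initializes all of
+// them; the elements are pre-filled with holes.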
+Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
+                                         Node* capacity_node, Node* length_node,
+                                         compiler::Node* allocation_site,
+                                         ParameterMode mode) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int base_size = JSArray::kSize + FixedArray::kHeaderSize;
+  int elements_offset = JSArray::kSize;
+
+  if (allocation_site != nullptr) {
+    base_size += AllocationMemento::kSize;
+    elements_offset += AllocationMemento::kSize;
+  }
+
+  int32_t capacity;
+  bool constant_capacity = ToInt32Constant(capacity_node, capacity);
+  Node* total_size =
+      ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
+
+  // Allocate both array and elements object, and initialize the JSArray.
+  Heap* heap = isolate()->heap();
+  Node* array = Allocate(total_size);
+  StoreMapNoWriteBarrier(array, array_map);
+  Node* empty_properties =
+      HeapConstant(Handle<HeapObject>(heap->empty_fixed_array()));
+  StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
+                                 empty_properties);
+  StoreObjectFieldNoWriteBarrier(
+      array, JSArray::kLengthOffset,
+      mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
+
+  if (allocation_site != nullptr) {
+    InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
+  }
+
+  // Set up the elements object.
+  Node* elements = InnerAllocate(array, elements_offset);
+  StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+  Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
+                                     : heap->fixed_array_map());
+  StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
+  StoreObjectFieldNoWriteBarrier(
+      elements, FixedArray::kLengthOffset,
+      mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
+
+  int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
+  Node* double_hole =
+      Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
+  DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
+  if (constant_capacity && capacity <= kElementLoopUnrollThreshold) {
+    for (int i = 0; i < capacity; ++i) {
+      if (is_double) {
+        Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+                                              first_element_offset);
+        // Don't use doubles to store the hole double, since manipulating the
+        // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+        // change its value on ia32 (the x87 stack is used to return values
+        // and stores to the stack silently clear the signalling bit).
+        //
+        // TODO(danno): When we have a Float32/Float64 wrapper class that
+        // preserves double bits during manipulation, remove this code/change
+        // this to an indexed Float64 store.
+        if (Is64()) {
+          StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+                              double_hole);
+        } else {
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+                              double_hole);
+          offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+                                          first_element_offset + kPointerSize);
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+                              double_hole);
+        }
+      } else {
+        StoreFixedArrayElement(elements, Int32Constant(i), hole,
+                               SKIP_WRITE_BARRIER);
+      }
+    }
+  } else {
+    // TODO(danno): Add a loop for initialization
+    UNIMPLEMENTED();
+  }
+
+  return array;
+}
+
+void CodeStubAssembler::InitializeAllocationMemento(
+    compiler::Node* base_allocation, int base_allocation_size,
+    compiler::Node* allocation_site) {
+  StoreObjectFieldNoWriteBarrier(
+      base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
+      HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+  StoreObjectFieldNoWriteBarrier(
+      base_allocation,
+      AllocationMemento::kAllocationSiteOffset + base_allocation_size,
+      allocation_site);
+  if (FLAG_allocation_site_pretenuring) {
+    Node* count = LoadObjectField(allocation_site,
+                                  AllocationSite::kPretenureCreateCountOffset);
+    Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+    StoreObjectFieldNoWriteBarrier(allocation_site,
+                                   AllocationSite::kPretenureCreateCountOffset,
+                                   incremented_count);
+  }
+}
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kFloat64);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToFloat64(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Load the floating point value.
+        var_result.Bind(LoadHeapNumberValue(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToWord32(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Truncate the floating point value.
+        var_result.Bind(TruncateHeapNumberValueToWord32(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+  Node* value = LoadHeapNumberValue(object);
+  return TruncateFloat64ToWord32(value);
+}
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+  Node* value32 = RoundFloat64ToInt32(value);
+  Node* value64 = ChangeInt32ToFloat64(value32);
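+
+  // If {value} survives the float64 -> int32 -> float64 round-trip, it is
+  // integral and may fit in a Smi. The extra checks below catch -0.0, whose
+  // high word has the sign bit set; it must be boxed in a HeapNumber to
+  // preserve its sign.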
+
+  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+  Label if_valueisequal(this), if_valueisnotequal(this);
+  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+  Bind(&if_valueisequal);
+  {
+    GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
+    BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
+                          &if_valueisheapnumber, &if_valueisint32);
+  }
+  Bind(&if_valueisnotequal);
+  Goto(&if_valueisheapnumber);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Bind(&if_valueisint32);
+  {
+    if (Is64()) {
+      Node* result = SmiTag(ChangeInt32ToInt64(value32));
+      var_result.Bind(result);
+      Goto(&if_join);
+    } else {
+      Node* pair = Int32AddWithOverflow(value32, value32);
+      Node* overflow = Projection(1, pair);
+      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+      Branch(overflow, &if_overflow, &if_notoverflow);
+      Bind(&if_overflow);
+      Goto(&if_valueisheapnumber);
+      Bind(&if_notoverflow);
+      {
+        Node* result = Projection(0, pair);
+        var_result.Bind(result);
+        Goto(&if_join);
+      }
+    }
+  }
+  Bind(&if_valueisheapnumber);
+  {
+    Node* result = AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+  Bind(&if_join);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+  if (Is64()) {
+    return SmiTag(ChangeInt32ToInt64(value));
+  }
+  Variable var_result(this, MachineRepresentation::kTagged);
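+  // On 32-bit targets Smi tagging is a left shift by one bit; {value} +
+  // {value} performs exactly that shift, and the overflow projection
+  // catches values outside the 31-bit Smi range.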
+  Node* pair = Int32AddWithOverflow(value, value);
+  Node* overflow = Projection(1, pair);
+  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+      if_join(this);
+  Branch(overflow, &if_overflow, &if_notoverflow);
+  Bind(&if_overflow);
+  {
+    Node* value64 = ChangeInt32ToFloat64(value);
+    Node* result = AllocateHeapNumberWithValue(value64);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_notoverflow);
+  {
+    Node* result = Projection(0, pair);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_join);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
+  Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
+      if_join(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
+  // If {value} > 2^31 - 1, we need to store it in a HeapNumber.
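+  // The signed "< 0" comparison below is that range check: an unsigned
+  // value with the top bit set is >= 2^31.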
+  Branch(Int32LessThan(value, Int32Constant(0)), &if_overflow,
+         &if_not_overflow);
+  Bind(&if_not_overflow);
+  {
+    if (Is64()) {
+      var_result.Bind(SmiTag(ChangeUint32ToUint64(value)));
+    } else {
+      // If tagging {value} results in an overflow, we need to use a HeapNumber
+      // to represent it.
+      Node* pair = Int32AddWithOverflow(value, value);
+      Node* overflow = Projection(1, pair);
+      GotoIf(overflow, &if_overflow);
+
+      Node* result = Projection(0, pair);
+      var_result.Bind(result);
+    }
+  }
+  Goto(&if_join);
+
+  Bind(&if_overflow);
+  {
+    Node* float64_value = ChangeUint32ToFloat64(value);
+    var_result.Bind(AllocateHeapNumberWithValue(float64_value));
+  }
+  Goto(&if_join);
+
+  Bind(&if_join);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
+                                      char const* method_name) {
+  Variable var_value(this, MachineRepresentation::kTagged);
+  var_value.Bind(value);
+
+  // Check if the {value} is a Smi or a HeapObject.
+  Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
+      if_valueisstring(this);
+  Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+  Bind(&if_valueisnotsmi);
+  {
+    // Load the instance type of the {value}.
+    Node* value_instance_type = LoadInstanceType(value);
+
+    // Check if the {value} is already String.
+    Label if_valueisnotstring(this, Label::kDeferred);
+    Branch(
+        Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+        &if_valueisstring, &if_valueisnotstring);
+    Bind(&if_valueisnotstring);
+    {
+      // Check if the {value} is null.
+      Label if_valueisnullorundefined(this, Label::kDeferred),
+          if_valueisnotnullorundefined(this, Label::kDeferred),
+          if_valueisnotnull(this, Label::kDeferred);
+      Branch(WordEqual(value, NullConstant()), &if_valueisnullorundefined,
+             &if_valueisnotnull);
+      Bind(&if_valueisnotnull);
+      {
+        // Check if the {value} is undefined.
+        Branch(WordEqual(value, UndefinedConstant()),
+               &if_valueisnullorundefined, &if_valueisnotnullorundefined);
+        Bind(&if_valueisnotnullorundefined);
+        {
+          // Convert the {value} to a String.
+          Callable callable = CodeFactory::ToString(isolate());
+          var_value.Bind(CallStub(callable, context, value));
+          Goto(&if_valueisstring);
+        }
+      }
+
+      Bind(&if_valueisnullorundefined);
+      {
+        // The {value} is either null or undefined.
+        CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
+                    HeapConstant(factory()->NewStringFromAsciiChecked(
+                        method_name, TENURED)));
+        Goto(&if_valueisstring);  // Never reached.
+      }
+    }
+  }
+  Bind(&if_valueissmi);
+  {
+    // The {value} is a Smi, convert it to a String.
+    Callable callable = CodeFactory::NumberToString(isolate());
+    var_value.Bind(CallStub(callable, context, value));
+    Goto(&if_valueisstring);
+  }
+  Bind(&if_valueisstring);
+  return var_value.value();
+}
+
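+// Reads the character at {index} by walking the string representation:
+// sequential strings are read in place, cons strings continue with their
+// first part (flattening first when the second part is non-empty), external
+// strings read from the backing resource, and sliced strings redirect to
+// their parent at an adjusted index.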
+Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+  // Translate the {index} into a Word.
+  index = SmiToWord(index);
+
+  // We may need to loop in case of cons or sliced strings.
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_result(this, MachineRepresentation::kWord32);
+  Variable var_string(this, MachineRepresentation::kTagged);
+  Variable* loop_vars[] = {&var_index, &var_string};
+  Label done_loop(this, &var_result), loop(this, 2, loop_vars);
+  var_string.Bind(string);
+  var_index.Bind(index);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {index}.
+    index = var_index.value();
+
+    // Load the current {string}.
+    string = var_string.value();
+
+    // Load the instance type of the {string}.
+    Node* string_instance_type = LoadInstanceType(string);
+
+    // Check if the {string} is a SeqString.
+    Label if_stringissequential(this), if_stringisnotsequential(this);
+    Branch(Word32Equal(Word32And(string_instance_type,
+                                 Int32Constant(kStringRepresentationMask)),
+                       Int32Constant(kSeqStringTag)),
+           &if_stringissequential, &if_stringisnotsequential);
+
+    Bind(&if_stringissequential);
+    {
+      // Check if the {string} is a TwoByteSeqString or a OneByteSeqString.
+      Label if_stringistwobyte(this), if_stringisonebyte(this);
+      Branch(Word32Equal(Word32And(string_instance_type,
+                                   Int32Constant(kStringEncodingMask)),
+                         Int32Constant(kTwoByteStringTag)),
+             &if_stringistwobyte, &if_stringisonebyte);
+
+      Bind(&if_stringisonebyte);
+      {
+        var_result.Bind(
+            Load(MachineType::Uint8(), string,
+                 IntPtrAdd(index, IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                                 kHeapObjectTag))));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_stringistwobyte);
+      {
+        var_result.Bind(
+            Load(MachineType::Uint16(), string,
+                 IntPtrAdd(WordShl(index, IntPtrConstant(1)),
+                           IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                          kHeapObjectTag))));
+        Goto(&done_loop);
+      }
+    }
+
+    Bind(&if_stringisnotsequential);
+    {
+      // Check if the {string} is a ConsString.
+      Label if_stringiscons(this), if_stringisnotcons(this);
+      Branch(Word32Equal(Word32And(string_instance_type,
+                                   Int32Constant(kStringRepresentationMask)),
+                         Int32Constant(kConsStringTag)),
+             &if_stringiscons, &if_stringisnotcons);
+
+      Bind(&if_stringiscons);
+      {
+        // Check whether the right hand side is the empty string (i.e. if
+        // this is really a flat string in a cons string). If that is not
+        // the case, we flatten the string first.
+        Label if_rhsisempty(this), if_rhsisnotempty(this, Label::kDeferred);
+        Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+        Branch(WordEqual(rhs, EmptyStringConstant()), &if_rhsisempty,
+               &if_rhsisnotempty);
+
+        Bind(&if_rhsisempty);
+        {
+          // Just operate on the left hand side of the {string}.
+          var_string.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+          Goto(&loop);
+        }
+
+        Bind(&if_rhsisnotempty);
+        {
+          // Flatten the {string} and lookup in the resulting string.
+          var_string.Bind(CallRuntime(Runtime::kFlattenString,
+                                      NoContextConstant(), string));
+          Goto(&loop);
+        }
+      }
+
+      Bind(&if_stringisnotcons);
+      {
+        // Check if the {string} is an ExternalString.
+        Label if_stringisexternal(this), if_stringisnotexternal(this);
+        Branch(Word32Equal(Word32And(string_instance_type,
+                                     Int32Constant(kStringRepresentationMask)),
+                           Int32Constant(kExternalStringTag)),
+               &if_stringisexternal, &if_stringisnotexternal);
+
+        Bind(&if_stringisexternal);
+        {
+          // Check if the {string} is a short external string.
+          Label if_stringisshort(this),
+              if_stringisnotshort(this, Label::kDeferred);
+          Branch(Word32Equal(Word32And(string_instance_type,
+                                       Int32Constant(kShortExternalStringMask)),
+                             Int32Constant(0)),
+                 &if_stringisshort, &if_stringisnotshort);
+
+          Bind(&if_stringisshort);
+          {
+            // Load the actual resource data from the {string}.
+            Node* string_resource_data =
+                LoadObjectField(string, ExternalString::kResourceDataOffset,
+                                MachineType::Pointer());
+
+            // Check if the {string} is a TwoByteExternalString or a
+            // OneByteExternalString.
+            Label if_stringistwobyte(this), if_stringisonebyte(this);
+            Branch(Word32Equal(Word32And(string_instance_type,
+                                         Int32Constant(kStringEncodingMask)),
+                               Int32Constant(kTwoByteStringTag)),
+                   &if_stringistwobyte, &if_stringisonebyte);
+
+            Bind(&if_stringisonebyte);
+            {
+              var_result.Bind(
+                  Load(MachineType::Uint8(), string_resource_data, index));
+              Goto(&done_loop);
+            }
+
+            Bind(&if_stringistwobyte);
+            {
+              var_result.Bind(Load(MachineType::Uint16(), string_resource_data,
+                                   WordShl(index, IntPtrConstant(1))));
+              Goto(&done_loop);
+            }
+          }
+
+          Bind(&if_stringisnotshort);
+          {
+            // The {string} might be compressed, call the runtime.
+            var_result.Bind(SmiToWord32(
+                CallRuntime(Runtime::kExternalStringGetChar,
+                            NoContextConstant(), string, SmiTag(index))));
+            Goto(&done_loop);
+          }
+        }
+
+        Bind(&if_stringisnotexternal);
+        {
+          // The {string} is a SlicedString, continue with its parent.
+          Node* string_offset =
+              SmiToWord(LoadObjectField(string, SlicedString::kOffsetOffset));
+          Node* string_parent =
+              LoadObjectField(string, SlicedString::kParentOffset);
+          var_index.Bind(IntPtrAdd(index, string_offset));
+          var_string.Bind(string_parent);
+          Goto(&loop);
+        }
+      }
+    }
+  }
+
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::StringFromCharCode(Node* code) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  // Check if the {code} is a one-byte char code.
+  Label if_codeisonebyte(this), if_codeistwobyte(this, Label::kDeferred),
+      if_done(this);
+  Branch(Int32LessThanOrEqual(code, Int32Constant(String::kMaxOneByteCharCode)),
+         &if_codeisonebyte, &if_codeistwobyte);
+  Bind(&if_codeisonebyte);
+  {
+    // Load the isolate wide single character string cache.
+    Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+
+    // Check if we have an entry for the {code} in the single character string
+    // cache already.
+    Label if_entryisundefined(this, Label::kDeferred),
+        if_entryisnotundefined(this);
+    Node* entry = LoadFixedArrayElement(cache, code);
+    Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
+           &if_entryisnotundefined);
+
+    Bind(&if_entryisundefined);
+    {
+      // Allocate a new SeqOneByteString for {code} and store it in the {cache}.
+      Node* result = AllocateSeqOneByteString(1);
+      StoreNoWriteBarrier(
+          MachineRepresentation::kWord8, result,
+          IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
+      StoreFixedArrayElement(cache, code, result);
+      var_result.Bind(result);
+      Goto(&if_done);
+    }
+
+    Bind(&if_entryisnotundefined);
+    {
+      // Return the entry from the {cache}.
+      var_result.Bind(entry);
+      Goto(&if_done);
+    }
+  }
+
+  Bind(&if_codeistwobyte);
+  {
+    // Allocate a new SeqTwoByteString for {code}.
+    Node* result = AllocateSeqTwoByteString(1);
+    StoreNoWriteBarrier(
+        MachineRepresentation::kWord16, result,
+        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), code);
+    var_result.Bind(result);
+    Goto(&if_done);
+  }
+
+  Bind(&if_done);
+  return var_result.value();
+}
+
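+// Decodes a bit field from {word32} by masking out the field's bits and
+// shifting them down to bit 0; e.g. mask 0b1100 with shift 2 maps 0b0100
+// to 1.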
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+                                        uint32_t mask) {
+  return Word32Shr(Word32And(word32, Int32Constant(mask)),
+                   Int32Constant(shift));
+}
+
+void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
+                                  Variable* var_index, Label* if_keyisunique,
+                                  Label* call_runtime) {
+  DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+
+  Label if_keyissmi(this), if_keyisnotsmi(this);
+  Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
+  Bind(&if_keyissmi);
+  {
+    // Negative smi keys are named properties. Handle in the runtime.
+    Label if_keyispositive(this);
+    Branch(WordIsPositiveSmi(key), &if_keyispositive, call_runtime);
+    Bind(&if_keyispositive);
+
+    var_index->Bind(SmiToWord32(key));
+    Goto(if_keyisindex);
+  }
+
+  Bind(&if_keyisnotsmi);
+
+  Node* key_instance_type = LoadInstanceType(key);
+  Label if_keyisnotsymbol(this);
+  Branch(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
+         if_keyisunique, &if_keyisnotsymbol);
+  Bind(&if_keyisnotsymbol);
+  {
+    Label if_keyisinternalized(this);
+    Node* bits =
+        WordAnd(key_instance_type,
+                Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+    Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
+           &if_keyisinternalized, call_runtime);
+    Bind(&if_keyisinternalized);
+
+    // Check whether the key is an array index passed in as a string. If so,
+    // handle it uniformly with smi keys.
+    // TODO(verwaest): Also support non-internalized strings.
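+    // The hash field of an internalized string caches whether the string is
+    // an array index and, if so, the index value itself.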
+    Node* hash = LoadNameHash(key);
+    Node* bit =
+        Word32And(hash, Int32Constant(internal::Name::kIsNotArrayIndexMask));
+    Label if_isarrayindex(this);
+    Branch(Word32Equal(bit, Int32Constant(0)), &if_isarrayindex,
+           if_keyisunique);
+    Bind(&if_isarrayindex);
+    var_index->Bind(BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
+    Goto(if_keyisindex);
+  }
+}
+
+void CodeStubAssembler::TryLookupProperty(Node* object, Node* map,
+                                          Node* instance_type, Node* name,
+                                          Label* if_found, Label* if_not_found,
+                                          Label* call_runtime) {
+  {
+    Label if_objectissimple(this);
+    Branch(Int32LessThanOrEqual(instance_type,
+                                Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+           call_runtime, &if_objectissimple);
+    Bind(&if_objectissimple);
+  }
+
+  // TODO(verwaest): Perform a dictionary lookup on slow-mode receivers.
+  Node* bit_field3 = LoadMapBitField3(map);
+  Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
+  Label if_isfastmap(this);
+  Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, call_runtime);
+  Bind(&if_isfastmap);
+  Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+  // Bail out to the runtime for large numbers of own descriptors. The stub only
+  // does linear search, which becomes too expensive in that case.
+  {
+    static const int32_t kMaxLinear = 210;
+    Label above_max(this), below_max(this);
+    Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxLinear)), &below_max,
+           call_runtime);
+    Bind(&below_max);
+  }
+  Node* descriptors = LoadMapDescriptors(map);
+
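+  // Linear scan over the descriptor keys; descriptor {i}'s key is stored at
+  // fixed-array index ToKeyIndex(0) + i * kDescriptorSize.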
+  Variable var_descriptor(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_descriptor);
+  var_descriptor.Bind(Int32Constant(0));
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* index = var_descriptor.value();
+    Node* offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
+    Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
+    Label if_notdone(this);
+    Branch(Word32Equal(index, nof), if_not_found, &if_notdone);
+    Bind(&if_notdone);
+    {
+      Node* array_index = Int32Add(offset, Int32Mul(index, factor));
+      Node* current = LoadFixedArrayElement(descriptors, array_index);
+      Label if_unequal(this);
+      Branch(WordEqual(current, name), if_found, &if_unequal);
+      Bind(&if_unequal);
+
+      var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
+      Goto(&loop);
+    }
+  }
+}
+
+void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
+                                         Node* instance_type, Node* index,
+                                         Label* if_found, Label* if_not_found,
+                                         Label* call_runtime) {
+  {
+    Label if_objectissimple(this);
+    Branch(Int32LessThanOrEqual(instance_type,
+                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+           call_runtime, &if_objectissimple);
+    Bind(&if_objectissimple);
+  }
+
+  Node* bit_field2 = LoadMapBitField2(map);
+  Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+  // TODO(verwaest): Support other elements kinds as well.
+  Label if_isobjectorsmi(this);
+  Branch(
+      Int32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+      &if_isobjectorsmi, call_runtime);
+  Bind(&if_isobjectorsmi);
+  {
+    Node* elements = LoadElements(object);
+    Node* length = LoadFixedArrayBaseLength(elements);
+
+    Label if_iskeyinrange(this);
+    Branch(Int32LessThan(index, SmiToWord32(length)), &if_iskeyinrange,
+           if_not_found);
+
+    Bind(&if_iskeyinrange);
+    Node* element = LoadFixedArrayElement(elements, index);
+    Node* the_hole = LoadRoot(Heap::kTheHoleValueRootIndex);
+    Branch(WordEqual(element, the_hole), if_not_found, if_found);
+  }
+}
+
+Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
+                                             Node* object) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label return_false(this), return_true(this),
+      return_runtime(this, Label::kDeferred), return_result(this);
+
+  // Goto runtime if {object} is a Smi.
+  GotoIf(WordIsSmi(object), &return_runtime);
+
+  // Load map of {object}.
+  Node* object_map = LoadMap(object);
+
+  // Lookup the {callable} and {object} map in the global instanceof cache.
+  // Note: This is safe because we clear the global instanceof cache whenever
+  // we change the prototype of any object.
+  Node* instanceof_cache_function =
+      LoadRoot(Heap::kInstanceofCacheFunctionRootIndex);
+  Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
+  {
+    Label instanceof_cache_miss(this);
+    GotoUnless(WordEqual(instanceof_cache_function, callable),
+               &instanceof_cache_miss);
+    GotoUnless(WordEqual(instanceof_cache_map, object_map),
+               &instanceof_cache_miss);
+    var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
+    Goto(&return_result);
+    Bind(&instanceof_cache_miss);
+  }
+
+  // Goto runtime if {callable} is a Smi.
+  GotoIf(WordIsSmi(callable), &return_runtime);
+
+  // Load map of {callable}.
+  Node* callable_map = LoadMap(callable);
+
+  // Goto runtime if {callable} is not a JSFunction.
+  Node* callable_instance_type = LoadMapInstanceType(callable_map);
+  GotoUnless(
+      Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
+      &return_runtime);
+
+  // Goto runtime if {callable} is not a constructor or has
+  // a non-instance "prototype".
+  Node* callable_bitfield = LoadMapBitField(callable_map);
+  GotoUnless(
+      Word32Equal(Word32And(callable_bitfield,
+                            Int32Constant((1 << Map::kHasNonInstancePrototype) |
+                                          (1 << Map::kIsConstructor))),
+                  Int32Constant(1 << Map::kIsConstructor)),
+      &return_runtime);
+
+  // Get the "prototype" (or initial map) of the {callable}.
+  Node* callable_prototype =
+      LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
+  {
+    Variable var_callable_prototype(this, MachineRepresentation::kTagged);
+    Label callable_prototype_valid(this);
+    var_callable_prototype.Bind(callable_prototype);
+
+    // Resolve the "prototype" if the {callable} has an initial map.  Afterwards
+    // the {callable_prototype} will be either the JSReceiver prototype object
+    // or the hole value, which means that no instances of the {callable} were
+    // created so far and hence we should return false.
+    Node* callable_prototype_instance_type =
+        LoadInstanceType(callable_prototype);
+    GotoUnless(
+        Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
+        &callable_prototype_valid);
+    var_callable_prototype.Bind(
+        LoadObjectField(callable_prototype, Map::kPrototypeOffset));
+    Goto(&callable_prototype_valid);
+    Bind(&callable_prototype_valid);
+    callable_prototype = var_callable_prototype.value();
+  }
+
+  // Update the global instanceof cache with the current {object} map and
+  // {callable}.  The cached answer will be set when it is known below.
+  StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, callable);
+  StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
+
+  // Loop through the prototype chain looking for the {callable} prototype.
+  Variable var_object_map(this, MachineRepresentation::kTagged);
+  var_object_map.Bind(object_map);
+  Label loop(this, &var_object_map);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* object_map = var_object_map.value();
+
+    // Check if the current {object} needs to be access checked.
+    Node* object_bitfield = LoadMapBitField(object_map);
+    GotoUnless(
+        Word32Equal(Word32And(object_bitfield,
+                              Int32Constant(1 << Map::kIsAccessCheckNeeded)),
+                    Int32Constant(0)),
+        &return_runtime);
+
+    // Check if the current {object} is a proxy.
+    Node* object_instance_type = LoadMapInstanceType(object_map);
+    GotoIf(Word32Equal(object_instance_type, Int32Constant(JS_PROXY_TYPE)),
+           &return_runtime);
+
+    // Check the current {object} prototype.
+    Node* object_prototype = LoadMapPrototype(object_map);
+    GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
+    GotoIf(WordEqual(object_prototype, NullConstant()), &return_false);
+
+    // Continue with the prototype.
+    var_object_map.Bind(LoadMap(object_prototype));
+    Goto(&loop);
+  }
+
+  Bind(&return_true);
+  StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(true));
+  var_result.Bind(BooleanConstant(true));
+  Goto(&return_result);
+
+  Bind(&return_false);
+  StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(false));
+  var_result.Bind(BooleanConstant(false));
+  Goto(&return_result);
+
+  Bind(&return_runtime);
+  {
+    // Invalidate the global instanceof cache.
+    StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, SmiConstant(0));
+    // Fallback to the runtime implementation.
+    var_result.Bind(
+        CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
+  }
+  Goto(&return_result);
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
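+// Computes base_size + index * element_size as an untagged word offset. In
+// SMI_PARAMETERS mode {index_node} is a tagged Smi, so the Smi shift is
+// folded into the element size shift (which may then become a right shift).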
+compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+                                                          ElementsKind kind,
+                                                          ParameterMode mode,
+                                                          int base_size) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int element_size_shift = is_double ? kDoubleSizeLog2 : kPointerSizeLog2;
+  int element_size = 1 << element_size_shift;
+  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+  int32_t index = 0;
+  bool constant_index = false;
+  if (mode == SMI_PARAMETERS) {
+    element_size_shift -= kSmiShiftBits;
+    intptr_t temp = 0;
+    constant_index = ToIntPtrConstant(index_node, temp);
+    index = temp >> kSmiShiftBits;
+  } else {
+    constant_index = ToInt32Constant(index_node, index);
+  }
+  if (constant_index) {
+    return IntPtrConstant(base_size + element_size * index);
+  }
+  if (Is64() && mode == INTEGER_PARAMETERS) {
+    index_node = ChangeInt32ToInt64(index_node);
+  }
+  if (base_size == 0) {
+    return (element_size_shift >= 0)
+               ? WordShl(index_node, IntPtrConstant(element_size_shift))
+               : WordShr(index_node, IntPtrConstant(-element_size_shift));
+  }
+  return IntPtrAdd(
+      IntPtrConstant(base_size),
+      (element_size_shift >= 0)
+          ? WordShl(index_node, IntPtrConstant(element_size_shift))
+          : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+}
+
+}  // namespace internal
+}  // namespace v8