Upgrade to 3.29

Update V8 to 3.29.88.17 and update the makefiles to support building on
all the relevant platforms.

Bug: 17370214

Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/lithium.cc b/src/lithium.cc
index aefd8b6..7d992a1 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -1,32 +1,38 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
-#include "v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
+
+#include "src/v8.h"
+
+#include "src/scopes.h"
+#include "src/serialize.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/lithium-ia32.h"  // NOLINT
+#include "src/ia32/lithium-codegen-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/lithium-x64.h"  // NOLINT
+#include "src/x64/lithium-codegen-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/lithium-arm.h"  // NOLINT
+#include "src/arm/lithium-codegen-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/lithium-mips.h"  // NOLINT
+#include "src/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"  // NOLINT
+#include "src/arm64/lithium-codegen-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h"  // NOLINT
+#include "src/mips64/lithium-codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"  // NOLINT
+#include "src/x87/lithium-codegen-x87.h"  // NOLINT
+#else
+#error "Unknown architecture."
+#endif
 
 namespace v8 {
 namespace internal {
@@ -41,27 +47,43 @@
     case UNALLOCATED:
       unalloc = LUnallocated::cast(this);
       stream->Add("v%d", unalloc->virtual_register());
-      switch (unalloc->policy()) {
+      if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
+        stream->Add("(=%dS)", unalloc->fixed_slot_index());
+        break;
+      }
+      switch (unalloc->extended_policy()) {
         case LUnallocated::NONE:
           break;
         case LUnallocated::FIXED_REGISTER: {
-          const char* register_name =
-              Register::AllocationIndexToString(unalloc->fixed_index());
-          stream->Add("(=%s)", register_name);
+          int reg_index = unalloc->fixed_register_index();
+          if (reg_index < 0 ||
+              reg_index >= Register::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_reg#%d)", reg_index);
+          } else {
+            const char* register_name =
+                Register::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", register_name);
+          }
           break;
         }
         case LUnallocated::FIXED_DOUBLE_REGISTER: {
-          const char* double_register_name =
-              DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
-          stream->Add("(=%s)", double_register_name);
+          int reg_index = unalloc->fixed_register_index();
+          if (reg_index < 0 ||
+              reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_double_reg#%d)", reg_index);
+          } else {
+            const char* double_register_name =
+                DoubleRegister::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", double_register_name);
+          }
           break;
         }
-        case LUnallocated::FIXED_SLOT:
-          stream->Add("(=%dS)", unalloc->fixed_index());
-          break;
         case LUnallocated::MUST_HAVE_REGISTER:
           stream->Add("(R)");
           break;
+        case LUnallocated::MUST_HAVE_DOUBLE_REGISTER:
+          stream->Add("(D)");
+          break;
         case LUnallocated::WRITABLE_REGISTER:
           stream->Add("(WR)");
           break;
@@ -82,44 +104,66 @@
     case DOUBLE_STACK_SLOT:
       stream->Add("[double_stack:%d]", index());
       break;
-    case REGISTER:
-      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+    case REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index));
+      }
       break;
-    case DOUBLE_REGISTER:
-      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+    }
+    case DOUBLE_REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 ||
+          reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_double_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]",
+                    DoubleRegister::AllocationIndexToString(reg_index));
+      }
       break;
-    case ARGUMENT:
-      stream->Add("[arg:%d]", index());
-      break;
+    }
   }
 }
 
-#define DEFINE_OPERAND_CACHE(name, type)                      \
-  name* name::cache = NULL;                                   \
-  void name::SetUpCache() {                                   \
-    if (cache) return;                                        \
-    cache = new name[kNumCachedOperands];                     \
-    for (int i = 0; i < kNumCachedOperands; i++) {            \
-      cache[i].ConvertTo(type, i);                            \
-    }                                                         \
-  }                                                           \
 
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
 
-#undef DEFINE_OPERAND_CACHE
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new LSubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+  cache = NULL;
+}
+
 
 void LOperand::SetUpCaches() {
-  LConstantOperand::SetUpCache();
-  LStackSlot::SetUpCache();
-  LDoubleStackSlot::SetUpCache();
-  LRegister::SetUpCache();
-  LDoubleRegister::SetUpCache();
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
+#undef LITHIUM_OPERAND_SETUP
 }
 
+
+void LOperand::TearDownCaches() {
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
+#undef LITHIUM_OPERAND_TEARDOWN
+}
+
+
 bool LParallelMove::IsRedundant() const {
   for (int i = 0; i < move_operands_.length(); ++i) {
     if (!move_operands_[i].IsRedundant()) return false;
@@ -150,9 +194,12 @@
 
 
 void LEnvironment::PrintTo(StringStream* stream) {
-  stream->Add("[id=%d|", ast_id());
-  stream->Add("[parameters=%d|", parameter_count());
-  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  stream->Add("[id=%d|", ast_id().ToInt());
+  if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+    stream->Add("deopt_id=%d|", deoptimization_index());
+  }
+  stream->Add("parameters=%d|", parameter_count());
+  stream->Add("arguments_stack_height=%d|", arguments_stack_height());
   for (int i = 0; i < values_.length(); ++i) {
     if (i != 0) stream->Add(";");
     if (values_[i] == NULL) {
@@ -165,18 +212,18 @@
 }
 
 
-void LPointerMap::RecordPointer(LOperand* op) {
+void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  pointer_operands_.Add(op);
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op, zone);
 }
 
 
 void LPointerMap::RemovePointer(LOperand* op) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
   for (int i = 0; i < pointer_operands_.length(); ++i) {
     if (pointer_operands_[i]->Equals(op)) {
       pointer_operands_.Remove(i);
@@ -186,11 +233,11 @@
 }
 
 
-void LPointerMap::RecordUntagged(LOperand* op) {
+void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  untagged_operands_.Add(op);
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op, zone);
 }
 
 
@@ -200,34 +247,424 @@
     if (i != 0) stream->Add(";");
     pointer_operands_[i]->PrintTo(stream);
   }
-  stream->Add("} @%d", position());
+  stream->Add("}");
 }
 
 
-int ElementsKindToShiftSize(ElementsKind elements_kind) {
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      return 0;
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      return 1;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-      return 2;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-      return 3;
-    case FAST_SMI_ONLY_ELEMENTS:
-    case FAST_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      return kPointerSizeLog2;
+int StackSlotOffset(int index) {
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return -(index + 1) * kPointerSize -
+        StandardFrameConstants::kFixedFrameSizeFromFp;
+  } else {
+    // Incoming parameter. Skip the return address.
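+    // For example, index -1 (the parameter slot closest to the frame)
+    // resolves to kFPOnStackSize + kPCOnStackSize above fp, just past the
+    // saved frame pointer and the return address.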
+    return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
   }
-  UNREACHABLE();
-  return 0;
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+    : spill_slot_count_(0),
+      info_(info),
+      graph_(graph),
+      instructions_(32, info->zone()),
+      pointer_maps_(8, info->zone()),
+      inlined_closures_(1, info->zone()),
+      deprecation_dependencies_(MapLess(), MapAllocator(info->zone())),
+      stability_dependencies_(MapLess(), MapAllocator(info->zone())) {}
+
+
+LLabel* LChunk::GetLabel(int block_id) const {
+  HBasicBlock* block = graph_->blocks()->at(block_id);
+  int first_instruction = block->first_instruction_index();
+  return LLabel::cast(instructions_[first_instruction]);
+}
+
+
+int LChunk::LookupDestination(int block_id) const {
+  LLabel* cur = GetLabel(block_id);
+  while (cur->replacement() != NULL) {
+    cur = cur->replacement();
+  }
+  return cur->block_id();
+}
+
+Label* LChunk::GetAssemblyLabel(int block_id) const {
+  LLabel* label = GetLabel(block_id);
+  DCHECK(!label->HasReplacement());
+  return label->label();
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  LPhase phase("L_Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int i = first + 1; i < last && can_eliminate; ++i) {
+          LInstruction* cur = instructions()->at(i);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
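+// Every instruction is paired with a gap: control instructions are preceded
+// by their gap, all other instructions are followed by one. |index| records
+// the instruction's own position so its pointer map (if any) can refer back
+// to it.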
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LInstructionGap* gap = new (zone()) LInstructionGap(block);
+  gap->set_hydrogen_value(instr->hydrogen_value());
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap, zone());
+    index = instructions_.length();
+    instructions_.Add(instr, zone());
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr, zone());
+    instructions_.Add(gap, zone());
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map(), zone());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id(), zone());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
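+  // For example, with num_parameters() == 2 the receiver (index 0) maps to
+  // -3, the first parameter (index 1) to -2 and the second (index 2) to -1,
+  // all below the spill slot range.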
+  int result = index - info()->num_parameters() - 1;
+
+  DCHECK(result < 0);
+  return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  DCHECK(-1 <= index);  // -1 is the receiver.
+  return (1 + info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(
+      LGap::START, zone())->AddMove(from, to, zone());
+}
+
+
+HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()));
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+void LChunk::CommitDependencies(Handle<Code> code) const {
+  for (MapSet::const_iterator it = deprecation_dependencies_.begin(),
+       iend = deprecation_dependencies_.end(); it != iend; ++it) {
+    Handle<Map> map = *it;
+    DCHECK(!map->is_deprecated());
+    DCHECK(map->CanBeDeprecated());
+    Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
+  }
+
+  for (MapSet::const_iterator it = stability_dependencies_.begin(),
+       iend = stability_dependencies_.end(); it != iend; ++it) {
+    Handle<Map> map = *it;
+    DCHECK(map->is_stable());
+    DCHECK(map->CanTransition());
+    Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
+  }
+
+  info_->CommitDependencies(code);
+}
+
+
+LChunk* LChunk::NewChunk(HGraph* graph) {
+  DisallowHandleAllocation no_handles;
+  DisallowHeapAllocation no_gc;
+  graph->DisallowAddingNewValues();
+  int values = graph->GetMaximumValueID();
+  CompilationInfo* info = graph->info();
+  if (values > LUnallocated::kMaxVirtualRegisters) {
+    info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
+    return NULL;
+  }
+  LAllocator allocator(values, graph);
+  LChunkBuilder builder(info, graph, &allocator);
+  LChunk* chunk = builder.Build();
+  if (chunk == NULL) return NULL;
+
+  if (!allocator.Allocate(chunk)) {
+    info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+    return NULL;
+  }
+
+  chunk->set_allocated_double_registers(
+      allocator.assigned_double_registers());
+
+  return chunk;
+}
+
+
+Handle<Code> LChunk::Codegen() {
+  MacroAssembler assembler(info()->isolate(), NULL, 0);
+  LOG_CODE_EVENT(info()->isolate(),
+                 CodeStartLinePosInfoRecordEvent(
+                     assembler.positions_recorder()));
+  // TODO(yangguo) remove this once the code serializer handles code stubs.
+  if (info()->will_serialize()) assembler.enable_serializer();
+  LCodeGen generator(this, &assembler, info());
+
+  MarkEmptyBlocks();
+
+  if (generator.GenerateCode()) {
+    generator.CheckEnvironmentUsage();
+    CodeGenerator::MakeCodePrologue(info(), "optimized");
+    Code::Flags flags = info()->flags();
+    Handle<Code> code =
+        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+    generator.FinishCode(code);
+    CommitDependencies(code);
+    code->set_is_crankshafted(true);
+    void* jit_handler_data =
+        assembler.positions_recorder()->DetachJITHandlerData();
+    LOG_CODE_EVENT(info()->isolate(),
+                   CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+
+    CodeGenerator::PrintCode(code, info());
+    DCHECK(!(info()->isolate()->serializer_enabled() &&
+             info()->GetMustNotHaveEagerFrame() &&
+             generator.NeedsEagerFrame()));
+    return code;
+  }
+  assembler.AbortedCodeGeneration();
+  return Handle<Code>::null();
+}
+
+
+void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
+  allocated_double_registers_ = allocated_registers;
+  BitVector* doubles = allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  while (!iterator.Done()) {
+    if (info()->saves_caller_doubles()) {
+      if (kDoubleSize == kPointerSize * 2) {
+        spill_slot_count_ += 2;
+      } else {
+        spill_slot_count_++;
+      }
+    }
+    iterator.Advance();
+  }
+}
+
+
+void LChunkBuilderBase::Abort(BailoutReason reason) {
+  info()->AbortOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+void LChunkBuilderBase::Retry(BailoutReason reason) {
+  info()->RetryOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+LEnvironment* LChunkBuilderBase::CreateEnvironment(
+    HEnvironment* hydrogen_env, int* argument_index_accumulator,
+    ZoneList<HValue*>* objects_to_materialize) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
+                        objects_to_materialize);
+  BailoutId ast_id = hydrogen_env->ast_id();
+  DCHECK(!ast_id.IsNone() ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
+
+  int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
+                          ? 0
+                          : hydrogen_env->specials_count();
+
+  int value_count = hydrogen_env->length() - omitted_count;
+  LEnvironment* result =
+      new(zone()) LEnvironment(hydrogen_env->closure(),
+                               hydrogen_env->frame_type(),
+                               ast_id,
+                               hydrogen_env->parameter_count(),
+                               argument_count_,
+                               value_count,
+                               outer,
+                               hydrogen_env->entry(),
+                               zone());
+  int argument_index = *argument_index_accumulator;
+
+  // Store the environment description into the environment
+  // (with holes for nested objects)
+  for (int i = 0; i < hydrogen_env->length(); ++i) {
+    if (hydrogen_env->is_special_index(i) &&
+        hydrogen_env->frame_type() != JS_FUNCTION) {
+      continue;
+    }
+    LOperand* op;
+    HValue* value = hydrogen_env->values()->at(i);
+    CHECK(!value->IsPushArguments());  // Do not deopt outgoing arguments
+    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+      op = LEnvironment::materialization_marker();
+    } else {
+      op = UseAny(value);
+    }
+    result->AddValue(op,
+                     value->representation(),
+                     value->CheckFlag(HInstruction::kUint32));
+  }
+
+  // Recursively store the nested objects into the environment
+  for (int i = 0; i < hydrogen_env->length(); ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
+    HValue* value = hydrogen_env->values()->at(i);
+    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+      AddObjectToMaterialize(value, objects_to_materialize, result);
+    }
+  }
+
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
+  return result;
+}
+
+
+// Add an object to the supplied environment and object materialization list.
+//
+// Notes:
+//
+// We are building three lists here:
+//
+// 1. In the result->object_mapping_ list (added to by the
+//    LEnvironment::Add*Object methods), we store the lengths (number
+//    of fields) of the captured objects in depth-first traversal order, or
+//    in case of duplicated objects, we store the index to the duplicate object
+//    (with a tag to differentiate between captured and duplicated objects).
+//
+// 2. The object fields are stored in the result->values_ list
+//    (added to by the LEnvironment.AddValue method) sequentially as lists
+//    of fields with holes for nested objects (the holes will be expanded
+//    later by LCodegen::AddToTranslation according to the
+//    LEnvironment.object_mapping_ list).
+//
+// 3. The auxiliary objects_to_materialize array stores the hydrogen values
+//    in the same order as result->object_mapping_ list. This is used
+//    to detect duplicate values and calculate the corresponding object index.
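+//
+// For example, for a captured object A with fields {x, B}, where B is itself
+// a captured object with the single field {y}, a depth-first walk produces:
+//   result->object_mapping_:  [length(A) = 2, length(B) = 1]
+//   result->values_:          [..., x, <hole for B>, y]
+//   objects_to_materialize:   [A, B]
+// A later occurrence of A adds a duplicate entry that refers to A's index in
+// objects_to_materialize instead of a new length entry.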
+void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
+    ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
+  int object_index = objects_to_materialize->length();
+  // Store the hydrogen value into the de-duplication array
+  objects_to_materialize->Add(value, zone());
+  // Find out whether we are storing a duplicated value
+  int previously_materialized_object = -1;
+  for (int prev = 0; prev < object_index; ++prev) {
+    if (objects_to_materialize->at(prev) == value) {
+      previously_materialized_object = prev;
+      break;
+    }
+  }
+  // Store the captured object length (or duplicated object index)
+  // into the environment. For duplicated objects, we stop here.
+  int length = value->OperandCount();
+  bool is_arguments = value->IsArgumentsObject();
+  if (previously_materialized_object >= 0) {
+    result->AddDuplicateObject(previously_materialized_object);
+    return;
+  } else {
+    result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+  }
+  // Store the captured object's fields into the environment
+  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+    LOperand* op;
+    HValue* arg_value = value->OperandAt(i);
+    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+      // Insert a hole for nested objects
+      op = LEnvironment::materialization_marker();
+    } else {
+      DCHECK(!arg_value->IsPushArguments());
+      // For ordinary values, tell the register allocator we need the value
+      // to be alive here
+      op = UseAny(arg_value);
+    }
+    result->AddValue(op,
+                     arg_value->representation(),
+                     arg_value->CheckFlag(HInstruction::kUint32));
+  }
+  // Recursively store all the nested captured objects into the environment
+  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+    HValue* arg_value = value->OperandAt(i);
+    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+      AddObjectToMaterialize(arg_value, objects_to_materialize, result);
+    }
+  }
+}
+
+
+LPhase::~LPhase() {
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceLithium(name(), chunk_);
+  }
 }