am 3795f251: Use non-PHONY dependency to avoid rebuilding art gtests repeatedly.

* commit '3795f2517f47702764f372086367c47d4cd776d9':
  Use non-PHONY dependency to avoid rebuilding art gtests repeatedly.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index faff498..8a9b5dd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -57,6 +57,11 @@
 	compiler/elf_writer_test.cc \
 	compiler/jni/jni_compiler_test.cc
 
+ifeq ($(ART_SEA_IR_MODE),true)
+TEST_COMMON_SRC_FILES += \
+	compiler/utils/scoped_hashtable_test.cc
+endif
+
 TEST_TARGET_SRC_FILES := \
 	$(TEST_COMMON_SRC_FILES)
 
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 2471244..ea7b0b0 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -75,8 +75,8 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE := core.art-host
 LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/build/Android.common.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/build/Android.oat.mk
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
 LOCAL_ADDITIONAL_DEPENDENCIES += $(HOST_CORE_IMG_OUT)
 include $(BUILD_PHONY_PACKAGE)
 endif
@@ -107,8 +107,8 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE := boot.art
 LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/build/Android.common.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/build/Android.oat.mk
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
 LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_BOOT_IMG_OUT) $(TARGET_BOOT_OAT_OUT)
 include $(BUILD_PHONY_PACKAGE)
 endif
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c99d103..8388cfb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2268,7 +2268,7 @@
       CompilerFn compiler = compiler_;
 #ifdef ART_SEA_IR_MODE
       bool use_sea = Runtime::Current()->IsSeaIRMode();
-      use_sea &&= (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+      use_sea = use_sea && (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
       if (use_sea) {
         compiler = sea_ir_compiler_;
       }
diff --git a/compiler/sea_ir/sea.cc b/compiler/sea_ir/sea.cc
index 95c36e5..c5ec2b9 100644
--- a/compiler/sea_ir/sea.cc
+++ b/compiler/sea_ir/sea.cc
@@ -17,6 +17,8 @@
 #include "sea.h"
 
 #include "file_output_stream.h"
+#include "instruction_tools.h"
+
 
 #define MAX_REACHING_DEF_ITERERATIONS (10)
 
@@ -31,6 +33,7 @@
 }
 
 void SeaGraph::DumpSea(std::string filename) const {
+  LOG(INFO) << "Starting to write SEA string to file.";
   std::string result;
   result += "digraph seaOfNodes {\n";
   for (std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
@@ -48,6 +51,97 @@
   dst->AddPredecessor(src);
 }
 
+void SeaGraph::ComputeRPO(Region* current_region, int& current_rpo) {
+  current_region->SetRPO(VISITING);
+  std::vector<sea_ir::Region*>* succs = current_region->GetSuccessors();
+  for (std::vector<sea_ir::Region*>::iterator succ_it = succs->begin();
+      succ_it != succs->end(); ++succ_it) {
+    if (NOT_VISITED == (*succ_it)->GetRPO()) {
+      SeaGraph::ComputeRPO(*succ_it, current_rpo);
+    }
+  }
+  current_region->SetRPO(current_rpo--);
+}
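
The recursion above is a standard depth-first post-order numbering counted down from regions_.size() - 1, so every region ends up numbered before its CFG successors (back edges aside). For reference, a minimal standalone sketch of the same trick; the names here are illustrative, not part of this change:

#include <cstddef>
#include <vector>

enum { kNotVisited = -1, kVisiting = -2 };

// Assigns post-order numbers counting down from N - 1, which yields a
// reverse post-order; 'next' starts at N - 1.
void NumberRPO(int node, const std::vector<std::vector<int> >& succ,
               std::vector<int>& rpo, int& next) {
  rpo[node] = kVisiting;  // Guards against revisiting along cycles.
  for (std::size_t i = 0; i < succ[node].size(); ++i) {
    if (rpo[succ[node][i]] == kNotVisited) {
      NumberRPO(succ[node][i], succ, rpo, next);
    }
  }
  rpo[node] = next--;  // All successors numbered; this node gets a smaller value.
}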
+
+void SeaGraph::ComputeIDominators() {
+  bool changed = true;
+  while (changed) {
+    changed = false;
+    // Entry node has itself as IDOM.
+    std::vector<Region*>::iterator crt_it;
+    std::set<Region*> processedNodes;
+    // Find and mark the entry node(s).
+    for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+      if ((*crt_it)->GetPredecessors()->size() == 0) {
+        processedNodes.insert(*crt_it);
+        (*crt_it)->SetIDominator(*crt_it);
+      }
+    }
+    for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+      if ((*crt_it)->GetPredecessors()->size() == 0) {
+        continue;
+      }
+      // NewIDom = first (processed) predecessor of b.
+      Region* new_dom = NULL;
+      std::vector<Region*>* preds = (*crt_it)->GetPredecessors();
+      DCHECK(NULL != preds);
+      Region* root_pred = NULL;
+      for (std::vector<Region*>::iterator pred_it = preds->begin();
+          pred_it != preds->end(); ++pred_it) {
+        if (processedNodes.end() != processedNodes.find((*pred_it))) {
+          root_pred = *pred_it;
+          new_dom = root_pred;
+          break;
+        }
+      }
+      // For all other predecessors p of b, if idom is not set,
+      // then NewIdom = Intersect(p, NewIdom)
+      for (std::vector<Region*>::const_iterator pred_it = preds->begin();
+          pred_it != preds->end(); ++pred_it) {
+        DCHECK(NULL != *pred_it);
+        // if IDOMS[p] != UNDEFINED
+        if ((*pred_it != root_pred) && (*pred_it)->GetIDominator() != NULL) {
+          DCHECK(NULL != new_dom);
+          new_dom = SeaGraph::Intersect(*pred_it, new_dom);
+        }
+      }
+      DCHECK(NULL != *crt_it);
+      if ((*crt_it)->GetIDominator() != new_dom) {
+        (*crt_it)->SetIDominator(new_dom);
+        changed = true;
+      }
+      processedNodes.insert(*crt_it);
+    }
+  }
+
+  // For easy ordering of regions, we need dominator->dominated edges.
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+      region_it != regions_.end(); region_it++) {
+    Region* idom = (*region_it)->GetIDominator();
+    if (idom != *region_it) {
+      idom->AddToIDominatedSet(*region_it);
+    }
+  }
+}
+
+Region* SeaGraph::Intersect(Region* i, Region* j) {
+  Region* finger1 = i;
+  Region* finger2 = j;
+  while (finger1 != finger2) {
+    while (finger1->GetRPO() > finger2->GetRPO()) {
+      DCHECK(NULL != finger1);
+      finger1 = finger1->GetIDominator(); // should have: finger1 != NULL
+      DCHECK(NULL != finger1);
+    }
+    while (finger1->GetRPO() < finger2->GetRPO()) {
+      DCHECK(NULL != finger2);
+      finger2 = finger2->GetIDominator(); // should have: finger2 != NULL
+      DCHECK(NULL != finger2);
+    }
+  }
+  return finger1; // finger1 should be equal to finger2 at this point.
+}
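
The two-finger walk above is the intersection step from Cooper, Harvey & Kennedy's "A Simple, Fast Dominance Algorithm": following immediate-dominator edges strictly decreases the RPO number, so both fingers climb until they meet at the nearest common dominator. An equivalent array-based sketch, assuming rpo and idom are already populated (names illustrative):

#include <vector>

int IntersectIdx(int i, int j, const std::vector<int>& idom, const std::vector<int>& rpo) {
  while (i != j) {
    while (rpo[i] > rpo[j]) i = idom[i];  // Climb the finger that sits deeper.
    while (rpo[j] > rpo[i]) j = idom[j];
  }
  return i;  // The nearest common dominator of the original i and j.
}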
+
 void SeaGraph::ComputeDownExposedDefs() {
   for (std::vector<Region*>::iterator region_it = regions_.begin();
         region_it != regions_.end(); region_it++) {
@@ -74,39 +168,39 @@
 }
 
 
-void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item,
-  uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file) {
+void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
+    const art::DexFile& dex_file) {
   const uint16_t* code = code_item->insns_;
   const size_t size_in_code_units = code_item->insns_size_in_code_units_;
-
-  Region* r = NULL;
-  // This maps  target instruction pointers to their corresponding region objects.
+  // This maps target instruction pointers to their corresponding region objects.
   std::map<const uint16_t*, Region*> target_regions;
   size_t i = 0;
-
   // Pass: Find the start instruction of basic blocks
   //         by locating targets and flow-through instructions of branches.
   while (i < size_in_code_units) {
     const art::Instruction* inst = art::Instruction::At(&code[i]);
-    if (inst->IsBranch()||inst->IsUnconditional()) {
+    if (inst->IsBranch() || inst->IsUnconditional()) {
       int32_t offset = inst->GetTargetOffset();
-      if (target_regions.end() == target_regions.find(&code[i+offset])) {
+      if (target_regions.end() == target_regions.find(&code[i + offset])) {
         Region* region = GetNewRegion();
-        target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i+offset], region));
+        target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i + offset], region));
       }
-      if (inst->CanFlowThrough() &&
-          (target_regions.end() == target_regions.find(&code[i+inst->SizeInCodeUnits()]))) {
+      if (inst->CanFlowThrough()
+          && (target_regions.end() == target_regions.find(&code[i + inst->SizeInCodeUnits()]))) {
         Region* region = GetNewRegion();
-        target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i+inst->SizeInCodeUnits()], region));
+        target_regions.insert(
+            std::pair<const uint16_t*, Region*>(&code[i + inst->SizeInCodeUnits()], region));
       }
     }
     i += inst->SizeInCodeUnits();
   }
-
   // Pass: Assign instructions to region nodes and
   //         assign branches their control flow successors.
   i = 0;
-  r = GetNewRegion();
+  Region* r = GetNewRegion();
+  SignatureNode* parameter_def_node = new sea_ir::SignatureNode(code_item->registers_size_ - 1,
+        code_item->ins_size_);
+  r->AddChild(parameter_def_node);
   sea_ir::InstructionNode* last_node = NULL;
   sea_ir::InstructionNode* node = NULL;
   while (i < size_in_code_units) {
@@ -116,7 +210,7 @@
 
     if (inst->IsBranch() || inst->IsUnconditional()) {
       int32_t offset = inst->GetTargetOffset();
-      std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i+offset]);
+      std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i + offset]);
       DCHECK(it != target_regions.end());
       AddEdge(r, it->second); // Add edge to branch target.
     }
@@ -125,24 +219,187 @@
     if (target_regions.end() != it) {
       // Get the already created region because this is a branch target.
       Region* nextRegion = it->second;
-      if (last_node->GetInstruction()->IsBranch() && last_node->GetInstruction()->CanFlowThrough()) {
+      if (last_node->GetInstruction()->IsBranch()
+          && last_node->GetInstruction()->CanFlowThrough()) {
         AddEdge(r, it->second); // Add flow-through edge.
       }
       r = nextRegion;
     }
-    bool definesRegister = (0 !=
-            InstructionTools::instruction_attributes_[inst->Opcode()] && (1 << kDA));
-    LOG(INFO) << inst->GetDexPc(code) << "*** " << inst->DumpString(&dex_file)
-            << " region:" <<r->StringId() << "Definition?" << definesRegister << std::endl;
+    bool definesRegister = (0 != (InstructionTools::instruction_attributes_[inst->Opcode()]
+        & (1 << kDA)));
+    LOG(INFO) << inst->GetDexPc(code) << "*** " << inst->DumpString(&dex_file)
+        << " region:" << r->StringId() << " Definition?" << definesRegister << std::endl;
     r->AddChild(node);
     i += inst->SizeInCodeUnits();
   }
+}
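
The two passes above follow the classic basic-block construction: pass one collects the "leaders" (branch targets and fall-through points), pass two cuts a new region at each leader. A reduced sketch of the leader pass, using a simplified instruction record instead of art::Instruction (all names here are illustrative):

#include <cstddef>
#include <set>
#include <vector>

struct SimpleInst {
  bool is_branch;
  std::size_t target_index;  // Index of the branch-target instruction.
};

std::set<std::size_t> FindLeaders(const std::vector<SimpleInst>& insts) {
  std::set<std::size_t> leaders;
  leaders.insert(0);  // The method entry always starts a block.
  for (std::size_t i = 0; i < insts.size(); ++i) {
    if (insts[i].is_branch) {
      leaders.insert(insts[i].target_index);  // A branch target starts a block.
      leaders.insert(i + 1);                  // So does the fall-through instruction.
    }
  }
  return leaders;
}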
 
+void SeaGraph::ComputeRPO() {
+  int rpo_id = regions_.size() - 1;
+  for (std::vector<Region*>::const_iterator crt_it = regions_.begin(); crt_it != regions_.end();
+      ++crt_it) {
+    if ((*crt_it)->GetPredecessors()->size() == 0) {
+      ComputeRPO(*crt_it, rpo_id);
+    }
+  }
+}
+
+// Performs the renaming phase in traditional SSA transformations.
+// See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.
+void SeaGraph::RenameAsSSA() {
+  utils::ScopedHashtable<int, InstructionNode*> scoped_table;
+  scoped_table.OpenScope();
+  for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+      region_it++) {
+    if ((*region_it)->GetIDominator() == *region_it) {
+      RenameAsSSA(*region_it, &scoped_table);
+    }
+  }
+
+  scoped_table.CloseScope();
+}
+
+void SeaGraph::ConvertToSSA() {
+  // Pass: find global names.
+  // The map @block maps registers to the blocks in which they are defined.
+  std::map<int, std::set<Region*> > blocks;
+  // The set @globals records registers whose use
+  // is in a different block than the corresponding definition.
+  std::set<int> globals;
+  for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+      region_it++) {
+    std::set<int> var_kill;
+    std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
+    for (std::vector<InstructionNode*>::iterator inst_it = instructions->begin();
+        inst_it != instructions->end(); inst_it++) {
+      std::vector<int> used_regs = (*inst_it)->GetUses();
+      for (std::size_t i = 0; i < used_regs.size(); i++) {
+        int used_reg = used_regs[i];
+        if (var_kill.find(used_reg) == var_kill.end()) {
+          globals.insert(used_reg);
+        }
+      }
+      const int reg_def = (*inst_it)->GetResultRegister();
+      if (reg_def != NO_REGISTER) {
+        var_kill.insert(reg_def);
+        blocks.insert(std::pair<int, std::set<Region*> >(reg_def, std::set<Region*>()));
+        std::set<Region*>* reg_def_blocks = &(blocks.find(reg_def)->second);
+        reg_def_blocks->insert(*region_it);
+      }
+  }
+
+  // Pass: Actually add phi-nodes to regions.
+  for (std::set<int>::const_iterator globals_it = globals.begin();
+      globals_it != globals.end(); globals_it++) {
+    int global = *globals_it;
+    // Copy the set, because we will modify the worklist as we go.
+    std::set<Region*> worklist((*(blocks.find(global))).second);
+    for (std::set<Region*>::const_iterator b_it = worklist.begin(); b_it != worklist.end(); b_it++) {
+      std::set<Region*>* df = (*b_it)->GetDominanceFrontier();
+      for (std::set<Region*>::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) {
+        if ((*df_it)->InsertPhiFor(global)) {
+          // Only restart the worklist iteration if the dominance frontier
+          // element was not already in the worklist.
+          if (worklist.find(*df_it) == worklist.end()) {
+            worklist.insert(*df_it);
+            b_it = worklist.begin();
+            break;
+          }
+        }
+      }
+    }
+  }
+  // Pass: Build edges to the definition corresponding to each use.
+  // (This corresponds to the renaming phase in traditional SSA transformations.
+  // See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
+  RenameAsSSA();
+}
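
ConvertToSSA above follows the textbook recipe: for each global name, phi-functions go into the iterated dominance frontier of its definition blocks. A compact worklist sketch of that placement step (names illustrative; the code above restarts its set iteration instead of keeping an explicit worklist, with the same effect):

#include <set>
#include <vector>

// Returns the blocks needing a phi for one variable, given its definition
// blocks and the per-block dominance frontiers df[b].
std::set<int> PlacePhis(const std::set<int>& def_blocks,
                        const std::vector<std::set<int> >& df) {
  std::set<int> phi_blocks;
  std::vector<int> worklist(def_blocks.begin(), def_blocks.end());
  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();
    for (std::set<int>::const_iterator it = df[b].begin(); it != df[b].end(); ++it) {
      if (phi_blocks.insert(*it).second) {
        worklist.push_back(*it);  // A new phi is itself a definition; process its DF too.
      }
    }
  }
  return phi_blocks;
}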
+
+void SeaGraph::RenameAsSSA(Region* crt_region,
+    utils::ScopedHashtable<int, InstructionNode*>* scoped_table) {
+  scoped_table->OpenScope();
+  // Rename phi nodes defined in the current region.
+  std::vector<PhiInstructionNode*>* phis = crt_region->GetPhiNodes();
+  for (std::vector<PhiInstructionNode*>::iterator phi_it = phis->begin();
+      phi_it != phis->end(); phi_it++) {
+    int reg_no = (*phi_it)->GetRegisterNumber();
+    scoped_table->Add(reg_no, (*phi_it));
+  }
+  // Rename operands of instructions from the current region.
+  std::vector<InstructionNode*>* instructions = crt_region->GetInstructions();
+  for (std::vector<InstructionNode*>::const_iterator instructions_it = instructions->begin();
+      instructions_it != instructions->end(); instructions_it++) {
+    InstructionNode* current_instruction = (*instructions_it);
+    // Rename uses.
+    std::vector<int> used_regs = current_instruction->GetUses();
+    for (std::vector<int>::const_iterator reg_it = used_regs.begin();
+        reg_it != used_regs.end(); reg_it++) {
+      int current_used_reg = (*reg_it);
+      InstructionNode* definition = scoped_table->Lookup(current_used_reg);
+      current_instruction->RenameToSSA(current_used_reg, definition);
+    }
+    // Update scope table with latest definitions.
+    std::vector<int> def_regs = current_instruction->GetDefinitions();
+    for (std::vector<int>::const_iterator reg_it = def_regs.begin();
+            reg_it != def_regs.end(); reg_it++) {
+      int current_defined_reg = (*reg_it);
+      scoped_table->Add(current_defined_reg, current_instruction);
+    }
+  }
+  // Fill in uses of phi functions in CFG successor regions.
+  const std::vector<Region*>* successors = crt_region->GetSuccessors();
+  for (std::vector<Region*>::const_iterator successors_it = successors->begin();
+      successors_it != successors->end(); successors_it++) {
+    Region* successor = (*successors_it);
+    successor->SetPhiDefinitionsForUses(scoped_table, crt_region);
+  }
+
+  // Rename all successors in the dominators tree.
+  const std::set<Region*>* dominated_nodes = crt_region->GetIDominatedSet();
+  for (std::set<Region*>::const_iterator dominated_nodes_it = dominated_nodes->begin();
+      dominated_nodes_it != dominated_nodes->end(); dominated_nodes_it++) {
+    Region* dominated_node = (*dominated_nodes_it);
+    RenameAsSSA(dominated_node, scoped_table);
+  }
+  scoped_table->CloseScope();
+}
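
The recursion above deliberately follows the dominator tree rather than CFG edges: definitions made in one subtree must not be visible while a sibling subtree is renamed, and the OpenScope()/CloseScope() bracket enforces exactly that. A minimal shadowing demo with the hashtable introduced by this change (values illustrative):

#include "utils/scoped_hashtable.h"

void ScopedShadowingDemo() {
  utils::ScopedHashtable<int, const char*> names;
  names.OpenScope();             // Scope for dominator-tree node A.
  names.Add(1, "def-in-A");
  names.OpenScope();             // Descend into A's child B.
  names.Add(1, "def-in-B");      // Shadows A's binding while inside B.
  // names.Lookup(1) now yields "def-in-B".
  names.CloseScope();            // Leave B before visiting its sibling C.
  // names.Lookup(1) yields "def-in-A" again, which is what C must observe.
  names.CloseScope();
}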
+
+void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item,
+  uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file) {
+  // Two passes: build the intermediate (non-SSA) structure of the sea-ir for the function.
+  BuildMethodSeaGraph(code_item, dex_file);
+  // Pass: Compute the reverse post-order of regions.
+  ComputeRPO();
+  // Multiple passes: compute immediate dominators.
+  ComputeIDominators();
   // Pass: compute downward-exposed definitions.
   ComputeDownExposedDefs();
-
-  // Multiple Passes: Compute reaching definitions (iterative fixed-point algorithm)
+  // Multiple passes (iterative fixed-point algorithm): Compute reaching definitions.
   ComputeReachingDefs();
+  // Pass (O(n log N)): Compute the dominance frontier for region nodes.
+  ComputeDominanceFrontier();
+  // Two Passes: Phi node insertion.
+  ConvertToSSA();
+}
+
+
+void SeaGraph::ComputeDominanceFrontier() {
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+      region_it != regions_.end(); region_it++) {
+    std::vector<Region*>* preds = (*region_it)->GetPredecessors();
+    if (preds->size() > 1) {
+      for (std::vector<Region*>::iterator pred_it = preds->begin();
+          pred_it != preds->end(); pred_it++) {
+        Region* runner = *pred_it;
+        while (runner != (*region_it)->GetIDominator()) {
+          runner->AddToDominanceFrontier(*region_it);
+          runner = runner->GetIDominator();
+        }
+      }
+    }
+  }
 }
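
The runner loop above is the dominance-frontier algorithm cited in the header: only join points (two or more predecessors) can appear in a frontier, and each predecessor walks up idom edges, stopping just short of idom(b). On a diamond A->{B,C}->D with idom(D) = A, the runners from B and C each add D to their frontiers, while A, which dominates D, does not. An array-based sketch (names illustrative):

#include <cstddef>
#include <set>
#include <vector>

void ComputeDF(const std::vector<std::vector<int> >& preds,
               const std::vector<int>& idom,
               std::vector<std::set<int> >& df) {
  for (std::size_t b = 0; b < preds.size(); ++b) {
    if (preds[b].size() < 2) continue;  // Only join points enter frontiers.
    for (std::size_t p = 0; p < preds[b].size(); ++p) {
      int runner = preds[b][p];
      while (runner != idom[b]) {  // Stop short of idom(b) itself.
        df[runner].insert(static_cast<int>(b));
        runner = idom[runner];
      }
    }
  }
}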
 
 Region* SeaGraph::GetNewRegion() {
@@ -156,6 +413,17 @@
   regions_.push_back(r);
 }
 
+void SeaNode::AddSuccessor(Region* successor) {
+  DCHECK(successor) << "Tried to add NULL successor to SEA node.";
+  successors_.push_back(successor);
+  return;
+}
+
+void SeaNode::AddPredecessor(Region* predecessor) {
+  DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
+  predecessors_.push_back(predecessor);
+}
+
 void Region::AddChild(sea_ir::InstructionNode* instruction) {
   DCHECK(instruction) << "Tried to add NULL instruction to region node.";
   instructions_.push_back(instruction);
@@ -168,33 +436,28 @@
   return NULL;
 }
 
-void InstructionNode::ToDot(std::string& result) const {
-  result += "// Instruction: \n" + StringId() +
-      " [label=\"" + instruction_->DumpString(NULL) + "\"";
-  if (de_def_) {
-    result += "style=bold";
-  }
-  result += "];\n";
-}
-
-int InstructionNode::GetResultRegister() const {
-  if (!InstructionTools::IsDefinition(instruction_)) {
-    return NO_REGISTER;
-  }
-  return instruction_->VRegA();
-}
-
-void InstructionNode::MarkAsDEDef() {
-  de_def_ = true;
-}
-
 void Region::ToDot(std::string& result) const {
-  result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "\"];";
-  // Save instruction nodes that belong to this region.
+  result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "(rpo=";
+  std::stringstream ss;
+  ss << rpo_;
+  result.append(ss.str());
+  if (NULL != GetIDominator()) {
+    result += " dom=" + GetIDominator()->StringId();
+  }
+  result += ")\"];\n";
+
+  // Save phi-nodes.
+  for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
+      cit != phi_instructions_.end(); cit++) {
+    (*cit)->ToDot(result);
+    result += StringId() + " -> " + (*cit)->StringId() + "; // phi-function \n";
+  }
+
+  // Save instruction nodes.
   for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
       cit != instructions_.end(); cit++) {
     (*cit)->ToDot(result);
-    result += StringId() + " -> " + (*cit)->StringId() + ";\n";
+    result += StringId() + " -> " + (*cit)->StringId() + "; // region -> instruction \n";
   }
 
   for (std::vector<Region*>::const_iterator cit = successors_.begin(); cit != successors_.end();
@@ -202,7 +465,6 @@
     DCHECK(NULL != *cit) << "Null successor found for SeaNode " << GetLastChild()->StringId() << ".";
     result += GetLastChild()->StringId() + " -> " + (*cit)->StringId() + ";\n\n";
   }
-
   // Save reaching definitions.
   for (std::map<int, std::set<sea_ir::InstructionNode*>* >::const_iterator cit =
       reaching_defs_.begin();
@@ -216,11 +478,15 @@
          " [style=dotted]; // Reaching def.\n";
     }
   }
-
+  // Save dominance frontier.
+  for (std::set<Region*>::const_iterator cit = df_.begin(); cit != df_.end(); cit++) {
+    result += StringId() +
+        " -> " + (*cit)->StringId() +
+        " [color=gray]; // Dominance frontier.\n";
+  }
   result += "// End Region.\n";
 }
 
-
 void Region::ComputeDownExposedDefs() {
   for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
       inst_it != instructions_.end(); inst_it++) {
@@ -232,14 +498,12 @@
       res->second = *inst_it;
     }
   }
-
   for (std::map<int, sea_ir::InstructionNode*>::const_iterator cit = de_defs_.begin();
       cit != de_defs_.end(); cit++) {
     (*cit).second->MarkAsDEDef();
   }
 }
 
-
 const std::map<int, sea_ir::InstructionNode*>* Region::GetDownExposedDefs() const {
   return &de_defs_;
 }
@@ -268,7 +532,6 @@
       reaching_defs.insert(
           std::pair<int const, std::set<InstructionNode*>*>(de_def->first, solo_def));
     }
-    LOG(INFO) << "Adding to " <<StringId() << "reaching set of " << (*pred_it)->StringId();
     reaching_defs.insert(pred_reaching->begin(), pred_reaching->end());
 
     // Now we combine the reaching map coming from the current predecessor (reaching_defs)
@@ -315,15 +578,124 @@
   return changed;
 }
 
-void SeaNode::AddSuccessor(Region* successor) {
-  DCHECK(successor) << "Tried to add NULL successor to SEA node.";
-  successors_.push_back(successor);
-  return;
+bool Region::InsertPhiFor(int reg_no) {
+  if (!ContainsPhiFor(reg_no)) {
+    phi_set_.insert(reg_no);
+    PhiInstructionNode* new_phi = new PhiInstructionNode(reg_no);
+    phi_instructions_.push_back(new_phi);
+    return true;
+  }
+  return false;
 }
 
-void SeaNode::AddPredecessor(Region* predecessor) {
-  DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
-  predecessors_.push_back(predecessor);
+void Region::SetPhiDefinitionsForUses(
+    const utils::ScopedHashtable<int, InstructionNode*>* scoped_table, Region* predecessor) {
+  int predecessor_id = -1;
+  for (unsigned int crt_pred_id = 0; crt_pred_id < predecessors_.size(); crt_pred_id++) {
+    if (predecessors_.at(crt_pred_id) == predecessor) {
+      predecessor_id = crt_pred_id;
+    }
+  }
+  DCHECK_NE(-1, predecessor_id);
+  for (std::vector<PhiInstructionNode*>::iterator phi_it = phi_instructions_.begin();
+      phi_it != phi_instructions_.end(); phi_it++) {
+    PhiInstructionNode* phi = (*phi_it);
+    int reg_no = phi->GetRegisterNumber();
+    InstructionNode* definition = scoped_table->Lookup(reg_no);
+    phi->RenameToSSA(reg_no, definition, predecessor_id);
+  }
 }
 
+void InstructionNode::ToDot(std::string& result) const {
+  result += "// Instruction: \n" + StringId() +
+      " [label=\"" + instruction_->DumpString(NULL) + "\"";
+  if (de_def_) {
+    result += "style=bold";
+  }
+  result += "];\n";
+  // SSA definitions:
+  for (std::map<int, InstructionNode* >::const_iterator def_it = definition_edges_.begin();
+      def_it != definition_edges_.end(); def_it++) {
+    if (NULL != def_it->second) {
+      result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"";
+      std::stringstream ss;
+      ss << def_it->first;
+      result.append(ss.str());
+      result += "\"] ; // ssa edge\n";
+    }
+  }
+}
+
+void InstructionNode::MarkAsDEDef() {
+  de_def_ = true;
+}
+
+int InstructionNode::GetResultRegister() const {
+  if (instruction_->HasVRegA()) {
+    return instruction_->VRegA();
+  }
+  return NO_REGISTER;
+}
+
+std::vector<int> InstructionNode::GetDefinitions() const {
+  // TODO: Extend this to handle instructions defining more than one register (if any).
+  // The return value should then become a pointer to a member field; for now we return
+  // by value to avoid leaking dynamically allocated objects.
+  std::vector<int> definitions;
+  int result = GetResultRegister();
+  if (NO_REGISTER != result) {
+    definitions.push_back(result);
+  }
+  return definitions;
+}
+
+std::vector<int> InstructionNode::GetUses() {
+  std::vector<int> uses; // Using vector<> instead of set<> because order matters.
+
+  if (!InstructionTools::IsDefinition(instruction_) && (instruction_->HasVRegA())) {
+    int vA = instruction_->VRegA();
+    uses.push_back(vA);
+  }
+  if (instruction_->HasVRegB()) {
+    int vB = instruction_->VRegB();
+    uses.push_back(vB);
+  }
+  if (instruction_->HasVRegC()) {
+    int vC = instruction_->VRegC();
+    uses.push_back(vC);
+  }
+  // TODO: Add support for function argument registers.
+  return uses;
+}
+
+void PhiInstructionNode::ToDot(std::string& result) const {
+  result += "// PhiInstruction: \n" + StringId() +
+      " [label=\"" + "PHI(";
+  std::stringstream phi_reg_stream;
+  phi_reg_stream << register_no_;
+  result.append(phi_reg_stream.str());
+  result += ")\"";
+  result += "];\n";
+
+  for (std::vector<std::map<int, InstructionNode*>*>::const_iterator pred_it = definition_edges_.begin();
+      pred_it != definition_edges_.end(); pred_it++) {
+    std::map<int, InstructionNode*>* defs_from_pred = *pred_it;
+    for (std::map<int, InstructionNode* >::const_iterator def_it = defs_from_pred->begin();
+        def_it != defs_from_pred->end(); def_it++) {
+      if (NULL != def_it->second) {
+        result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"vR = ";
+        std::stringstream ss;
+        ss << def_it->first;
+        result.append(ss.str());
+        result += "\"] ; // phi-ssa edge\n";
+      } else {
+        result += StringId() + " -> " + StringId() +"[color=blue,label=\"vR = ";
+        std::stringstream ss;
+        ss << def_it->first;
+        result.append(ss.str());
+        result += "\"] ; // empty phi-ssa edge\n";
+      }
+    }
+  }
+}
 } // end namespace sea_ir
diff --git a/compiler/sea_ir/sea.h b/compiler/sea_ir/sea.h
index f2c7146..a133678 100644
--- a/compiler/sea_ir/sea.h
+++ b/compiler/sea_ir/sea.h
@@ -24,11 +24,22 @@
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "sea_ir/instruction_tools.h"
+#include "utils/scoped_hashtable.h"
 
-#define NO_REGISTER       (-1)
 
 namespace sea_ir {
+
+#define NO_REGISTER             (-1)
+
+// Reverse post-order numbering constants
+enum RegionNumbering {
+  NOT_VISITED = -1,
+  VISITING = -2
+};
+
 class Region;
+class InstructionNode;
+class PhiInstructionNode;
 
 class SeaNode {
  public:
@@ -37,16 +48,20 @@
     ss << id_;
     string_id_.append(ss.str());
   }
-
   // Adds CFG predecessors and successors to each block.
   void AddSuccessor(Region* successor);
   void AddPredecessor(Region* predecessor);
 
+  std::vector<sea_ir::Region*>* GetSuccessors() {
+    return &successors_;
+  }
+  std::vector<sea_ir::Region*>* GetPredecessors() {
+    return &predecessors_;
+  }
   // Returns the id of the current block as string
   const std::string& StringId() const {
     return string_id_;
   }
-
   // Appends to @result a dot language formatted string representing the node and
   //    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
   //    builds a complete dot graph, but without prolog ("digraph {") and epilog ("}").
@@ -70,93 +85,252 @@
 
 class InstructionNode: public SeaNode {
  public:
-  explicit InstructionNode(const art::Instruction* in):SeaNode(), instruction_(in), de_def_(false) {}
-
+  explicit InstructionNode(const art::Instruction* in):
+    SeaNode(), instruction_(in), de_def_(false) {}
+  // Returns the Dalvik instruction around which this InstructionNode is wrapped.
   const art::Instruction* GetInstruction() const {
     DCHECK(NULL != instruction_) << "Tried to access NULL instruction in an InstructionNode.";
     return instruction_;
   }
   // Returns the register that is defined by the current instruction, or NO_REGISTER otherwise.
-  int GetResultRegister() const;
+  virtual int GetResultRegister() const;
+  // Returns the set of registers defined by the current instruction.
+  virtual std::vector<int> GetDefinitions() const;
+  // Returns the set of register numbers that are used by the instruction.
+  virtual std::vector<int> GetUses();
+  // Appends to @result the .dot string representation of the instruction.
   void ToDot(std::string& result) const;
+  // Mark the current instruction as a downward-exposed definition.
   void MarkAsDEDef();
+  // Rename the use of @reg_no to refer to the instruction @definition,
+  // essentially creating SSA form.
+  void RenameToSSA(int reg_no, InstructionNode* definition) {
+    definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
+  }
 
  private:
   const art::Instruction* const instruction_;
+  std::map<int, InstructionNode* > definition_edges_;
   bool de_def_;
 };
 
+class SignatureNode: public InstructionNode {
+ public:
+  explicit SignatureNode(unsigned int start_register, unsigned int count):
+      InstructionNode(NULL), defined_regs_() {
+    for (unsigned int crt_offset = 0; crt_offset < count; crt_offset++) {
+      defined_regs_.push_back(start_register - crt_offset);
+    }
+  }
 
+  void ToDot(std::string& result) const {
+    result += StringId() + "[label=\"signature:";
+    std::stringstream vector_printer;
+    if (!defined_regs_.empty()) {
+      for (unsigned int crt_el = 0; crt_el < defined_regs_.size() - 1; crt_el++) {
+        vector_printer << defined_regs_[crt_el] << ",";
+      }
+      vector_printer << defined_regs_[defined_regs_.size() - 1] << ";";
+    }
+    result.append(vector_printer.str());
+    result += "\"] // signature node\n";
+  }
+
+  std::vector<int> GetDefinitions() const {
+    return defined_regs_;
+  }
+
+  int GetResultRegister() const {
+    return NO_REGISTER;
+  }
+
+  std::vector<int> GetUses() {
+    return std::vector<int>();
+  }
+
+ private:
+  std::vector<int> defined_regs_;
+};
+
+
+class PhiInstructionNode: public InstructionNode {
+ public:
+  explicit PhiInstructionNode(int register_no):
+    InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
+  // Appends to @result the .dot string representation of the instruction.
+  void ToDot(std::string& result) const;
+  // Returns the register on which this phi-function is used.
+  int GetRegisterNumber() {
+    return register_no_;
+  }
+
+  // Rename the use of @reg_no to refer to the instruction @definition.
+  // Phi-functions are different than normal instructions in that they
+  // have multiple predecessor regions; this is why RenameToSSA has
+  // the additional parameter specifying that @parameter_id is the incoming
+  // edge for @definition, essentially creating SSA form.
+  void RenameToSSA(int reg_no, InstructionNode* definition, unsigned int predecessor_id) {
+    DCHECK(NULL != definition) << "Tried to rename to SSA using a NULL definition for "
+        << StringId() << " register " << reg_no;
+    if (definition_edges_.size() < predecessor_id+1) {
+      definition_edges_.resize(predecessor_id+1, NULL);
+    }
+
+    if (NULL == definition_edges_.at(predecessor_id)) {
+      definition_edges_[predecessor_id] = new std::map<int, InstructionNode*>();
+    }
+    definition_edges_[predecessor_id]->insert(std::pair<int, InstructionNode*>(reg_no, definition));
+  }
+
+ private:
+  int register_no_;
+  std::vector<std::map<int, InstructionNode*>*> definition_edges_;
+};
 
 class Region : public SeaNode {
  public:
-  explicit Region():SeaNode(), reaching_defs_size_(-1) {}
+  explicit Region():
+    SeaNode(), reaching_defs_size_(0), rpo_(NOT_VISITED), idom_(NULL),
+    idominated_set_(), df_(), phi_set_() {}
 
-  // Adds @inst as an instruction node child in the current region.
-  void AddChild(sea_ir::InstructionNode* inst);
+  // Adds @instruction as an instruction node child in the current region.
+  void AddChild(sea_ir::InstructionNode* instruction);
 
   // Returns the last instruction node child of the current region.
   // This child has the CFG successors pointing to the new regions.
   SeaNode* GetLastChild() const;
-
+  // Returns all the child instructions of this region, in program order.
+  std::vector<InstructionNode*>* GetInstructions() {
+    return &instructions_;
+  }
   // Appends to @result a dot language formatted string representing the node and
   //    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
   //    builds a complete dot graph (without prolog and epilog though).
   virtual void ToDot(std::string& result) const;
-
+
   // Computes Downward Exposed Definitions for the current node.
   void ComputeDownExposedDefs();
   const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
 
   // Performs one iteration of the reaching definitions algorithm
   // and returns true if the reaching definitions set changed.
   bool UpdateReachingDefs();
-
   // Returns the set of reaching definitions for the current region.
   std::map<int, std::set<sea_ir::InstructionNode*>* >* GetReachingDefs();
 
+  void SetRPO(int rpo) {
+    rpo_ = rpo;
+  }
+
+  int GetRPO() {
+    return rpo_;
+  }
+
+  void SetIDominator(Region* dom) {
+    idom_ = dom;
+  }
+
+  Region* GetIDominator() const {
+    return idom_;
+  }
+
+  void AddToIDominatedSet(Region* dominated) {
+    idominated_set_.insert(dominated);
+  }
+
+  const std::set<Region*>* GetIDominatedSet() {
+    return &idominated_set_;
+  }
+
+  // Adds @df_reg to the dominance frontier of the current region.
+  void AddToDominanceFrontier(Region* df_reg) {
+    df_.insert(df_reg);
+  }
+  // Returns the dominance frontier of the current region.
+  // Precondition: SeaGraph::ComputeDominanceFrontier()
+  std::set<Region*>* GetDominanceFrontier() {
+    return &df_;
+  }
+  // Returns true if the region contains a phi function for @reg_no.
+  bool ContainsPhiFor(int reg_no) {
+    return (phi_set_.end() != phi_set_.find(reg_no));
+  }
+  // Returns the phi-functions from the region.
+  std::vector<PhiInstructionNode*>* GetPhiNodes() {
+    return &phi_instructions_;
+  }
+  // Adds a phi-function for @reg_no to this region.
+  // Note: The insertion order does not matter, as phi-functions
+  //       are conceptually executed at the same time.
+  bool InsertPhiFor(int reg_no);
+  // Sets the phi-function uses to be as defined in @scoped_table for predecessor @predecessor.
+  void SetPhiDefinitionsForUses(const utils::ScopedHashtable<int, InstructionNode*>* scoped_table,
+      Region* predecessor);
+
  private:
   std::vector<sea_ir::InstructionNode*> instructions_;
   std::map<int, sea_ir::InstructionNode*> de_defs_;
   std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs_;
   int reaching_defs_size_;
+  int rpo_;
+  // Immediate dominator node.
+  Region* idom_;
+  // The set of nodes immediately dominated by the region.
+  std::set<Region*> idominated_set_;
+  // Records the dominance frontier.
+  std::set<Region*> df_;
+  // Records the set of register numbers that have phi nodes in this region.
+  std::set<int> phi_set_;
+  std::vector<PhiInstructionNode*> phi_instructions_;
 };
 
-
-
 class SeaGraph {
  public:
   static SeaGraph* GetCurrentGraph();
+
   void CompileMethod(const art::DexFile::CodeItem* code_item,
       uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file);
-
-  // Returns a string representation of the region and its Instruction children
+  // Returns a string representation of the region and its Instruction children.
   void DumpSea(std::string filename) const;
-
-  // Adds a CFG edge from @src node to @dst node.
-  void AddEdge(Region* src, Region* dst) const;
-
-  // Computes Downward Exposed Definitions for all regions in the graph.
-  void ComputeDownExposedDefs();
-
-  // Computes the reaching definitions set following the equations from
-  // Cooper & Torczon, "Engineering a Compiler", second edition, page 491
-  void ComputeReachingDefs();
-
-  /*** Static helper functions follow: ***/
-  static int ParseInstruction(const uint16_t* code_ptr,
-      art::DecodedInstruction* decoded_instruction);
-  static bool IsInstruction(const uint16_t* code_ptr);
+  // Recursively computes the reverse postorder value for @crt_bb and successors.
+  static void ComputeRPO(Region* crt_bb, int& crt_rpo);
+  // Returns the "lowest common ancestor" of @i and @j in the dominator tree.
+  static Region* Intersect(Region* i, Region* j);
 
  private:
-  // Registers the parameter as a child region of the SeaGraph instance
-  void AddRegion(Region* r);
-  // Returns new region and registers it with the  SeaGraph instance
+  // Registers @childReg as a region belonging to the SeaGraph instance.
+  void AddRegion(Region* childReg);
+  // Returns a new region and registers it with the SeaGraph instance.
   Region* GetNewRegion();
+  // Adds a CFG edge from @src node to @dst node.
+  void AddEdge(Region* src, Region* dst) const;
+  // Builds the non-SSA sea-ir representation of the function @code_item from @dex_file.
+  void BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item, const art::DexFile& dex_file);
+  // Computes immediate dominators for each region.
+  // Precondition: BuildMethodSeaGraph()
+  void ComputeIDominators();
+  // Computes Downward Exposed Definitions for all regions in the graph.
+  void ComputeDownExposedDefs();
+  // Computes the reaching definitions set following the equations from
+  // Cooper & Torczon, "Engineering a Compiler", second edition, page 491.
+  // Precondition: ComputeDownExposedDefs()
+  void ComputeReachingDefs();
+  // Computes the reverse-postorder numbering for the region nodes.
+  // Precondition: BuildMethodSeaGraph()
+  void ComputeRPO();
+  // Computes the dominance frontier for all regions in the graph,
+  // following the algorithm from
+  // Cooper & Torczon, "Engineering a Compiler", second edition, page 499.
+  // Precondition: ComputeIDominators()
+  void ComputeDominanceFrontier();
+
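+  // Inserts phi-functions and renames registers, converting the graph to SSA form.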
+  void ConvertToSSA();
+  // Identifies the definitions corresponding to uses for region @node
+  // by using the scoped hashtable of names @scoped_table.
+  void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
+  void RenameAsSSA();
+
   static SeaGraph graph_;
   std::vector<Region*> regions_;
 };
-
-
 } // end namespace sea_ir
 #endif
diff --git a/compiler/utils/scoped_hashtable.h b/compiler/utils/scoped_hashtable.h
new file mode 100644
index 0000000..5e6c64b
--- /dev/null
+++ b/compiler/utils/scoped_hashtable.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <map>
+#include <list>
+
+#ifndef SCOPED_HASHTABLE_
+#define SCOPED_HASHTABLE_
+
+namespace utils {
+template <typename K, typename V>
+class ScopedHashtable {
+ public:
+  explicit ScopedHashtable():scopes() {
+  }
+
+  void OpenScope() {
+    scopes.push_front(std::map<K, V>());
+  }
+
+  // Looks up the entry for key K starting from the current (topmost) scope
+  // and returns its value if found, or NULL otherwise.
+  V Lookup(K k) const {
+    for (typename std::list<std::map<K, V> >::const_iterator scopes_it = scopes.begin();
+        scopes_it != scopes.end(); scopes_it++) {
+      typename std::map<K, V>::const_iterator result_it = (*scopes_it).find(k);
+      if (result_it != (*scopes_it).end()) {
+        return (*result_it).second;
+      }
+    }
+    return NULL;
+  }
+
+  // Adds a new entry in the current (topmost) scope.
+  void Add(K k, V v) {
+    scopes.front().erase(k);
+    scopes.front().insert(std::pair< K, V >(k, v));
+  }
+
+  // Removes the topmost scope.
+  bool CloseScope() {
+    // Added check to uniformly handle undefined behavior
+    // when removing scope and the list of scopes is empty.
+    if (scopes.size() > 0) {
+      scopes.pop_front();
+      return true;
+    }
+    return false;
+  }
+
+ private:
+  std::list<std::map<K, V> > scopes;
+};
+} // end namespace utils
+
+#endif  // SCOPED_HASHTABLE_
diff --git a/compiler/utils/scoped_hashtable_test.cc b/compiler/utils/scoped_hashtable_test.cc
new file mode 100644
index 0000000..072da8c
--- /dev/null
+++ b/compiler/utils/scoped_hashtable_test.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+#include "scoped_hashtable.h"
+
+using utils::ScopedHashtable;
+
+namespace art {
+
+class Value {
+ public:
+  explicit Value(int v):value_(v) {}
+  int value_;
+};
+
+class ScopedHashtableTest : public CommonTest {
+};
+
+TEST_F(ScopedHashtableTest, Basics) {
+  ScopedHashtable<int, Value*> sht;
+  // Check table is empty when no scope is open.
+  EXPECT_TRUE(NULL == sht.Lookup(1));
+
+  // Check table is empty when a scope is open.
+  sht.OpenScope();
+  EXPECT_TRUE(NULL == sht.Lookup(1));
+  // Check table is empty after closing scope.
+  EXPECT_EQ(sht.CloseScope(), true);
+  // Check closing a scope on an empty table is a no-op.
+  EXPECT_EQ(sht.CloseScope(), false);
+  // Check that find in current scope works.
+  sht.OpenScope();
+  sht.Add(1, new Value(1));
+  EXPECT_EQ(sht.Lookup(1)->value_, 1);
+  // Check that updating values in current scope works.
+  sht.Add(1, new Value(2));
+  EXPECT_EQ(sht.Lookup(1)->value_, 2);
+  // Check that find works in previous scope.
+  sht.OpenScope();
+  EXPECT_EQ(sht.Lookup(1)->value_, 2);
+  // Check that shadowing scopes works.
+  sht.Add(1, new Value(3));
+  EXPECT_EQ(sht.Lookup(1)->value_, 3);
+  // Check that having multiple keys works correctly.
+  sht.Add(2, new Value(4));
+  EXPECT_EQ(sht.Lookup(1)->value_, 3);
+  EXPECT_EQ(sht.Lookup(2)->value_, 4);
+  // Check that scope removal works correctly.
+  sht.CloseScope();
+  EXPECT_EQ(sht.Lookup(1)->value_, 2);
+  EXPECT_TRUE(NULL == sht.Lookup(2));
+}
+
+} // end namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b627559..24df572 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -53,7 +53,7 @@
 class ScopedContentionRecorder;
 class Thread;
 
-const bool kDebugLocking = kIsDebugBuild;
+const bool kDebugLocking = true || kIsDebugBuild;
 
 // Base class for all Mutex implementations
 class BaseMutex {
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 6527f10..427baf2 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -82,54 +82,84 @@
   return insns[offset] | ((uint32_t) insns[offset+1] << 16);
 }
 
+
+bool Instruction::HasVRegC() const {
+  switch (FormatOf(Opcode())) {
+    case k23x: return true;
+    case k35c: return true;
+    case k3rc: return true;
+    default: return false;
+  }
+}
+
+bool Instruction::HasVRegB() const {
+  switch (FormatOf(Opcode())) {
+    case k12x: return true;
+    case k22b: return true;
+    case k22c: return true;
+    case k22s: return true;
+    case k22t: return true;
+    case k22x: return true;
+    case k32x: return true;
+    default: return false;
+  }
+}
+
+bool Instruction::HasVRegA() const {
+  switch (FormatOf(Opcode())) {
+    case k11n: return true;
+    case k11x: return true;
+    case k12x: return true;
+    case k21c: return true;
+    case k21h: return true;
+    case k21s: return true;
+    case k21t: return true;
+    case k22b: return true;
+    case k22c: return true;
+    case k22s: return true;
+    case k22t: return true;
+    case k22x: return true;
+    case k23x: return true;
+    case k31c: return true;
+    case k31i: return true;
+    case k31t: return true;
+    case k32x: return true;
+    case k51l: return true;
+    default: return false;
+  }
+}
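
These Has* predicates pair with the narrowed VReg* accessors below, which now LOG(FATAL) on formats lacking the operand. A sketch of the intended call pattern (helper name illustrative):

#include <stdint.h>
#include <vector>
#include "dex_instruction.h"

// Collects whichever of vA/vB/vC the instruction's format actually carries.
void CollectOperands(const art::Instruction* inst, std::vector<int32_t>* regs) {
  if (inst->HasVRegA()) regs->push_back(inst->VRegA());
  if (inst->HasVRegB()) regs->push_back(inst->VRegB());
  if (inst->HasVRegC()) regs->push_back(inst->VRegC());
}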
+
 int32_t Instruction::VRegC() const {
   switch (FormatOf(Opcode())) {
-    case k22b: return VRegC_22b();
-    case k22c: return VRegC_22c();
-    case k22s: return VRegC_22s();
-    case k22t: return VRegC_22t();
     case k23x: return VRegC_23x();
     case k35c: return VRegC_35c();
     case k3rc: return VRegC_3rc();
     default: LOG(FATAL) << "Tried to access vC of instruction " << Name() <<
         " which has no C operand.";
   }
-  return 0;
+  return -1;
 }
 
 int32_t Instruction::VRegB() const {
   switch (FormatOf(Opcode())) {
-    case k11n: return VRegB_11n();
     case k12x: return VRegB_12x();
-    case k21c: return VRegB_21c();
-    case k21h: return VRegB_21h();
-    case k21t: return VRegB_21t();
     case k22b: return VRegB_22b();
     case k22c: return VRegB_22c();
     case k22s: return VRegB_22s();
     case k22t: return VRegB_22t();
     case k22x: return VRegB_22x();
-    case k31c: return VRegB_31c();
-    case k31i: return VRegB_31i();
-    case k31t: return VRegB_31t();
     case k32x: return VRegB_32x();
-    case k35c: return VRegB_35c();
-    case k3rc: return VRegB_3rc();
-    case k51l: return VRegB_51l();
     default: LOG(FATAL) << "Tried to access vB of instruction " << Name() <<
         " which has no B operand.";
   }
-  return 0;
+  return -1;
 }
 
 int32_t Instruction::VRegA() const {
   switch (FormatOf(Opcode())) {
-    case k10t: return VRegA_10t();
-    case k10x: return VRegA_10x();
     case k11n: return VRegA_11n();
     case k11x: return VRegA_11x();
     case k12x: return VRegA_12x();
-    case k20t: return VRegA_20t();
     case k21c: return VRegA_21c();
     case k21h: return VRegA_21h();
     case k21s: return VRegA_21s();
@@ -140,18 +170,15 @@
     case k22t: return VRegA_22t();
     case k22x: return VRegA_22x();
     case k23x: return VRegA_23x();
-    case k30t: return VRegA_30t();
     case k31c: return VRegA_31c();
     case k31i: return VRegA_31i();
     case k31t: return VRegA_31t();
     case k32x: return VRegA_32x();
-    case k35c: return VRegA_35c();
-    case k3rc: return VRegA_3rc();
     case k51l: return VRegA_51l();
-    default: LOG(FATAL) << "Tried to access vA of instruction "<< Name() <<
+    default: LOG(FATAL) << "Tried to access vA of instruction " << Name() <<
         " which has no A operand.";
   }
-  return 0;
+  return -1;
 }
 
 int32_t Instruction::GetTargetOffset() const {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 0407c57..7d078f9 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -215,6 +215,7 @@
   }
 
   // VRegA
+  bool HasVRegA() const;
   int32_t VRegA() const;
   int8_t VRegA_10t() const;
   uint8_t VRegA_10x() const;
@@ -242,6 +243,7 @@
   uint8_t VRegA_51l() const;
 
   // VRegB
+  bool HasVRegB() const;
   int32_t VRegB() const;
   int4_t VRegB_11n() const;
   uint4_t VRegB_12x() const;
@@ -264,6 +266,7 @@
   uint64_t VRegB_51l() const;  // vB_wide
 
   // VRegC
+  bool HasVRegC() const;
   int32_t VRegC() const;
   int8_t VRegC_22b() const;
   uint16_t VRegC_22c() const;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a68cc02..eae1520 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -25,6 +25,7 @@
 #include <vector>
 
 #include "base/stl_util.h"
+#include "common_throws.h"
 #include "cutils/sched_policy.h"
 #include "debugger.h"
 #include "gc/accounting/atomic_stack.h"
@@ -170,12 +171,15 @@
       capacity_(capacity),
       growth_limit_(growth_limit),
       max_allowed_footprint_(initial_size),
+      native_footprint_gc_watermark_(initial_size),
+      native_footprint_limit_(2 * initial_size),
       concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             :  std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       large_object_threshold_(3 * kPageSize),
       num_bytes_allocated_(0),
+      native_bytes_allocated_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
       verify_pre_gc_heap_(false),
@@ -569,9 +573,6 @@
       Dbg::RecordAllocation(c, byte_count);
     }
     if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
-      // We already have a request pending, no reason to start more until we update
-      // concurrent_start_bytes_.
-      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
       // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
       SirtRef<mirror::Object> ref(self, obj);
       RequestConcurrentGC(self);
@@ -1690,6 +1691,19 @@
   max_allowed_footprint_ = max_allowed_footprint;
 }
 
+void Heap::UpdateMaxNativeFootprint() {
+  size_t native_size = native_bytes_allocated_;
+  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
+  size_t target_size = native_size / GetTargetHeapUtilization();
+  if (target_size > native_size + max_free_) {
+    target_size = native_size + max_free_;
+  } else if (target_size < native_size + min_free_) {
+    target_size = native_size + min_free_;
+  }
+  native_footprint_gc_watermark_ = target_size;
+  native_footprint_limit_ = 2 * target_size - native_size;
+}
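
Worked numbers for the clamp above, under assumed values (target utilization 0.5, min_free_ = 512 KB, max_free_ = 2 MB, 3 MB of registered native bytes):

#include <cstddef>

void WatermarkExample() {
  const double kUtilization = 0.5;               // Assumed GetTargetHeapUtilization().
  const std::size_t kMinFree = 512 * 1024;       // Assumed min_free_.
  const std::size_t kMaxFree = 2 * 1024 * 1024;  // Assumed max_free_.
  std::size_t native_size = 3 * 1024 * 1024;     // 3 MB currently registered.
  std::size_t target_size =
      static_cast<std::size_t>(native_size / kUtilization);  // 6 MB from the ratio alone.
  if (target_size > native_size + kMaxFree) {
    target_size = native_size + kMaxFree;                    // Clamped down to 5 MB here.
  } else if (target_size < native_size + kMinFree) {
    target_size = native_size + kMinFree;
  }
  std::size_t gc_watermark = target_size;                    // Concurrent GC above 5 MB.
  std::size_t hard_limit = 2 * target_size - native_size;    // Blocking path above 7 MB.
  (void)gc_watermark; (void)hard_limit;
}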
+
 void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
   // We know what our utilization is at this moment.
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
@@ -1746,6 +1760,8 @@
     DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
     DCHECK_LE(max_allowed_footprint_, growth_limit_);
   }
+
+  UpdateMaxNativeFootprint();
 }
 
 void Heap::ClearGrowthLimit() {
@@ -1881,6 +1897,10 @@
     return;
   }
 
+  // We already have a request pending, no reason to start more until we update
+  // concurrent_start_bytes_.
+  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
@@ -1958,5 +1978,64 @@
   return alloc_space_->Trim();
 }
 
+bool Heap::IsGCRequestPending() const {
+  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
+}
+
+void Heap::RegisterNativeAllocation(int bytes) {
+  // Total number of native bytes allocated.
+  native_bytes_allocated_ += bytes;
+  Thread* self = Thread::Current();
+  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+    // The second watermark is higher than the gc watermark. If you hit this it means you are
+    // allocating native objects faster than the GC can keep up with.
+    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+      JNIEnv* env = self->GetJniEnv();
+      // Can't do this in WellKnownClasses::Init since System is not properly set up at that
+      // point.
+      if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
+        DCHECK(WellKnownClasses::java_lang_System != NULL);
+        WellKnownClasses::java_lang_System_runFinalization =
+            CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
+        assert(WellKnownClasses::java_lang_System_runFinalization != NULL);
+      }
+      if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) {
+        // Just finished a GC, attempt to run finalizers.
+        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+                                  WellKnownClasses::java_lang_System_runFinalization);
+        CHECK(!env->ExceptionCheck());
+      }
+
+      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
+      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+                                  WellKnownClasses::java_lang_System_runFinalization);
+        CHECK(!env->ExceptionCheck());
+      }
+      // We have just run finalizers, update the native watermark since it is very likely that
+      // finalizers released native managed allocations.
+      UpdateMaxNativeFootprint();
+    } else {
+      if (!IsGCRequestPending()) {
+        RequestConcurrentGC(self);
+      }
+    }
+  }
+}
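
RegisterNativeAllocation thus implements a two-level response: crossing native_footprint_gc_watermark_ merely requests a concurrent GC, while crossing native_footprint_limit_ blocks the allocating thread on GC plus finalization, throttling callers that register native memory faster than collection reclaims it. The policy reduced to its decision (names illustrative):

#include <cstddef>

enum NativePressure { kNoAction, kRequestConcurrentGC, kBlockingGCAndFinalize };

NativePressure ClassifyNativePressure(std::size_t allocated,
                                      std::size_t gc_watermark,
                                      std::size_t hard_limit) {
  if (allocated > hard_limit) {
    return kBlockingGCAndFinalize;  // Allocation is outpacing the collector.
  }
  if (allocated > gc_watermark) {
    return kRequestConcurrentGC;    // Hint the GC; do not block the caller.
  }
  return kNoAction;
}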
+
+void Heap::RegisterNativeFree(int bytes) {
+  int expected_size, new_size;
+  do {
+    expected_size = native_bytes_allocated_.get();
+    new_size = expected_size - bytes;
+    if (new_size < 0) {
+      ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes "
+                            "registered as allocated", bytes, expected_size);
+      break;
+    }
+  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+}
+
 }  // namespace gc
 }  // namespace art
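
A note on the RegisterNativeFree loop above: it is a standard compare-and-swap retry that refuses to let the counter go negative. With C++11's <atomic> (the in-tree AtomicInteger predates it), an equivalent would look roughly like this sketch:

#include <atomic>

bool TryRegisterNativeFree(std::atomic<int>& allocated, int bytes) {
  int expected = allocated.load();
  while (true) {
    int desired = expected - bytes;
    if (desired < 0) {
      return false;  // Let the caller report underflow instead of wrapping.
    }
    // On failure, compare_exchange_weak reloads 'expected' and we retry.
    if (allocated.compare_exchange_weak(expected, desired)) {
      return true;
    }
  }
}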
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 790ab02..980f3bc 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -126,6 +126,10 @@
   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void RegisterNativeAllocation(int bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RegisterNativeFree(int bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
   void VerifyObjectImpl(const mirror::Object* o);
   void VerifyObject(const mirror::Object* o) {
@@ -403,6 +407,7 @@
 
   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  bool IsGCRequestPending() const;
 
   void RecordAllocation(size_t size, mirror::Object* object)
       LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
@@ -421,6 +426,10 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void PostGcVerification(collector::GarbageCollector* gc);
 
+  // Update the watermark for the native allocated bytes based on the current number of native
+  // bytes allocated and the target utilization ratio.
+  void UpdateMaxNativeFootprint();
+
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
   // collection.
@@ -498,6 +507,10 @@
   // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
   // a GC should be triggered.
   size_t max_allowed_footprint_;
+  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
+  size_t native_footprint_gc_watermark_;
+  // The watermark at which a GC is performed inside of registerNativeAllocation.
+  size_t native_footprint_limit_;
 
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
@@ -515,6 +528,9 @@
   // Number of bytes allocated.  Adjusted after each allocation and free.
   AtomicInteger num_bytes_allocated_;
 
+  // Bytes which are allocated and managed by native code but still need to be accounted for.
+  AtomicInteger native_bytes_allocated_;
+
   // Heap verification flags.
   const bool verify_missing_card_marks_;
   const bool verify_system_weaks_;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index ce3cc93..baae8a3 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -165,6 +165,24 @@
   }
 }
 
+static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+  ScopedObjectAccess soa(env);
+  if (bytes < 0) {
+    ThrowRuntimeException("allocation size negative %d", bytes);
+    return;
+  }
+  Runtime::Current()->GetHeap()->RegisterNativeAllocation(bytes);
+}
+
+static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+  ScopedObjectAccess soa(env);
+  if (bytes < 0) {
+    ThrowRuntimeException("allocation size negative %d", bytes);
+    return;
+  }
+  Runtime::Current()->GetHeap()->RegisterNativeFree(bytes);
+}
+
 static void VMRuntime_trimHeap(JNIEnv*, jobject) {
   uint64_t start_ns = NanoTime();
 
@@ -210,10 +228,12 @@
   NATIVE_METHOD(VMRuntime, newNonMovableArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, setTargetSdkVersion, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
   NATIVE_METHOD(VMRuntime, startJitCompilation, "()V"),
   NATIVE_METHOD(VMRuntime, trimHeap, "()V"),
   NATIVE_METHOD(VMRuntime, vmVersion, "()Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, vmLibrary, "()Ljava/lang/String;"),
 };
 
 void register_dalvik_system_VMRuntime(JNIEnv* env) {
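Each NATIVE_METHOD row above amounts to a JNINativeMethod record: a Java-visible name, a JNI signature ("(I)V" is one int in, void out), and the C++ entry point. A sketch of the equivalent manual registration is below; the implementations are stubbed out so the sketch is self-contained, since the real functions are static to dalvik_system_VMRuntime.cc.

#include <jni.h>

// Stubs standing in for the static functions added above.
static void VMRuntime_registerNativeAllocation(JNIEnv*, jobject, jint) {}
static void VMRuntime_registerNativeFree(JNIEnv*, jobject, jint) {}

static const JNINativeMethod gSketchMethods[] = {
  { "registerNativeAllocation", "(I)V",
    reinterpret_cast<void*>(VMRuntime_registerNativeAllocation) },
  { "registerNativeFree", "(I)V",
    reinterpret_cast<void*>(VMRuntime_registerNativeFree) },
};

static void RegisterSketch(JNIEnv* env) {
  jclass klass = env->FindClass("dalvik/system/VMRuntime");
  if (klass != NULL) {
    env->RegisterNatives(klass, gSketchMethods,
                         sizeof(gSketchMethods) / sizeof(gSketchMethods[0]));
  }
}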
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 4d34c73..434fcf0 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -37,6 +37,7 @@
 jclass WellKnownClasses::java_lang_reflect_Proxy;
 jclass WellKnownClasses::java_lang_RuntimeException;
 jclass WellKnownClasses::java_lang_StackOverflowError;
+jclass WellKnownClasses::java_lang_System;
 jclass WellKnownClasses::java_lang_Thread;
 jclass WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler;
 jclass WellKnownClasses::java_lang_ThreadGroup;
@@ -63,6 +64,7 @@
 jmethodID WellKnownClasses::java_lang_reflect_InvocationHandler_invoke;
 jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
 jmethodID WellKnownClasses::java_lang_Short_valueOf;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
 jmethodID WellKnownClasses::java_lang_Thread_init;
 jmethodID WellKnownClasses::java_lang_Thread_run;
 jmethodID WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException;
@@ -105,7 +107,7 @@
   return fid;
 }
 
-static jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
+jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
   jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) : env->GetMethodID(c, name, signature);
   if (mid == NULL) {
     LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\"";
@@ -132,6 +134,7 @@
   java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy");
   java_lang_RuntimeException = CacheClass(env, "java/lang/RuntimeException");
   java_lang_StackOverflowError = CacheClass(env, "java/lang/StackOverflowError");
+  java_lang_System = CacheClass(env, "java/lang/System");
   java_lang_Thread = CacheClass(env, "java/lang/Thread");
   java_lang_Thread$UncaughtExceptionHandler = CacheClass(env, "java/lang/Thread$UncaughtExceptionHandler");
   java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 8170520..6e19f86 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -29,6 +29,8 @@
 // them up. Similar to libcore's JniConstants (except there's no overlap, so
 // we keep them separate).
 
+jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature);
+
 struct WellKnownClasses {
   static void InitClasses(JNIEnv* env);
   static void Init(JNIEnv* env);  // Run before native methods are registered.
@@ -49,6 +51,7 @@
   static jclass java_lang_reflect_Proxy;
   static jclass java_lang_RuntimeException;
   static jclass java_lang_StackOverflowError;
+  static jclass java_lang_System;
   static jclass java_lang_Thread;
   static jclass java_lang_ThreadGroup;
   static jclass java_lang_Thread$UncaughtExceptionHandler;
@@ -75,6 +78,7 @@
   static jmethodID java_lang_reflect_InvocationHandler_invoke;
   static jmethodID java_lang_Runtime_nativeLoad;
   static jmethodID java_lang_Short_valueOf;
+  static jmethodID java_lang_System_runFinalization;
   static jmethodID java_lang_Thread_init;
   static jmethodID java_lang_Thread_run;
   static jmethodID java_lang_Thread$UncaughtExceptionHandler_uncaughtException;
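Exporting CacheMethod, together with the new java_lang_System ids, lets runtime code resolve System.runFinalization lazily (java.lang.System is not yet usable when WellKnownClasses::Init runs) and invoke it when native allocations push past the footprint limit. A sketch of that call path follows; the lazy-init guard is an assumption about how the heap code uses these symbols, and CHECK is ART's fatal-assert macro.

#include "jni.h"
#include "well_known_classes.h"

namespace art {

void RunFinalizationSketch(JNIEnv* env) {
  // Resolve the method id on first use; WellKnownClasses::Init runs too early
  // for java.lang.System to be initialized.
  if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true,
                    "runFinalization", "()V");
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
  CHECK(!env->ExceptionCheck());  // finalizers must not leave a pending exception
}

}  // namespace art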
diff --git a/test/Android.mk b/test/Android.mk
index fdb7273..a91ed3d 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -44,6 +44,7 @@
 	Main \
 	HelloWorld \
 	\
+	NativeAllocations \
 	ParallelGC \
 	ReferenceMap \
 	StackWalk \
diff --git a/test/NativeAllocations/NativeAllocations.java b/test/NativeAllocations/NativeAllocations.java
new file mode 100644
index 0000000..9423b91
--- /dev/null
+++ b/test/NativeAllocations/NativeAllocations.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+
+class NativeAllocations {
+    static Object nativeLock = new Object();
+    static int nativeBytes = 0;
+    static Object runtime;
+    static Method register_native_allocation;
+    static Method register_native_free;
+    static int maxMem = 64 * 1024 * 1024;
+
+    static class NativeAllocation {
+        private int bytes;
+
+        NativeAllocation(int bytes) throws Exception {
+            this.bytes = bytes;
+            register_native_allocation.invoke(runtime, bytes);
+            synchronized (nativeLock) {
+                nativeBytes += bytes;
+                if (nativeBytes > maxMem) {
+                    throw new OutOfMemoryError();
+                }
+            }
+        }
+
+        protected void finalize() throws Exception {
+            synchronized (nativeLock) {
+                nativeBytes -= bytes;
+            }
+            register_native_free.invoke(runtime, bytes);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+        Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+        runtime = get_runtime.invoke(null);
+        register_native_allocation = vm_runtime.getDeclaredMethod("registerNativeAllocation", Integer.TYPE);
+        register_native_free = vm_runtime.getDeclaredMethod("registerNativeFree", Integer.TYPE);
+        int count = 16;
+        int size = 512 * 1024;  // 512 KiB per allocation
+        int allocation_count = 256;
+        NativeAllocation[] allocations = new NativeAllocation[count];
+        for (int i = 0; i < allocation_count; ++i) {
+            allocations[i % count] = new NativeAllocation(size);
+        }
+        System.out.println("Test complete");
+    }
+}
+