Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.

Fix associated errors about unused parameters and implicit sign conversions.
The sign-conversion fixes were largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
Tidy arena allocation code and arena-allocated data types, rather than fixing
new and delete operators.
Remove dead code.
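
The enum stream operators declared in the headers below get their definitions
from tools/generate-operator-out.py at build time. As a rough, self-contained
sketch of the kind of definition the script emits (BBType enumerators as in
compiler_enums.h; the exact generated code may differ):

  #include <iostream>
  #include <ostream>

  enum BBType {
    kNullBlock,
    kEntryBlock,
    kDalvikByteCode,
    kExitBlock,
    kExceptionHandling,
    kDead,
  };

  // The generated operator streams the enumerator name and falls back to the
  // numeric value for anything unrecognized.
  std::ostream& operator<<(std::ostream& os, const BBType& code) {
    switch (code) {
      case kNullBlock: os << "kNullBlock"; break;
      case kEntryBlock: os << "kEntryBlock"; break;
      case kDalvikByteCode: os << "kDalvikByteCode"; break;
      case kExitBlock: os << "kExitBlock"; break;
      case kExceptionHandling: os << "kExceptionHandling"; break;
      case kDead: os << "kDead"; break;
      default: os << "BBType[" << static_cast<int>(code) << "]"; break;
    }
    return os;
  }

  int main() {
    std::cout << kDalvikByteCode << std::endl;  // Prints "kDalvikByteCode", not "2".
    return 0;
  }

With an exact-match operator available, LOG and DCHECK can stream these enums
directly instead of going through the integral promotion that -Wsign-promo
flags.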
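
The arena tidy-up replaces the per-class placement operator new / no-op
operator delete boilerplate (removed in this change from MIR, BasicBlock,
GlobalValueNumbering and LocalValueNumbering) with the ArenaObject /
DeletableArenaObject base classes from utils/arena_object.h. A toy sketch of
the pattern follows, with a simplified allocator standing in for the real
ArenaAllocator; the class bodies are illustrative, not the actual art code:

  #include <cstddef>
  #include <cstdlib>
  #include <iostream>

  enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocMIR, kArenaAllocBB };

  // Simplified stand-in: a real arena hands out bump-pointer memory that is
  // reclaimed only when the whole arena is destroyed.
  class ArenaAllocator {
   public:
    void* Alloc(size_t size, ArenaAllocKind kind) {
      static_cast<void>(kind);  // A real arena tags allocations by kind for statistics.
      return std::malloc(size);
    }
  };

  // The allocation boilerplate formerly repeated in each class now lives once
  // in a small base class.
  template <ArenaAllocKind kKind>
  class ArenaObject {
   public:
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kKind);
    }
    // Simplified: the real ArenaObject forbids delete; DeletableArenaObject
    // makes it a no-op so a delete-expression only runs the destructor.
    static void operator delete(void*, size_t) {}
  };

  class MIR : public ArenaObject<kArenaAllocMIR> {
   public:
    int opcode = 0;
  };

  int main() {
    ArenaAllocator arena;
    MIR* mir = new (&arena) MIR();  // Inherited placement operator new allocates from the arena.
    std::cout << mir->opcode << std::endl;
    return 0;
  }

DeletableArenaObject is used where a delete-expression must still be able to
destroy the object without deallocating (GlobalValueNumbering,
LocalValueNumbering, BasicBlock); plain ArenaObject is used for MIR.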

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index cab3427..9cad933 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -38,7 +38,7 @@
 
     /*
      * Return the number of reservable vector registers supported
-     * @param long_or_fp ‘true’ if floating point computations will be
+     * @param long_or_fp, true if floating point computations will be
      * executed or the operations will be long type while vector
      * registers are reserved.
      * @return the number of vector registers that are available
@@ -46,7 +46,10 @@
      * are held back to generate scalar code without exhausting vector
      * registers, if scalar code also uses the vector registers.
      */
-    virtual int NumReservableVectorRegisters(bool long_or_fp) { return 0; }
+    virtual int NumReservableVectorRegisters(bool long_or_fp) {
+      UNUSED(long_or_fp);
+      return 0;
+    }
 
   protected:
     explicit Backend(ArenaAllocator* arena) : arena_(arena) {}
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0b76999..1297ba9 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -28,6 +28,7 @@
   kRefReg,
   kAnyReg,
 };
+std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
 
 enum BitsUsed {
   kSize32Bits,
@@ -82,6 +83,7 @@
   kLocCompilerTemp,
   kLocInvalid
 };
+std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
 
 enum BBType {
   kNullBlock,
@@ -91,6 +93,7 @@
   kExceptionHandling,
   kDead,
 };
+std::ostream& operator<<(std::ostream& os, const BBType& code);
 
 // Shared pseudo opcodes - must be < 0.
 enum LIRPseudoOpcode {
@@ -111,6 +114,7 @@
   kPseudoEHBlockLabel = -2,
   kPseudoNormalBlockLabel = -1,
 };
+std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
 
 enum ExtendedMIROpcode {
   kMirOpFirst = kNumPackedOpcodes,
@@ -334,6 +338,7 @@
   kPackedSwitch,
   kSparseSwitch,
 };
+std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
 
 enum AssemblerStatus {
   kSuccess,
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f9a05c2..205a521 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -282,10 +282,11 @@
 }  // namespace art
 
 extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
-                  uint32_t access_flags, art::InvokeType invoke_type,
-                  uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
-                  const art::DexFile& dex_file,
-                  art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+                              uint32_t access_flags, art::InvokeType invoke_type,
+                              uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                              const art::DexFile& dex_file,
+                              art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+  UNUSED(invoke_type);
   if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
     art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
                                  dex_file, code_item, class_def_idx, method_idx, access_flags,
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a4a7602..72d1112 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -19,14 +19,14 @@
 
 #include "base/macros.h"
 #include "compiler_internals.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
 
 namespace art {
 
 class LocalValueNumbering;
 class MirFieldInfo;
 
-class GlobalValueNumbering {
+class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
  public:
   enum Mode {
     kModeGvn,
@@ -55,33 +55,17 @@
   }
 
   // Allow modifications.
-  void StartPostProcessing() {
-    DCHECK(Good());
-    DCHECK_EQ(mode_, kModeGvn);
-    mode_ = kModeGvnPostProcessing;
-  }
+  void StartPostProcessing();
 
   bool CanModify() const {
     return modifications_allowed_ && Good();
   }
 
-  // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
-  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
-    return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
-  }
-
-  // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
-  static void operator delete(void* ptr) { UNUSED(ptr); }
-
  private:
   static constexpr uint16_t kNoValue = 0xffffu;
 
   // Allocate a new value name.
-  uint16_t NewValueName() {
-    DCHECK_NE(mode_, kModeGvnPostProcessing);
-    ++last_value_;
-    return last_value_;
-  }
+  uint16_t NewValueName();
 
   // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
   typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
@@ -228,7 +212,7 @@
   }
 
   CompilationUnit* const cu_;
-  MIRGraph* mir_graph_;
+  MIRGraph* const mir_graph_;
   ScopedArenaAllocator* const allocator_;
 
   // The maximum number of nested loops that we accept for GVN.
@@ -270,6 +254,19 @@
 
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
 };
+std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+
+inline void GlobalValueNumbering::StartPostProcessing() {
+  DCHECK(Good());
+  DCHECK_EQ(mode_, kModeGvn);
+  mode_ = kModeGvnPostProcessing;
+}
+
+inline uint16_t GlobalValueNumbering::NewValueName() {
+  DCHECK_NE(mode_, kModeGvnPostProcessing);
+  ++last_value_;
+  return last_value_;
+}
 
 }  // namespace art
 
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a171d7c..a7d9353 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -107,7 +107,8 @@
 
 class LocalValueNumbering::NonAliasingArrayVersions {
  public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
+                                     const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
                                      uint16_t array) {
     return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
   }
@@ -129,8 +130,9 @@
         gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
   }
 
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                uint16_t array) {
+  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+                                const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
+                                uint16_t array ATTRIBUTE_UNUSED) {
     return false;  // Not affected by global_memory_version_.
   }
 
@@ -164,8 +166,9 @@
     return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
   }
 
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                   uint16_t type, uint16_t location) {
+  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+                                   const LocalValueNumbering* lvn,
+                                   uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
     // If the location is non-aliasing in lvn, use the non-aliasing value.
     uint16_t array = gvn->GetArrayLocationBase(location);
     if (lvn->IsNonAliasingArray(array, type)) {
@@ -176,8 +179,11 @@
         gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
   }
 
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                uint16_t type) {
+  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+                                const LocalValueNumbering* lvn,
+                                uint16_t type ATTRIBUTE_UNUSED) {
+    UNUSED(gvn);
+    UNUSED(type);
     return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
   }
 
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index dd8d2db..535fba1 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,7 @@
 
 #include "compiler_internals.h"
 #include "global_value_numbering.h"
-#include "utils/scoped_arena_allocator.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
 
 namespace art {
 
@@ -31,7 +30,7 @@
 // Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
 static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
 
-class LocalValueNumbering {
+class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
  private:
   static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
 
@@ -69,14 +68,6 @@
 
   uint16_t GetValueNumber(MIR* mir);
 
-  // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
-  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
-    return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
-  }
-
-  // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
-  static void operator delete(void* ptr) { UNUSED(ptr); }
-
  private:
   // A set of value names.
   typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index e6a8cea..5b7ac3c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1343,7 +1343,7 @@
  * counts explicitly used s_regs.  A later phase will add implicit
  * counts for things such as Method*, null-checked references, etc.
  */
-void MIRGraph::CountUses(struct BasicBlock* bb) {
+void MIRGraph::CountUses(class BasicBlock* bb) {
   if (bb->block_type != kDalvikByteCode) {
     return;
   }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e0f471e..b87ab66 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -302,28 +302,28 @@
  * (by the caller)
  * Utilizes a map for fast lookup of the typical cases.
  */
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
                                 BasicBlock** immed_pred_block_p) {
   if (code_offset >= current_code_item_->insns_size_in_code_units_) {
-    return NULL;
+    return nullptr;
   }
 
   int block_id = dex_pc_to_block_map_[code_offset];
   BasicBlock* bb = GetBasicBlock(block_id);
 
-  if ((bb != NULL) && (bb->start_offset == code_offset)) {
+  if ((bb != nullptr) && (bb->start_offset == code_offset)) {
     // Does this containing block start with the desired instruction?
     return bb;
   }
 
   // No direct hit.
   if (!create) {
-    return NULL;
+    return nullptr;
   }
 
-  if (bb != NULL) {
+  if (bb != nullptr) {
     // The target exists somewhere in an existing block.
-    return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?  immed_pred_block_p : NULL);
+    return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?  immed_pred_block_p : nullptr);
   }
 
   // Create a new block.
@@ -360,8 +360,7 @@
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t address = iterator.GetHandlerAddress();
-      FindBlock(address, false /* split */, true /*create*/,
-                /* immed_pred_block_p */ NULL);
+      FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
     }
     handlers_ptr = iterator.EndDataPointer();
   }
@@ -466,7 +465,7 @@
       LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
   }
   CountBranch(target);
-  BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
+  BasicBlock* taken_block = FindBlock(target, /* create */ true,
                                       /* immed_pred_block_p */ &cur_block);
   cur_block->taken = taken_block->id;
   taken_block->predecessors.push_back(cur_block->id);
@@ -474,19 +473,6 @@
   /* Always terminate the current block for conditional branches */
   if (flags & Instruction::kContinue) {
     BasicBlock* fallthrough_block = FindBlock(cur_offset +  width,
-                                             /*
-                                              * If the method is processed
-                                              * in sequential order from the
-                                              * beginning, we don't need to
-                                              * specify split for continue
-                                              * blocks. However, this
-                                              * routine can be called by
-                                              * compileLoop, which starts
-                                              * parsing the method from an
-                                              * arbitrary address in the
-                                              * method body.
-                                              */
-                                             true,
                                              /* create */
                                              true,
                                              /* immed_pred_block_p */
@@ -494,8 +480,7 @@
     cur_block->fall_through = fallthrough_block->id;
     fallthrough_block->predecessors.push_back(cur_block->id);
   } else if (code_ptr < code_end) {
-    FindBlock(cur_offset + width, /* split */ false, /* create */ true,
-                /* immed_pred_block_p */ NULL);
+    FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
   }
   return cur_block;
 }
@@ -503,6 +488,7 @@
 /* Process instructions with the kSwitch flag */
 BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                        int width, int flags) {
+  UNUSED(flags);
   const uint16_t* switch_data =
       reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
   int size;
@@ -554,8 +540,8 @@
   cur_block->successor_blocks.reserve(size);
 
   for (i = 0; i < size; i++) {
-    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
-                                      /* create */ true, /* immed_pred_block_p */ &cur_block);
+    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
+                                       /* immed_pred_block_p */ &cur_block);
     SuccessorBlockInfo* successor_block_info =
         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                        kArenaAllocSuccessor));
@@ -568,8 +554,8 @@
   }
 
   /* Fall-through case */
-  BasicBlock* fallthrough_block = FindBlock(cur_offset +  width, /* split */ false,
-                                            /* create */ true, /* immed_pred_block_p */ NULL);
+  BasicBlock* fallthrough_block = FindBlock(cur_offset +  width, /* create */ true,
+                                            /* immed_pred_block_p */ nullptr);
   cur_block->fall_through = fallthrough_block->id;
   fallthrough_block->predecessors.push_back(cur_block->id);
   return cur_block;
@@ -579,6 +565,7 @@
 BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags, ArenaBitVector* try_block_addr,
                                       const uint16_t* code_ptr, const uint16_t* code_end) {
+  UNUSED(flags);
   bool in_try_block = try_block_addr->IsBitSet(cur_offset);
   bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
 
@@ -593,8 +580,8 @@
     }
 
     for (; iterator.HasNext(); iterator.Next()) {
-      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
-                                         false /* creat */, NULL  /* immed_pred_block_p */);
+      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
+                                          nullptr /* immed_pred_block_p */);
       if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
           IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
         // Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -629,8 +616,7 @@
     cur_block->explicit_throw = true;
     if (code_ptr < code_end) {
       // Force creation of new block following THROW via side-effect.
-      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
-                /* immed_pred_block_p */ NULL);
+      FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
     }
     if (!in_try_block) {
        // Don't split a THROW that can't rethrow - we're done.
@@ -813,8 +799,7 @@
          * Create a fallthrough block for real instructions
          * (incl. NOP).
          */
-         FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
-                   /* immed_pred_block_p */ NULL);
+         FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
       }
     } else if (flags & Instruction::kThrow) {
       cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
@@ -837,8 +822,8 @@
       }
     }
     current_offset_ += width;
-    BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
-                                      false, /* immed_pred_block_p */ NULL);
+    BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
+                                       /* immed_pred_block_p */ nullptr);
     if (next_block) {
       /*
        * The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fd4c473..76f68e2 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -228,7 +228,8 @@
  * The Midlevel Intermediate Representation node, which may be largely considered a
  * wrapper around a Dalvik byte code.
  */
-struct MIR {
+class MIR : public ArenaObject<kArenaAllocMIR> {
+ public:
   /*
    * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
    * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
@@ -344,16 +345,12 @@
 
   MIR* Copy(CompilationUnit *c_unit);
   MIR* Copy(MIRGraph* mir_Graph);
-
-  static void* operator new(size_t size, ArenaAllocator* arena) {
-    return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
-  }
-  static void operator delete(void* p) {}  // Nop.
 };
 
 struct SuccessorBlockInfo;
 
-struct BasicBlock {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+ public:
   BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
       : id(block_id),
         dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
@@ -457,10 +454,8 @@
   MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
   bool IsExceptionBlock() const;
 
-  static void* operator new(size_t size, ArenaAllocator* arena) {
-    return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
-  }
-  static void operator delete(void* p) {}  // Nop.
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
 };
 
 /*
@@ -548,7 +543,7 @@
 
   /* Find existing block */
   BasicBlock* FindBlock(DexOffset code_offset) {
-    return FindBlock(code_offset, false, false, NULL);
+    return FindBlock(code_offset, false, NULL);
   }
 
   const uint16_t* GetCurrentInsns() const {
@@ -627,7 +622,7 @@
     return def_count_;
   }
 
-  ArenaAllocator* GetArena() {
+  ArenaAllocator* GetArena() const {
     return arena_;
   }
 
@@ -1135,7 +1130,7 @@
    * @brief Count the uses in the BasicBlock
    * @param bb the BasicBlock
    */
-  void CountUses(struct BasicBlock* bb);
+  void CountUses(class BasicBlock* bb);
 
   static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
   static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1208,8 +1203,7 @@
   bool ContentIsInsn(const uint16_t* code_ptr);
   BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
                          BasicBlock** immed_pred_block_p);
-  BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
-                        BasicBlock** immed_pred_block_p);
+  BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
   void ProcessTryCatchBlocks();
   bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
@@ -1233,7 +1227,7 @@
   void ComputeDomPostOrderTraversal(BasicBlock* bb);
   int GetSSAUseCount(int s_reg);
   bool BasicBlockOpt(BasicBlock* bb);
-  bool BuildExtendedBBList(struct BasicBlock* bb);
+  bool BuildExtendedBBList(class BasicBlock* bb);
   bool FillDefBlockMatrix(BasicBlock* bb);
   void InitializeDominationInfo(BasicBlock* bb);
   bool ComputeblockIDom(BasicBlock* bb);
@@ -1305,7 +1299,7 @@
   int method_sreg_;
   unsigned int attributes_;
   Checkstats* checkstats_;
-  ArenaAllocator* arena_;
+  ArenaAllocator* const arena_;
   int backward_branches_;
   int forward_branches_;
   size_t num_non_special_compiler_temps_;  // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8e583cc..3604eb9 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,12 +16,11 @@
 
 #include "base/bit_vector-inl.h"
 #include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
 #include "global_value_numbering.h"
 #include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/global_value_numbering.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "quick/dex_file_method_inliner.h"
+#include "quick/dex_file_to_method_inliner_map.h"
 #include "stack.h"
 #include "utils/scoped_arena_containers.h"
 
@@ -660,7 +659,7 @@
 }
 
 /* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb) {
+void MIRGraph::CountChecks(class BasicBlock* bb) {
   if (bb->data_flow_info != NULL) {
     for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
       if (mir->ssa_rep == NULL) {
@@ -750,7 +749,7 @@
 }
 
 /* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
+void MIRGraph::CombineBlocks(class BasicBlock* bb) {
   // Loop here to allow combining a sequence of blocks
   while ((bb->block_type == kDalvikByteCode) &&
       (bb->last_mir_insn != nullptr) &&
@@ -1510,7 +1509,7 @@
   }
 }
 
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
+bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
   if (bb->visited) return false;
   if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock))) {
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index e349eed..d3e54a0 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,11 +19,13 @@
 
 #include <string>
 
-#include "base/macros.h"
+#include "compiler_ir.h"
+#include "base/logging.h"
+
 namespace art {
 
 // Forward declarations.
-struct BasicBlock;
+class BasicBlock;
 struct CompilationUnit;
 class Pass;
 
@@ -81,15 +83,10 @@
    * @param data the object containing data necessary for the pass.
    * @return whether or not there is a change when walking the BasicBlock
    */
-  virtual bool Worker(PassDataHolder* data) const {
-    // Unused parameter.
-    UNUSED(data);
-
+  virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
     // Passes that do all their work in Start() or End() should not allow useless node iteration.
-    DCHECK(false) << "Unsupported default Worker() used for " << GetName();
-
-    // BasicBlock did not change.
-    return false;
+    LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
+    UNREACHABLE();
   }
 
   static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 2f3c8b2..d0b450a 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -23,7 +23,7 @@
 namespace art {
 
 // Forward declarations.
-struct BasicBlock;
+class BasicBlock;
 struct CompilationUnit;
 class Pass;
 
@@ -32,10 +32,11 @@
  * @details Each enum should be a power of 2 to be correctly used.
  */
 enum OptimizationFlag {
-  kOptimizationBasicBlockChange = 1,  /**< @brief Has there been a change to a BasicBlock? */
-  kOptimizationDefUsesChange = 2,     /**< @brief Has there been a change to a def-use? */
-  kLoopStructureChange = 4,           /**< @brief Has there been a loop structural change? */
+  kOptimizationBasicBlockChange = 1,  /// @brief Has there been a change to a BasicBlock?
+  kOptimizationDefUsesChange = 2,     /// @brief Has there been a change to a def-use?
+  kLoopStructureChange = 4,           /// @brief Has there been a loop structural change?
 };
+std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
 
 // Data holder class.
 class PassMEDataHolder: public PassDataHolder {
@@ -47,24 +48,25 @@
 };
 
 enum DataFlowAnalysisMode {
-  kAllNodes = 0,                           /**< @brief All nodes. */
-  kPreOrderDFSTraversal,                   /**< @brief Depth-First-Search / Pre-Order. */
-  kRepeatingPreOrderDFSTraversal,          /**< @brief Depth-First-Search / Repeating Pre-Order. */
-  kReversePostOrderDFSTraversal,           /**< @brief Depth-First-Search / Reverse Post-Order. */
-  kRepeatingPostOrderDFSTraversal,         /**< @brief Depth-First-Search / Repeating Post-Order. */
-  kRepeatingReversePostOrderDFSTraversal,  /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
-  kPostOrderDOMTraversal,                  /**< @brief Dominator tree / Post-Order. */
-  kTopologicalSortTraversal,               /**< @brief Topological Order traversal. */
-  kLoopRepeatingTopologicalSortTraversal,  /**< @brief Loop-repeating Topological Order traversal. */
-  kNoNodes,                                /**< @brief Skip BasicBlock traversal. */
+  kAllNodes = 0,                           /// @brief All nodes.
+  kPreOrderDFSTraversal,                   /// @brief Depth-First-Search / Pre-Order.
+  kRepeatingPreOrderDFSTraversal,          /// @brief Depth-First-Search / Repeating Pre-Order.
+  kReversePostOrderDFSTraversal,           /// @brief Depth-First-Search / Reverse Post-Order.
+  kRepeatingPostOrderDFSTraversal,         /// @brief Depth-First-Search / Repeating Post-Order.
+  kRepeatingReversePostOrderDFSTraversal,  /// @brief Depth-First-Search / Repeating Reverse Post-Order.
+  kPostOrderDOMTraversal,                  /// @brief Dominator tree / Post-Order.
+  kTopologicalSortTraversal,               /// @brief Topological Order traversal.
+  kLoopRepeatingTopologicalSortTraversal,  /// @brief Loop-repeating Topological Order traversal.
+  kNoNodes,                                /// @brief Skip BasicBlock traversal.
 };
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
 
 /**
  * @class Pass
  * @brief Pass is the Pass structure for the optimizations.
  * @details The following structure has the different optimization passes that we are going to do.
  */
-class PassME: public Pass {
+class PassME : public Pass {
  public:
   explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
           unsigned int flags = 0u, const char* dump = "")
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 94ae3f7..bc4f5c4 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -73,7 +73,7 @@
     std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
 };
 
-struct BasicBlock;
+class BasicBlock;
 struct CallInfo;
 struct CompilationUnit;
 struct MIR;
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 36cb7a4..b2db36d 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -109,7 +109,7 @@
   kArmRegEnd   = 48,
 };
 
-enum ArmNativeRegisterPool {
+enum ArmNativeRegisterPool {  // private marker to exclude this enum from generate-operator-out.py.
   r0           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
   r1           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
   r2           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
@@ -546,6 +546,7 @@
   kThumb2StrdI8,     // strd rt, rt2, [rn +-/1024].
   kArmLast,
 };
+std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
 
 enum ArmOpDmbOptions {
   kSY = 0xf,
@@ -577,6 +578,7 @@
   kFmtOff24,       // 24-bit Thumb2 unconditional branch encoding.
   kFmtSkip,        // Unused field, but continue to next.
 };
+std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
 
 // Struct used to define the snippet positions for each Thumb opcode.
 struct ArmEncodingMap {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 13b9bf0..b4eebb3 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -475,9 +475,9 @@
  * Bit of a hack here - in the absence of a real scheduling pass,
  * emit the next instruction in static & direct invoke sequences.
  */
-static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
                              int state, const MethodReference& target_method,
-                             uint32_t unused,
+                             uint32_t unused_idx ATTRIBUTE_UNUSED,
                              uintptr_t direct_code, uintptr_t direct_method,
                              InvokeType type) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 442c4fc..179ba02 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -196,7 +196,7 @@
     void GenSelect(BasicBlock* bb, MIR* mir);
     void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                           int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          int dest_reg_class) OVERRIDE;
+                          RegisterClass dest_reg_class) OVERRIDE;
     bool GenMemBarrier(MemBarrierKind barrier_kind);
     void GenMonitorEnter(int opt_flags, RegLocation rl_src);
     void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -251,10 +251,10 @@
     RegStorage AllocPreservedDouble(int s_reg);
     RegStorage AllocPreservedSingle(int s_reg);
 
-    bool WideGPRsAreAliases() OVERRIDE {
+    bool WideGPRsAreAliases() const OVERRIDE {
       return false;  // Wide GPRs are formed by pairing.
     }
-    bool WideFPRsAreAliases() OVERRIDE {
+    bool WideFPRsAreAliases() const OVERRIDE {
       return false;  // Wide FPRs are formed by pairing.
     }
 
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ce31b27..ebf1905 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -209,7 +209,8 @@
 
 void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  int dest_reg_class) {
+                                  RegisterClass dest_reg_class) {
+  UNUSED(dest_reg_class);
   // TODO: Generalize the IT below to accept more than one-instruction loads.
   DCHECK(InexpensiveConstantInt(true_val));
   DCHECK(InexpensiveConstantInt(false_val));
@@ -232,6 +233,7 @@
 }
 
 void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -504,6 +506,7 @@
 // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
 bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                     RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode);
   if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
     return false;
   }
@@ -687,14 +690,17 @@
 }
 
 RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation rl_src2, bool is_div, int flags) {
+                                  RegLocation rl_src2, bool is_div, int flags) {
+  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
   LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
-  return rl_dest;
+  UNREACHABLE();
 }
 
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+                                     bool is_div) {
+  UNUSED(rl_dest, rl_src1, lit, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -1072,6 +1078,7 @@
 void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
+  UNUSED(lit);
   OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                    EncodeShift(kArmLsl, second_bit - first_bit));
   if (first_bit != 0) {
@@ -1168,108 +1175,109 @@
 
 void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
-    /*
-     * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
-     * dest     = src1.lo * src2.lo;
-     * tmp1    += src1.lo * src2.hi;
-     * dest.hi += tmp1;
-     *
-     * To pull off inline multiply, we have a worst-case requirement of 7 temporary
-     * registers.  Normally for Arm, we get 5.  We can get to 6 by including
-     * lr in the temp set.  The only problematic case is all operands and result are
-     * distinct, and none have been promoted.  In that case, we can succeed by aggressively
-     * freeing operand temp registers after they are no longer needed.  All other cases
-     * can proceed normally.  We'll just punt on the case of the result having a misaligned
-     * overlap with either operand and send that case to a runtime handler.
-     */
-    RegLocation rl_result;
-    if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
-      FlushAllRegs();
-      CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(kCoreReg);
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    }
-
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
-    int reg_status = 0;
-    RegStorage res_lo;
-    RegStorage res_hi;
-    bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
-        !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
-    bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
-    bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
-    // Check if rl_dest is *not* either operand and we have enough temp registers.
-    if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
-        (dest_promoted || src1_promoted || src2_promoted)) {
-      // In this case, we do not need to manually allocate temp registers for result.
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      res_lo = rl_result.reg.GetLow();
-      res_hi = rl_result.reg.GetHigh();
-    } else {
-      res_lo = AllocTemp();
-      if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
-        // In this case, we have enough temp registers to be allocated for result.
-        res_hi = AllocTemp();
-        reg_status = 1;
-      } else {
-        // In this case, all temps are now allocated.
-        // res_hi will be allocated after we can free src1_hi.
-        reg_status = 2;
-      }
-    }
-
-    // Temporarily add LR to the temp pool, and assign it to tmp1
-    MarkTemp(rs_rARM_LR);
-    FreeTemp(rs_rARM_LR);
-    RegStorage tmp1 = rs_rARM_LR;
-    LockTemp(rs_rARM_LR);
-
-    if (rl_src1.reg == rl_src2.reg) {
-      DCHECK(res_hi.Valid());
-      DCHECK(res_lo.Valid());
-      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
-      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
-              rl_src1.reg.GetLowReg());
-      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
-    } else {
-      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
-      if (reg_status == 2) {
-        DCHECK(!res_hi.Valid());
-        DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-        DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-        // Will force free src1_hi, so must clobber.
-        Clobber(rl_src1.reg);
-        FreeTemp(rl_src1.reg.GetHigh());
-        res_hi = AllocTemp();
-      }
-      DCHECK(res_hi.Valid());
-      DCHECK(res_lo.Valid());
-      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
-              rl_src1.reg.GetLowReg());
-      NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
-              tmp1.GetReg());
-      NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
-      if (reg_status == 2) {
-        FreeTemp(rl_src1.reg.GetLow());
-      }
-    }
-
-    // Now, restore lr to its non-temp status.
-    FreeTemp(tmp1);
-    Clobber(rs_rARM_LR);
-    UnmarkTemp(rs_rARM_LR);
-
-    if (reg_status != 0) {
-      // We had manually allocated registers for rl_result.
-      // Now construct a RegLocation.
-      rl_result = GetReturnWide(kCoreReg);  // Just using as a template.
-      rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
-    }
-
+  UNUSED(opcode);
+  /*
+   * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
+   * dest     = src1.lo * src2.lo;
+   * tmp1    += src1.lo * src2.hi;
+   * dest.hi += tmp1;
+   *
+   * To pull off inline multiply, we have a worst-case requirement of 7 temporary
+   * registers.  Normally for Arm, we get 5.  We can get to 6 by including
+   * lr in the temp set.  The only problematic case is all operands and result are
+   * distinct, and none have been promoted.  In that case, we can succeed by aggressively
+   * freeing operand temp registers after they are no longer needed.  All other cases
+   * can proceed normally.  We'll just punt on the case of the result having a misaligned
+   * overlap with either operand and send that case to a runtime handler.
+   */
+  RegLocation rl_result;
+  if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
+    FlushAllRegs();
+    CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
+    rl_result = GetReturnWide(kCoreReg);
     StoreValueWide(rl_dest, rl_result);
+    return;
+  }
+
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+  int reg_status = 0;
+  RegStorage res_lo;
+  RegStorage res_hi;
+  bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+      !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+  bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+  bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
+  // Check if rl_dest is *not* either operand and we have enough temp registers.
+  if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+      (dest_promoted || src1_promoted || src2_promoted)) {
+    // In this case, we do not need to manually allocate temp registers for result.
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    res_lo = rl_result.reg.GetLow();
+    res_hi = rl_result.reg.GetHigh();
+  } else {
+    res_lo = AllocTemp();
+    if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+      // In this case, we have enough temp registers to be allocated for result.
+      res_hi = AllocTemp();
+      reg_status = 1;
+    } else {
+      // In this case, all temps are now allocated.
+      // res_hi will be allocated after we can free src1_hi.
+      reg_status = 2;
+    }
+  }
+
+  // Temporarily add LR to the temp pool, and assign it to tmp1
+  MarkTemp(rs_rARM_LR);
+  FreeTemp(rs_rARM_LR);
+  RegStorage tmp1 = rs_rARM_LR;
+  LockTemp(rs_rARM_LR);
+
+  if (rl_src1.reg == rl_src2.reg) {
+    DCHECK(res_hi.Valid());
+    DCHECK(res_lo.Valid());
+    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+            rl_src1.reg.GetLowReg());
+    OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+  } else {
+    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+    if (reg_status == 2) {
+      DCHECK(!res_hi.Valid());
+      DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+      DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+      // Will force free src1_hi, so must clobber.
+      Clobber(rl_src1.reg);
+      FreeTemp(rl_src1.reg.GetHigh());
+      res_hi = AllocTemp();
+    }
+    DCHECK(res_hi.Valid());
+    DCHECK(res_lo.Valid());
+    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+            rl_src1.reg.GetLowReg());
+    NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+            tmp1.GetReg());
+    NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
+    if (reg_status == 2) {
+      FreeTemp(rl_src1.reg.GetLow());
+    }
+  }
+
+  // Now, restore lr to its non-temp status.
+  FreeTemp(tmp1);
+  Clobber(rs_rARM_LR);
+  UnmarkTemp(rs_rARM_LR);
+
+  if (reg_status != 0) {
+    // We had manually allocated registers for rl_result.
+    // Now construct a RegLocation.
+    rl_result = GetReturnWide(kCoreReg);  // Just using as a template.
+    rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+  }
+
+  StoreValueWide(rl_dest, rl_result);
 }
 
 void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -1471,6 +1479,7 @@
 void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
                                    int flags) {
+  UNUSED(flags);
   rl_src = LoadValueWide(rl_src, kCoreReg);
   // Per spec, we only care about low 6 bits of shift amount.
   int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7100a28..1f26a81 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -559,11 +559,10 @@
   call_method_insns_.reserve(100);
   // Sanity check - make sure encoding map lines up.
   for (int i = 0; i < kArmLast; i++) {
-    if (ArmMir2Lir::EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
-                 << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
-    }
+    DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
+        << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+        << " is wrong: expecting " << i << ", seeing "
+        << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
   }
 }
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index ce2de65..448e80f 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -373,18 +373,21 @@
 }
 
 LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+  UNUSED(r_dest, r_base, offset, move_type);
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
 LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+  UNUSED(r_base, offset, r_src, move_type);
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
 LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+  UNUSED(op, cc, r_dest, r_src);
   LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1167,11 +1170,13 @@
 }
 
 LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+  UNUSED(op, r_base, disp);
   LOG(FATAL) << "Unexpected use of OpMem for Arm";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  UNUSED(trampoline);  // The address of the trampoline is already loaded into r_tgt.
   return OpReg(op, r_tgt);
 }
 
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a87b06a..973279e 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -127,7 +127,7 @@
   R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
 
 // Registers (integer) values.
-enum A64NativeRegisterPool {
+enum A64NativeRegisterPool {  // private marker to exclude this enum from generate-operator-out.py.
 #  define A64_DEFINE_REGISTERS(nr) \
     rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
     rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
@@ -362,9 +362,10 @@
   kA64Tbz3rht,       // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
   kA64Ubfm4rrdd,     // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
   kA64Last,
-  kA64NotWide = 0,   // Flag used to select the first instruction variant.
-  kA64Wide = 0x1000  // Flag used to select the second instruction variant.
+  kA64NotWide = kA64First,  // 0 - Flag used to select the first instruction variant.
+  kA64Wide = 0x1000         // Flag used to select the second instruction variant.
 };
+std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
 
 /*
  * The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
@@ -414,6 +415,7 @@
   kFmtExtend,     // Register extend, 9-bit at [23..21, 15..10].
   kFmtSkip,       // Unused field, but continue to next.
 };
+std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs);
 
 // Struct used to define the snippet positions for each A64 opcode.
 struct A64EncodingMap {
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c898e2d..a9a58a3 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -401,6 +401,7 @@
 }
 
 static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
+  UNUSED(cu, target_method);
   // Always emit relative calls.
   return true;
 }
@@ -411,9 +412,10 @@
  */
 static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                                int state, const MethodReference& target_method,
-                               uint32_t unused,
+                               uint32_t unused_idx,
                                uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type) {
+  UNUSED(info, unused_idx);
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (direct_code != 0 && direct_method != 0) {
     switch (state) {
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 9f02606..bd363c4 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -188,7 +188,7 @@
   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
   void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                         int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        int dest_reg_class) OVERRIDE;
+                        RegisterClass dest_reg_class) OVERRIDE;
 
   bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
   void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
@@ -249,10 +249,10 @@
                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                          bool skip_this) OVERRIDE;
 
-  bool WideGPRsAreAliases() OVERRIDE {
+  bool WideGPRsAreAliases() const OVERRIDE {
     return true;  // 64b architecture.
   }
-  bool WideFPRsAreAliases() OVERRIDE {
+  bool WideFPRsAreAliases() const OVERRIDE {
     return true;  // 64b architecture.
   }
 
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 418d81e..965759b 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -32,11 +32,13 @@
 }
 
 LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
+  UNUSED(ccode, guide);
   LOG(FATAL) << "Unexpected use of OpIT for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
 void Arm64Mir2Lir::OpEndIT(LIR* it) {
+  UNUSED(it);
   LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
 }
 
@@ -174,13 +176,14 @@
 
 void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                     int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                    int dest_reg_class) {
+                                    RegisterClass dest_reg_class) {
   DCHECK(rs_dest.Valid());
   OpRegReg(kOpCmp, left_op, right_op);
   GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
 }
 
 void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
   // rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -406,6 +409,7 @@
 // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
 bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                       RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode);
   if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
     return false;
   }
@@ -450,6 +454,7 @@
 
 bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                         RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
+  UNUSED(dalvik_opcode);
   if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
     return false;
   }
@@ -590,13 +595,16 @@
 }
 
 bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
-  return false;
+  UNREACHABLE();
 }
 
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+                                       bool is_div) {
+  UNUSED(rl_dest, rl_src1, lit, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -615,8 +623,9 @@
 
 RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                     RegLocation rl_src2, bool is_div, int flags) {
+  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
   LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
@@ -929,25 +938,27 @@
 }
 
 LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
 void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                               RegLocation rl_result, int lit,
-                                               int first_bit, int second_bit) {
+                                                 RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
+                                                 int first_bit, int second_bit) {
   OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
   if (first_bit != 0) {
     OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
   }
 }
 
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
 }
 
@@ -1311,7 +1322,7 @@
 
 void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                      RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
-                                     int flags) {
+                                     int flags ATTRIBUTE_UNUSED) {
   OpKind op = kOpBkpt;
   // Per spec, we only care about low 6 bits of shift amount.
   int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1467,8 +1478,8 @@
   }
 }
 
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
-                           uint32_t fp_reg_mask, int frame_size) {
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                           int frame_size) {
   m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
 
   int core_count = POPCOUNT(core_reg_mask);
@@ -1490,7 +1501,7 @@
 }
 
 static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
-                               uint32_t fp_reg_mask, int frame_size) {
+                               uint32_t fp_reg_mask) {
   // Otherwise, spill both core and fp regs at the same time.
   // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
   // down. From then on, we fill upwards. This will generate overall the same number of instructions
@@ -1613,9 +1624,9 @@
   // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
   // number of fp spills.
   if ((RoundUp(frame_size, 8) / 8 <= 63)) {
-    return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+    return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
   } else {
-    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
   }
 }
 
@@ -1653,6 +1664,7 @@
 
 void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                                int frame_size) {
+  DCHECK(base == rs_sp);
   // Restore saves and drop stack frame.
   // 2 versions:
   //
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index ba47883..094ff51 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -589,11 +589,10 @@
       call_method_insns_(arena->Adapter()) {
   // Sanity check - make sure encoding map lines up.
   for (int i = 0; i < kA64Last; i++) {
-    if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
-      LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
-                 << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
-    }
+    DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
+        << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
+        << " is wrong: expecting " << i << ", seeing "
+        << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
   }
 }
 
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 6985b73..47ccc46 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -306,7 +306,7 @@
 // algorithm will give it a low priority for promotion, even when it is referenced many times in
 // the code.
 
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
   // A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
   // We therefore return true and give it a low priority for promotion.
   return true;
@@ -673,19 +673,24 @@
   }
 }
 
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+                               MoveType move_type) {
+  UNUSED(r_dest, r_base, offset, move_type);
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+                               MoveType move_type) {
+  UNUSED(r_base, offset, r_src, move_type);
   UNIMPLEMENTED(FATAL);
   return nullptr;
 }
 
 LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+  UNUSED(op, cc, r_dest, r_src);
   LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1386,16 +1391,20 @@
 }
 
 LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+  UNUSED(r_dest, r_src);
   LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+  UNUSED(op, r_base, disp);
   LOG(FATAL) << "Unexpected use of OpMem for Arm64";
-  return NULL;
+  UNREACHABLE();
 }
 
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
+                                    QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
+  // The address of the trampoline is already loaded into r_tgt.
   return OpReg(op, r_tgt);
 }
 
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4..4bc8967 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -105,17 +105,17 @@
 void Mir2Lir::UnlinkLIR(LIR* lir) {
   if (UNLIKELY(lir == first_lir_insn_)) {
     first_lir_insn_ = lir->next;
-    if (lir->next != NULL) {
-      lir->next->prev = NULL;
+    if (lir->next != nullptr) {
+      lir->next->prev = nullptr;
     } else {
-      DCHECK(lir->next == NULL);
+      DCHECK(lir->next == nullptr);
       DCHECK(lir == last_lir_insn_);
-      last_lir_insn_ = NULL;
+      last_lir_insn_ = nullptr;
     }
   } else if (lir == last_lir_insn_) {
     last_lir_insn_ = lir->prev;
-    lir->prev->next = NULL;
-  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+    lir->prev->next = nullptr;
+  } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
     lir->prev->next = lir->next;
     lir->next->prev = lir->prev;
   }
@@ -334,10 +334,10 @@
             << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
   DumpPromotionMap();
   UpdateLIROffsets();
-  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+  for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
     DumpLIRInsn(lir_insn, 0);
   }
-  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+  for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
     LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                               lir_insn->operands[0]);
   }
@@ -368,13 +368,13 @@
       return data_target;
     data_target = data_target->next;
   }
-  return NULL;
+  return nullptr;
 }
 
 /* Search the existing constants in the literal pool for an exact wide match */
 LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
   bool lo_match = false;
-  LIR* lo_target = NULL;
+  LIR* lo_target = nullptr;
   while (data_target) {
     if (lo_match && (data_target->operands[0] == val_hi)) {
       // Record high word in case we need to expand this later.
@@ -388,7 +388,7 @@
     }
     data_target = data_target->next;
   }
-  return NULL;
+  return nullptr;
 }
 
 /* Search the existing constants in the literal pool for an exact method match */
@@ -431,7 +431,7 @@
     estimated_native_code_size_ += sizeof(value);
     return new_value;
   }
-  return NULL;
+  return nullptr;
 }
 
 /* Add a 64-bit constant to the constant pool or mixed with code */
@@ -469,14 +469,14 @@
 void Mir2Lir::InstallLiteralPools() {
   AlignBuffer(code_buffer_, data_offset_);
   LIR* data_lir = literal_list_;
-  while (data_lir != NULL) {
+  while (data_lir != nullptr) {
     Push32(code_buffer_, data_lir->operands[0]);
     data_lir = NEXT_LIR(data_lir);
   }
   // TODO: patches_.reserve() as needed.
   // Push code and method literals, record offsets for the compiler to patch.
   data_lir = code_literal_list_;
-  while (data_lir != NULL) {
+  while (data_lir != nullptr) {
     uint32_t target_method_idx = data_lir->operands[0];
     const DexFile* target_dex_file =
         reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -486,7 +486,7 @@
     data_lir = NEXT_LIR(data_lir);
   }
   data_lir = method_literal_list_;
-  while (data_lir != NULL) {
+  while (data_lir != nullptr) {
     uint32_t target_method_idx = data_lir->operands[0];
     const DexFile* target_dex_file =
         reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -497,7 +497,7 @@
   }
   // Push class literals.
   data_lir = class_literal_list_;
-  while (data_lir != NULL) {
+  while (data_lir != nullptr) {
     uint32_t target_type_idx = data_lir->operands[0];
     const DexFile* class_dex_file =
       reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -577,7 +577,7 @@
 }
 
 static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
-  for (; lir != NULL; lir = lir->next) {
+  for (; lir != nullptr; lir = lir->next) {
     lir->offset = offset;
     offset += 4;
   }
@@ -588,7 +588,7 @@
                                             unsigned int element_size) {
   // Align to natural pointer size.
   offset = RoundUp(offset, element_size);
-  for (; lir != NULL; lir = lir->next) {
+  for (; lir != nullptr; lir = lir->next) {
     lir->offset = offset;
     offset += element_size;
   }
@@ -642,7 +642,7 @@
   uint32_t dex2pc_entries = 0u;
   uint32_t dex2pc_offset = 0u;
   uint32_t dex2pc_dalvik_offset = 0u;
-  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
     pc2dex_src_entries++;
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
       pc2dex_entries += 1;
@@ -682,7 +682,7 @@
   pc2dex_dalvik_offset = 0u;
   dex2pc_offset = 0u;
   dex2pc_dalvik_offset = 0u;
-  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
     if (generate_src_map && !tgt_lir->flags.is_nop) {
       src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
               static_cast<int32_t>(tgt_lir->dalvik_offset)}));
@@ -717,7 +717,7 @@
     CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
     auto it = table.PcToDexBegin();
     auto it2 = table.DexToPcBegin();
-    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
       if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
         CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
         CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
@@ -758,7 +758,7 @@
     uint32_t native_offset = it.NativePcOffset();
     uint32_t dex_pc = it.DexPc();
     const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
-    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
         ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
     native_gc_map_builder.AddEntry(native_offset, references);
   }
@@ -904,6 +904,7 @@
 
 /* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
 void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+  UNUSED(offset);
   // NOTE: only used for debug listings.
   NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
 }
@@ -925,7 +926,7 @@
     case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
     default:
       LOG(FATAL) << "Unexpected opcode " << opcode;
-      is_taken = false;
+      UNREACHABLE();
   }
   return is_taken;
 }
@@ -941,8 +942,8 @@
     case kCondLe: res = kCondGe; break;
     case kCondGe: res = kCondLe; break;
     default:
-      res = static_cast<ConditionCode>(0);
       LOG(FATAL) << "Unexpected ccode " << before;
+      UNREACHABLE();
   }
   return res;
 }
@@ -957,8 +958,8 @@
     case kCondLe: res = kCondGt; break;
     case kCondGe: res = kCondLt; break;
     default:
-      res = static_cast<ConditionCode>(0);
       LOG(FATAL) << "Unexpected ccode " << before;
+      UNREACHABLE();
   }
   return res;
 }
@@ -966,11 +967,11 @@
 // TODO: move to mir_to_lir.cc
 Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
     : Backend(arena),
-      literal_list_(NULL),
-      method_literal_list_(NULL),
-      class_literal_list_(NULL),
-      code_literal_list_(NULL),
-      first_fixup_(NULL),
+      literal_list_(nullptr),
+      method_literal_list_(nullptr),
+      class_literal_list_(nullptr),
+      code_literal_list_(nullptr),
+      first_fixup_(nullptr),
       cu_(cu),
       mir_graph_(mir_graph),
       switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
@@ -980,8 +981,8 @@
       pointer_storage_(arena->Adapter()),
       data_offset_(0),
       total_size_(0),
-      block_label_list_(NULL),
-      promotion_map_(NULL),
+      block_label_list_(nullptr),
+      promotion_map_(nullptr),
       current_dalvik_offset_(0),
       estimated_native_code_size_(0),
       reg_pool_(nullptr),
@@ -994,8 +995,8 @@
       frame_size_(0),
       core_spill_mask_(0),
       fp_spill_mask_(0),
-      first_lir_insn_(NULL),
-      last_lir_insn_(NULL),
+      first_lir_insn_(nullptr),
+      last_lir_insn_(nullptr),
       slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
       mem_ref_type_(ResourceMask::kHeapRef),
       mask_cache_(arena) {
@@ -1005,8 +1006,8 @@
   reginfo_map_.reserve(RegStorage::kMaxRegs);
   pointer_storage_.reserve(128);
   slow_paths_.reserve(32);
-  // Reserve pointer id 0 for NULL.
-  size_t null_idx = WrapPointer(NULL);
+  // Reserve pointer id 0 for nullptr.
+  size_t null_idx = WrapPointer(nullptr);
   DCHECK_EQ(null_idx, 0U);
 }
 
@@ -1126,14 +1127,14 @@
  * unit
  */
 void Mir2Lir::AppendLIR(LIR* lir) {
-  if (first_lir_insn_ == NULL) {
-    DCHECK(last_lir_insn_ == NULL);
+  if (first_lir_insn_ == nullptr) {
+    DCHECK(last_lir_insn_ == nullptr);
     last_lir_insn_ = first_lir_insn_ = lir;
-    lir->prev = lir->next = NULL;
+    lir->prev = lir->next = nullptr;
   } else {
     last_lir_insn_->next = lir;
     lir->prev = last_lir_insn_;
-    lir->next = NULL;
+    lir->next = nullptr;
     last_lir_insn_ = lir;
   }
 }
@@ -1145,7 +1146,7 @@
  * prev_lir <-> new_lir <-> current_lir
  */
 void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
-  DCHECK(current_lir->prev != NULL);
+  DCHECK(current_lir->prev != nullptr);
   LIR *prev_lir = current_lir->prev;
 
   prev_lir->next = new_lir;
@@ -1216,7 +1217,7 @@
 void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                               SpecialTargetRegister symbolic_reg) {
   LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
     data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
     // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1233,7 +1234,7 @@
 void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                 SpecialTargetRegister symbolic_reg) {
   LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
     data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
     // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1291,7 +1292,9 @@
 }
 
 void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
+  UNREACHABLE();
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765..453a7f0 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -101,7 +101,7 @@
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
                SystemArrayCopyCharArray_must_be_static);
 
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
   MIR* insn = mir_graph->NewMIR();
   insn->offset = invoke->offset;
   insn->optimization_flags = MIR_CALLEE;
@@ -555,11 +555,11 @@
       break;
     case kInlineOpIGet:
       move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+      result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
       break;
     case kInlineOpIPut:
       move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineIPut(mir_graph, bb, invoke, move_result, method, method_idx);
+      result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
       break;
     default:
       LOG(FATAL) << "Unexpected inline op: " << method.opcode;
@@ -737,7 +737,7 @@
              method.d.data == 0u));
 
   // Insert the CONST instruction.
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
   insn->dalvikInsn.opcode = Instruction::CONST;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = method.d.data;
@@ -775,7 +775,7 @@
   }
 
   // Insert the move instruction
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = arg;
@@ -784,8 +784,7 @@
 }
 
 bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                         MIR* move_result, const InlineMethod& method,
-                                         uint32_t method_idx) {
+                                         MIR* move_result, const InlineMethod& method) {
   CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
   if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
     return false;
@@ -819,7 +818,7 @@
     invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
   }
 
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
   insn->offset = invoke->offset;
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -836,8 +835,7 @@
 }
 
 bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                         MIR* move_result, const InlineMethod& method,
-                                         uint32_t method_idx) {
+                                         MIR* move_result, const InlineMethod& method) {
   CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
   if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
     return false;
@@ -881,7 +879,7 @@
     invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
   }
 
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = src_reg;
   insn->dalvikInsn.vB = object_reg;
@@ -895,7 +893,7 @@
   bb->InsertMIRAfter(invoke, insn);
 
   if (move_result != nullptr) {
-    MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
+    MIR* move = AllocReplacementMIR(mir_graph, invoke);
     move->offset = move_result->offset;
     if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
       move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 30a2d90..cb521da 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,9 +31,9 @@
 class MethodVerifier;
 }  // namespace verifier
 
-struct BasicBlock;
+class BasicBlock;
 struct CallInfo;
-struct MIR;
+class MIR;
 class MIRGraph;
 class Mir2Lir;
 
@@ -315,9 +315,9 @@
     static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
                                    MIR* move_result, const InlineMethod& method);
     static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                              MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+                              MIR* move_result, const InlineMethod& method);
     static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                              MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+                              MIR* move_result, const InlineMethod& method);
 
     ReaderWriterMutex lock_;
     /*
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 215257b..9410f7e 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -212,8 +212,7 @@
 }
 
 void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
-                                  RegLocation rl_src2, LIR* taken,
-                                  LIR* fall_through) {
+                                  RegLocation rl_src2, LIR* taken) {
   ConditionCode cond;
   RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
   switch (opcode) {
@@ -276,8 +275,7 @@
   OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
 }
 
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
-                                      LIR* fall_through) {
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
   ConditionCode cond;
   RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
   rl_src = LoadValue(rl_src, reg_class);
@@ -2134,12 +2132,14 @@
 
 /* Call out to helper assembly routine that will null check obj and then lock it. */
 void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
+  UNUSED(opt_flags);  // TODO: avoid null check with specialized non-null helper.
   FlushAllRegs();
   CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
 }
 
 /* Call out to helper assembly routine that will null check obj and then unlock it. */
 void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
+  UNUSED(opt_flags);  // TODO: avoid null check with specialized non-null helper.
   FlushAllRegs();
   CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
 }
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bc4d00b..c7449c8 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -473,8 +473,7 @@
   cg->MarkPossibleNullPointerException(info->opt_flags);
 }
 
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
-                                                       const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
                                                        const CompilationUnit* cu, Mir2Lir* cg) {
   if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
     // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
@@ -492,9 +491,10 @@
  */
 static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                           int state, const MethodReference& target_method,
-                          uint32_t unused,
+                          uint32_t,
                           uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type) {
+  UNUSED(info);
   DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
          cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
          cu->instruction_set != kArm64);
@@ -547,7 +547,7 @@
       break;
     case 3:  // Grab the code from the method*
       if (direct_code == 0) {
-        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
           break;                                    // kInvokeTgt := arg0_ref->entrypoint
         }
       } else {
@@ -571,8 +571,9 @@
  */
 static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
-                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
-                         InvokeType unused3) {
+                         uint32_t method_idx, uintptr_t, uintptr_t,
+                         InvokeType) {
+  UNUSED(target_method);
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   /*
    * This is the fast path in which the target virtual method is
@@ -595,7 +596,7 @@
       break;
     }
     case 3:
-      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
         break;                                    // kInvokeTgt := kArg0->entrypoint
       }
       DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -614,8 +615,7 @@
  */
 static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                  const MethodReference& target_method,
-                                 uint32_t method_idx, uintptr_t unused,
-                                 uintptr_t direct_method, InvokeType unused2) {
+                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
 
   switch (state) {
@@ -641,7 +641,7 @@
       break;
     }
     case 4:
-      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
         break;                                    // kInvokeTgt := kArg0->entrypoint
       }
       DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -655,9 +655,9 @@
 static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
                             QuickEntrypointEnum trampoline, int state,
                             const MethodReference& target_method, uint32_t method_idx) {
+  UNUSED(info, method_idx);
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
 
-
   /*
    * This handles the case in which the base method is not fully
    * resolved at compile time; we bail to a runtime helper.
@@ -684,32 +684,28 @@
 static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                 int state,
                                 const MethodReference& target_method,
-                                uint32_t unused, uintptr_t unused2,
-                                uintptr_t unused3, InvokeType unused4) {
+                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
   return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
                           target_method, 0);
 }
 
 static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
-                                uint32_t unused, uintptr_t unused2,
-                                uintptr_t unused3, InvokeType unused4) {
+                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
   return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
                           target_method, 0);
 }
 
 static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
-                               uint32_t unused, uintptr_t unused2,
-                               uintptr_t unused3, InvokeType unused4) {
+                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
   return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
                           target_method, 0);
 }
 
 static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                            const MethodReference& target_method,
-                           uint32_t unused, uintptr_t unused2,
-                           uintptr_t unused3, InvokeType unused4) {
+                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
   return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
                           target_method, 0);
 }
@@ -717,8 +713,7 @@
 static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                 CallInfo* info, int state,
                                                 const MethodReference& target_method,
-                                                uint32_t unused, uintptr_t unused2,
-                                                uintptr_t unused3, InvokeType unused4) {
+                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
   return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
                           target_method, 0);
 }
@@ -1400,28 +1395,34 @@
 }
 
 bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
-  // Currently implemented only for ARM64
+  // Currently implemented only for ARM64.
+  UNUSED(info, size);
   return false;
 }
 
 bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
-  // Currently implemented only for ARM64
+  // Currently implemented only for ARM64.
+  UNUSED(info, is_min, is_double);
   return false;
 }
 
 bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+  UNUSED(info);
   return false;
 }
 
 bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+  UNUSED(info);
   return false;
 }
 
 bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+  UNUSED(info);
   return false;
 }
 
 bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+  UNUSED(info, is_double);
   return false;
 }
 
@@ -1448,6 +1449,7 @@
 }
 
 bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+  UNUSED(info);
   return false;
 }
 
@@ -1690,7 +1692,6 @@
 
   const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
   cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
-  BeginInvoke(info);
   InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
   info->type = method_info.GetSharpType();
   bool fast_path = method_info.FastPath();
@@ -1734,7 +1735,6 @@
                                 method_info.DirectCode(), method_info.DirectMethod(), original_type);
   }
   LIR* call_insn = GenCallInsn(method_info);
-  EndInvoke(info);
   MarkSafepointPC(call_insn);
 
   ClobberCallerSave();
@@ -1755,6 +1755,7 @@
 }
 
 LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+  UNUSED(method_info);
   DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
          cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
          cu_->instruction_set != kArm64);
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 01d1a1e..310e1e9 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -146,12 +146,10 @@
                  kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
                  "div", "!0r,!1r", 4),
-#if __mips_isa_rev >= 2
     ENCODING_MAP(kMipsExt, 0x7c000000,
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
                  kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
                  "ext", "!0r,!1r,!2d,!3D", 4),
-#endif
     ENCODING_MAP(kMipsJal, 0x0c000000,
                  kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -240,7 +238,6 @@
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
                  "sb", "!0r,!1d(!2r)", 4),
-#if __mips_isa_rev >= 2
     ENCODING_MAP(kMipsSeb, 0x7c000420,
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -249,7 +246,6 @@
                  kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "seh", "!0r,!1r", 4),
-#endif
     ENCODING_MAP(kMipsSh, 0xA4000000,
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b5bc45..01784e2 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,9 +24,9 @@
 
 namespace art {
 
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
-                                 const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
   // TODO
+  UNUSED(bb, mir, special);
   return false;
 }
 
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 508d474..dc6930c 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -121,7 +121,7 @@
     void GenSelect(BasicBlock* bb, MIR* mir);
     void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                           int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          int dest_reg_class) OVERRIDE;
+                          RegisterClass dest_reg_class) OVERRIDE;
     bool GenMemBarrier(MemBarrierKind barrier_kind);
     void GenMoveException(RegLocation rl_dest);
     void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -172,10 +172,10 @@
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
 
-    bool WideGPRsAreAliases() OVERRIDE {
+    bool WideGPRsAreAliases() const OVERRIDE {
       return false;  // Wide GPRs are formed by pairing.
     }
-    bool WideFPRsAreAliases() OVERRIDE {
+    bool WideFPRsAreAliases() const OVERRIDE {
       return false;  // Wide FPRs are formed by pairing.
     }
 
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3a4128a..4315915 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -207,8 +207,8 @@
   StoreValue(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
-                                bool gt_bias, bool is_double) {
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+  UNUSED(bb, mir, gt_bias, is_double);
   UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
 }
 
@@ -230,7 +230,8 @@
 }
 
 bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
-  // TODO: need Mips implementation
+  // TODO: need Mips implementation.
+  UNUSED(info, is_min, is_long);
   return false;
 }
 
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index baf7311..d58ddb0 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -217,7 +217,8 @@
 
 void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                   int dest_reg_class) {
+                                   RegisterClass dest_reg_class) {
+  UNUSED(dest_reg_class);
   // Implement as a branch-over.
   // TODO: Conditional move?
   LoadConstant(rs_dest, true_val);
@@ -228,10 +229,12 @@
 }
 
 void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Need codegen for select";
 }
 
 void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
 }
 
@@ -262,34 +265,39 @@
   return rl_result;
 }
 
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation rl_src2, bool is_div, int flags) {
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                                   bool is_div, int flags) {
+  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
   LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
-  return rl_dest;
+  UNREACHABLE();
 }
 
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+                                      bool is_div) {
+  UNUSED(rl_dest, rl_src1, lit, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
-  DCHECK_NE(cu_->instruction_set, kThumb2);
+  UNUSED(info, is_long, is_object);
   return false;
 }
 
 bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
-  // TODO - add Mips implementation
+  UNUSED(info);
+  // TODO: add Mips implementation.
   return false;
 }
 
 bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
-  // TODO - add Mips implementation
+  UNUSED(info);
+  // TODO: add Mips implementation.
   return false;
 }
 
 bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
-  DCHECK_NE(cu_->instruction_set, kThumb2);
+  UNUSED(info);
   return false;
 }
 
@@ -325,23 +333,27 @@
 }
 
 LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  UNUSED(reg, target);
   LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVldm for Mips";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVstm for Mips";
-  return NULL;
+  UNREACHABLE();
 }
 
 void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
+  UNUSED(lit);
   RegStorage t_reg = AllocTemp();
   OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
   OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -373,27 +385,31 @@
 
 bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
-  return false;
+  UNREACHABLE();
 }
 
 bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
-  return false;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
+  UNUSED(cond, guide);
   LOG(FATAL) << "Unexpected use of OpIT in Mips";
-  return NULL;
+  UNREACHABLE();
 }
 
 void MipsMir2Lir::OpEndIT(LIR* it) {
+  UNUSED(it);
   LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
 }
 
-
 void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
+  UNUSED(opcode);
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -416,6 +432,7 @@
 
 void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
+  UNUSED(opcode);
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -629,6 +646,7 @@
 
 void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_shift, int flags) {
+  UNUSED(flags);
   // Default implementation is just to ignore the constant case.
   GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
 }
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 495eb16..3615916 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -142,7 +142,7 @@
 // This bit determines how the CPU access FP registers.
 #define FR_BIT   0
 
-enum MipsNativeRegisterPool {
+enum MipsNativeRegisterPool {  // private marker to keep generate-operator-out.py from processing this enum.
   rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
   rAT   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
   rV0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
@@ -408,9 +408,7 @@
   kMipsBnez,  // bnez s,o [000101] s[25..21] [00000] o[15..0].
   kMipsBne,   // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
   kMipsDiv,   // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-#if __mips_isa_rev >= 2
   kMipsExt,   // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-#endif
   kMipsJal,   // jal t [000011] t[25..0].
   kMipsJalr,  // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
   kMipsJr,    // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
@@ -433,10 +431,8 @@
   kMipsOri,   // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
   kMipsPref,  // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
   kMipsSb,    // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-#if __mips_isa_rev >= 2
   kMipsSeb,   // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
   kMipsSeh,   // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-#endif
   kMipsSh,    // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
   kMipsSll,   // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
   kMipsSllv,  // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
@@ -481,15 +477,17 @@
   kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
   kMipsLast
 };
+std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
 
 // Instruction assembly field_loc kind.
 enum MipsEncodingKind {
   kFmtUnused,
-  kFmtBitBlt,    /* Bit string using end/start */
-  kFmtDfp,       /* Double FP reg */
-  kFmtSfp,       /* Single FP reg */
-  kFmtBlt5_2,    /* Same 5-bit field to 2 locations */
+  kFmtBitBlt,    // Bit string using end/start.
+  kFmtDfp,       // Double FP reg.
+  kFmtSfp,       // Single FP reg.
+  kFmtBlt5_2,    // Same 5-bit field to 2 locations.
 };
+std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
 
 // Struct used to define the snippet positions for each MIPS opcode.
 struct MipsEncodingMap {
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index d3719ab..4a340ec 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -421,6 +421,7 @@
 }
 
 bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+  UNUSED(barrier_kind);
 #if ANDROID_SMP != 0
   NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
   return true;
@@ -574,11 +575,10 @@
 MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
     : Mir2Lir(cu, mir_graph, arena) {
   for (int i = 0; i < kMipsLast; i++) {
-    if (MipsMir2Lir::EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
-                 << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
-    }
+    DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
+        << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+        << " is wrong: expecting " << i << ", seeing "
+        << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
   }
 }
 
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 7178ede..044972c 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,14 +56,17 @@
 }
 
 bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
+  UNUSED(value);
   return false;  // TUNING
 }
 
 bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
+  UNUSED(value);
   return false;  // TUNING
 }
 
 bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
+  UNUSED(value);
   return false;  // TUNING
 }
 
@@ -320,25 +323,28 @@
        return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
     default:
       LOG(FATAL) << "Bad case in OpRegReg";
-      break;
+      UNREACHABLE();
   }
   return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
 }
 
 LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                               MoveType move_type) {
+  UNUSED(r_dest, r_base, offset, move_type);
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+  UNUSED(r_base, offset, r_src, move_type);
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+  UNUSED(op, cc, r_dest, r_src);
   LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
@@ -681,16 +687,19 @@
 }
 
 LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+  UNUSED(op, r_base, disp);
   LOG(FATAL) << "Unexpected use of OpMem for MIPS";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+  UNUSED(cc, target);
   LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  UNUSED(trampoline);  // The address of the trampoline is already loaded into r_tgt.
   return OpReg(op, r_tgt);
 }
 
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 408606d..533a677 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -647,7 +647,6 @@
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
       LIR* taken = &label_list[bb->taken];
-      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
@@ -664,7 +663,7 @@
              !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
           GenSuspendTest(opt_flags);
         }
-        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
+        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
       }
       break;
       }
@@ -676,7 +675,6 @@
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
       LIR* taken = &label_list[bb->taken];
-      LIR* fall_through = &label_list[bb->fall_through];
       // Result known at compile time?
       if (rl_src[0].is_const) {
         bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
@@ -692,7 +690,7 @@
              !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
           GenSuspendTest(opt_flags);
         }
-        GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+        GenCompareZeroAndBranch(opcode, rl_src[0], taken);
       }
       break;
       }
@@ -1377,8 +1375,9 @@
 }
 
 size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
-  UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
-  return 0;
+  UNUSED(lir);
+  UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+  UNREACHABLE();
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f4e6dfe..ef1e7e3 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,6 +33,7 @@
 #include "utils/array_ref.h"
 #include "utils/arena_allocator.h"
 #include "utils/arena_containers.h"
+#include "utils/arena_object.h"
 #include "utils/stack_checks.h"
 
 namespace art {
@@ -129,11 +130,11 @@
 #define INVALID_SREG (-1)
 #endif
 
-struct BasicBlock;
+class BasicBlock;
 struct CallInfo;
 struct CompilationUnit;
 struct InlineMethod;
-struct MIR;
+class MIR;
 struct LIR;
 struct RegisterInfo;
 class DexFileMethodInliner;
@@ -501,12 +502,11 @@
     // has completed.
     //
 
-    class LIRSlowPath {
+    class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
      public:
       LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                   LIR* cont = nullptr) :
         m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
-          m2l->StartSlowPath(this);
       }
       virtual ~LIRSlowPath() {}
       virtual void Compile() = 0;
@@ -694,11 +694,6 @@
     void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
     void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
 
-    virtual void StartSlowPath(LIRSlowPath* slowpath) {}
-    virtual void BeginInvoke(CallInfo* info) {}
-    virtual void EndInvoke(CallInfo* info) {}
-
-
     // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
     virtual RegLocation NarrowRegLoc(RegLocation loc);
 
@@ -822,10 +817,9 @@
     LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
     LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
     virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
-    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
-                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
-    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
-                                 LIR* taken, LIR* fall_through);
+    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
+                             LIR* taken);
+    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
     virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
     void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src);
@@ -1350,7 +1344,7 @@
      */
     virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  int dest_reg_class) = 0;
+                                  RegisterClass dest_reg_class) = 0;
 
     /**
      * @brief Used to generate a memory barrier in an architecture specific way.
@@ -1452,6 +1446,7 @@
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
     virtual bool InexpensiveConstantDouble(int64_t value) = 0;
     virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+      UNUSED(opcode);
       return InexpensiveConstantInt(value);
     }
 
@@ -1642,12 +1637,12 @@
     /**
      * Returns true iff wide GPRs are just different views on the same physical register.
      */
-    virtual bool WideGPRsAreAliases() = 0;
+    virtual bool WideGPRsAreAliases() const = 0;
 
     /**
      * Returns true iff wide FPRs are just different views on the same physical register.
      */
-    virtual bool WideFPRsAreAliases() = 0;
+    virtual bool WideFPRsAreAliases() const = 0;
 
 
     enum class WidenessCheck {  // private
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8f7bd30..5d49a91 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -625,6 +625,7 @@
 }
 
 Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+  UNUSED(compilation_unit);
   Mir2Lir* mir_to_lir = nullptr;
   switch (cu->instruction_set) {
     case kThumb2:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6305b22..0a98c80 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -316,16 +316,16 @@
 
 // TODO: this is Thumb2 only.  Remove when DoPromotion refactored.
 RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
-  RegStorage res;
+  UNUSED(s_reg);
   UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
-  return res;
+  UNREACHABLE();
 }
 
 // TODO: this is Thumb2 only.  Remove when DoPromotion refactored.
 RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
-  RegStorage res;
+  UNUSED(s_reg);
   UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
-  return res;
+  UNREACHABLE();
 }
 
 
@@ -1392,6 +1392,7 @@
 }
 
 bool Mir2Lir::LiveOut(int s_reg) {
+  UNUSED(s_reg);
   // For now.
   return true;
 }
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
index 436cdb5..78e81b2 100644
--- a/compiler/dex/quick/resource_mask.h
+++ b/compiler/dex/quick/resource_mask.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #include "base/logging.h"
+#include "base/value_object.h"
 #include "dex/reg_storage.h"
 
 namespace art {
@@ -113,10 +114,7 @@
     return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
   }
 
-  void SetBit(size_t bit) {
-    DCHECK_LE(bit, kHighestCommonResource);
-    masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
-  }
+  void SetBit(size_t bit);
 
   constexpr bool HasBit(size_t bit) const {
     return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
@@ -139,6 +137,12 @@
 
   friend class ResourceMaskCache;
 };
+std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
+
+inline void ResourceMask::SetBit(size_t bit) {
+  DCHECK_LE(bit, kHighestCommonResource);
+  masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+}
 
 constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
 constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index dce2b73..85a3b32 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -547,6 +547,11 @@
   { kX86RepneScasw,    kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
 };
 
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+  os << X86Mir2Lir::EncodingMap[rhs].name;
+  return os;
+}
+
 static bool NeedsRex(int32_t raw_reg) {
   return RegStorage::RegNum(raw_reg) > 7;
 }
@@ -1631,6 +1636,7 @@
  * sequence or request that the trace be shortened and retried.
  */
 AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+  UNUSED(start_addr);
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e..497ef94 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -290,9 +290,10 @@
  */
 static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                              int state, const MethodReference& target_method,
-                             uint32_t unused,
+                             uint32_t,
                              uintptr_t direct_code, uintptr_t direct_method,
                              InvokeType type) {
+  UNUSED(info, direct_code);
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (direct_method != 0) {
     switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7b5b831..dec99ae 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -250,7 +250,7 @@
   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
   void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                         int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        int dest_reg_class) OVERRIDE;
+                        RegisterClass dest_reg_class) OVERRIDE;
   bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
   void GenMoveException(RegLocation rl_dest) OVERRIDE;
   void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -499,7 +499,7 @@
   void GenConstWide(RegLocation rl_dest, int64_t value);
   void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
   void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
-  void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+  void GenShiftByteVector(MIR* mir);
   void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
                              uint32_t m4);
   void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -557,88 +557,80 @@
 
   /*
    * @brief Load 128 bit constant into vector register.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector
    * @note vA is the TypeSize for the register.
    * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
    */
-  void GenConst128(BasicBlock* bb, MIR* mir);
+  void GenConst128(MIR* mir);
 
   /*
    * @brief MIR to move a vectorized register to another.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination
    * @note vC: source
    */
-  void GenMoveVector(BasicBlock *bb, MIR *mir);
+  void GenMoveVector(MIR* mir);
 
   /*
    * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
    * the type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+  void GenMultiplyVector(MIR* mir);
 
   /*
    * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenAddVector(BasicBlock *bb, MIR *mir);
+  void GenAddVector(MIR* mir);
 
   /*
    * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenSubtractVector(BasicBlock *bb, MIR *mir);
+  void GenSubtractVector(MIR* mir);
 
   /*
    * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+  void GenShiftLeftVector(MIR* mir);
 
   /*
    * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
    * know the type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+  void GenSignedShiftRightVector(MIR* mir);
 
   /*
    * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
    * to know the type of the vector.
-   * @param bb The basic block in which the MIR is from..
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+  void GenUnsignedShiftRightVector(MIR* mir);
 
   /*
    * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -647,51 +639,47 @@
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenAndVector(BasicBlock *bb, MIR *mir);
+  void GenAndVector(MIR* mir);
 
   /*
    * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenOrVector(BasicBlock *bb, MIR *mir);
+  void GenOrVector(MIR* mir);
 
   /*
    * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenXorVector(BasicBlock *bb, MIR *mir);
+  void GenXorVector(MIR* mir);
 
   /*
    * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
    * @note vA: TypeSize
    * @note vB: destination and source VR (not vector register)
    * @note vC: source (vector register)
    */
-  void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+  void GenAddReduceVector(MIR* mir);
 
   /*
    * @brief Extract a packed element into a single VR.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination VR (not vector register)
    * @note vC: source (vector register)
    * @note arg[0]: The index to use for extraction from vector register (which packed element).
    */
-  void GenReduceVector(BasicBlock *bb, MIR *mir);
+  void GenReduceVector(MIR* mir);
 
   /*
    * @brief Create a vector value, with all TypeSize values equal to vC
@@ -701,21 +689,21 @@
    * @note vB: destination vector register.
    * @note vC: source VR (not vector register).
    */
-  void GenSetVector(BasicBlock *bb, MIR *mir);
+  void GenSetVector(MIR* mir);
 
   /**
    * @brief Used to generate code for kMirOpPackedArrayGet.
    * @param bb The basic block of MIR.
    * @param mir The mir whose opcode is kMirOpPackedArrayGet.
    */
-  void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+  void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
 
   /**
    * @brief Used to generate code for kMirOpPackedArrayPut.
    * @param bb The basic block of MIR.
    * @param mir The mir whose opcode is kMirOpPackedArrayPut.
    */
-  void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+  void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Generate code for a vector opcode.
@@ -890,8 +878,8 @@
    * the value is live in a temp register of the correct class.  Additionally, if the value is in
    * a temp register of the wrong register class, it will be clobbered.
    */
-  RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
-  RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+  RegLocation UpdateLocTyped(RegLocation loc);
+  RegLocation UpdateLocWideTyped(RegLocation loc);
 
   /*
    * @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -902,7 +890,7 @@
    * @brief Analyze one basic block.
    * @param bb Basic block to analyze.
    */
-  void AnalyzeBB(BasicBlock * bb);
+  void AnalyzeBB(BasicBlock* bb);
 
   /*
    * @brief Analyze one extended MIR instruction
@@ -910,7 +898,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Extended instruction to analyze.
    */
-  void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+  void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one MIR instruction
@@ -918,7 +906,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+  virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one MIR float/double instruction
@@ -926,7 +914,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+  virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one use of a double operand.
@@ -940,7 +928,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+  void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
 
   // Information derived from analysis of MIR
 
@@ -987,12 +975,11 @@
    */
   LIR* AddVectorLiteral(int32_t* constants);
 
-  InToRegStorageMapping in_to_reg_storage_mapping_;
-
-  bool WideGPRsAreAliases() OVERRIDE {
+  bool WideGPRsAreAliases() const OVERRIDE {
     return cu_->target64;  // On 64b, we have 64b GPRs.
   }
-  bool WideFPRsAreAliases() OVERRIDE {
+
+  bool WideFPRsAreAliases() const OVERRIDE {
     return true;  // xmm registers have 64b views even on x86.
   }
 
@@ -1002,11 +989,17 @@
    */
   static void DumpRegLocation(RegLocation loc);
 
-  static const X86EncodingMap EncodingMap[kX86Last];
+  InToRegStorageMapping in_to_reg_storage_mapping_;
 
  private:
   void SwapBits(RegStorage result_reg, int shift, int32_t value);
   void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+  static const X86EncodingMap EncodingMap[kX86Last];
+
+  friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5c..254d90f 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -169,8 +169,7 @@
    * If the result's location is in memory, then we do not need to do anything
    * more since the fstp has already placed the correct value in memory.
    */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
-      UpdateLocTyped(rl_dest, kFPReg);
+  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
   if (rl_result.location == kLocPhysReg) {
     /*
      * We already know that the result is in a physical register but do not know if it is the
@@ -431,8 +430,7 @@
    * If the result's location is in memory, then we do not need to do anything
    * more since the fstp has already placed the correct value in memory.
    */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
-      UpdateLocTyped(rl_dest, kFPReg);
+  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
   if (rl_result.location == kLocPhysReg) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
     if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index aa1bf7f..7229318 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@
 
 void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  int dest_reg_class) {
+                                  RegisterClass dest_reg_class) {
   DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
   DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
 
@@ -268,6 +268,7 @@
 }
 
 void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+  UNUSED(rl_dest, reg_lo, lit, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@
 
 RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) {
+  UNUSED(rl_dest, reg_lo, reg_hi, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRem for x86";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_src2, bool is_div, int flags) {
+  UNUSED(rl_dest);
   // We have to use fixed registers, so flush all the temps.
 
   // Prepare for explicit register usage.
@@ -1022,7 +1026,7 @@
     DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
     // In 32-bit mode, only the EAX..EDX registers can be used with Mov8MR.
     if (!cu_->target64 && size == kSignedByte) {
-      rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+      rl_src_value = UpdateLocTyped(rl_src_value);
       if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
         RegStorage temp = AllocateByteRegister();
         OpRegCopy(temp, rl_src_value.reg);
@@ -1309,18 +1313,21 @@
 }
 
 LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVldm for x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVstm for x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
+  UNUSED(lit);
   RegStorage t_reg = AllocTemp();
   OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
   OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1453,22 +1460,27 @@
 
 bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                     RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of SmallLiteralDivRem in x86";
-  return false;
+  UNREACHABLE();
 }
 
 bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of easyMultiply in x86";
-  return false;
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+  UNUSED(cond, guide);
   LOG(FATAL) << "Unexpected use of OpIT in x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::OpEndIT(LIR* it) {
+  UNUSED(it);
   LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
@@ -1486,6 +1498,7 @@
 }
 
 void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+  UNUSED(sreg);
   // All memory accesses below reference dalvik regs.
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
 
@@ -1616,7 +1629,7 @@
     int32_t val_hi = High32Bits(val);
     // Prepare for explicit register usage.
     ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-    rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+    rl_src1 = UpdateLocWideTyped(rl_src1);
     bool src1_in_reg = rl_src1.location == kLocPhysReg;
     int displacement = SRegOffset(rl_src1.s_reg_low);
 
@@ -1700,8 +1713,8 @@
 
   // Prepare for explicit register usage.
   ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
-  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+  rl_src1 = UpdateLocWideTyped(rl_src1);
+  rl_src2 = UpdateLocWideTyped(rl_src2);
 
   // At this point, the VRs are in their home locations.
   bool src1_in_reg = rl_src1.location == kLocPhysReg;
@@ -1837,12 +1850,12 @@
 }
 
 void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
   if (rl_dest.location == kLocPhysReg) {
     // Ensure we are in a register pair
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
 
-    rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+    rl_src = UpdateLocWideTyped(rl_src);
     GenLongRegOrMemOp(rl_result, rl_src, op);
     StoreFinalValueWide(rl_dest, rl_result);
     return;
@@ -1850,7 +1863,7 @@
     // Handle the case when src and dest intersect.
     rl_src = LoadValueWide(rl_src, kCoreReg);
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+    rl_src = UpdateLocWideTyped(rl_src);
     GenLongRegOrMemOp(rl_result, rl_src, op);
     StoreFinalValueWide(rl_dest, rl_result);
     return;
@@ -1910,7 +1923,7 @@
     rl_result = ForceTempWide(rl_result);
 
     // Perform the operation using the RHS.
-    rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+    rl_src2 = UpdateLocWideTyped(rl_src2);
     GenLongRegOrMemOp(rl_result, rl_src2, op);
 
     // And now record that the result is in the temp.
@@ -1919,10 +1932,9 @@
   }
 
   // It wasn't in registers, so it better be in memory.
-  DCHECK((rl_dest.location == kLocDalvikFrame) ||
-         (rl_dest.location == kLocCompilerTemp));
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
-  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+  DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+  rl_src1 = UpdateLocWideTyped(rl_src1);
+  rl_src2 = UpdateLocWideTyped(rl_src2);
 
   // Get one of the source operands into temporary register.
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2088,7 +2100,7 @@
       NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
     } else {
       // Only need this once.  Multiply directly from the value.
-      rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+      rl_src = UpdateLocWideTyped(rl_src);
       if (rl_src.location != kLocPhysReg) {
         // Okay, we can do this from memory.
         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2395,6 +2407,7 @@
 
 RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                           RegLocation rl_src, int shift_amount, int flags) {
+  UNUSED(flags);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   if (cu_->target64) {
     OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
@@ -2692,7 +2705,7 @@
       return in_mem ? kX86Xor32MI : kX86Xor32RI;
     default:
       LOG(FATAL) << "Unexpected opcode: " << op;
-      return kX86Add32MI;
+      UNREACHABLE();
   }
 }
 
@@ -2706,7 +2719,7 @@
       return false;
     }
 
-    rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+    rl_dest = UpdateLocWideTyped(rl_dest);
 
     if ((rl_dest.location == kLocDalvikFrame) ||
         (rl_dest.location == kLocCompilerTemp)) {
@@ -2736,7 +2749,7 @@
 
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
 
   // Can we just do this into memory?
   if ((rl_dest.location == kLocDalvikFrame) ||
@@ -2812,8 +2825,8 @@
 
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
+  rl_src1 = UpdateLocWideTyped(rl_src1);
 
   // Can we do this directly into the destination registers?
   if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -3035,7 +3048,7 @@
 
   if (unary) {
     rl_lhs = LoadValue(rl_lhs, kCoreReg);
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
     OpRegReg(op, rl_result.reg, rl_lhs.reg);
   } else {
@@ -3045,7 +3058,7 @@
       LoadValueDirectFixed(rl_rhs, t_reg);
       if (is_two_addr) {
         // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory
           OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3068,12 +3081,12 @@
       // Multiply is 3 operand only (sort of).
       if (is_two_addr && op != kOpMul) {
         // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location == kLocPhysReg) {
           // Ensure res is in a core reg
           rl_result = EvalLoc(rl_dest, kCoreReg, true);
           // Can we do this from memory directly?
-          rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+          rl_rhs = UpdateLocTyped(rl_rhs);
           if (rl_rhs.location != kLocPhysReg) {
             OpRegMem(op, rl_result.reg, rl_rhs);
             StoreFinalValue(rl_dest, rl_result);
@@ -3088,7 +3101,7 @@
         // It might happen that rl_rhs and rl_dest are the same VR;
         // in this case rl_dest is in a reg after LoadValue while
         // rl_result is not updated yet, so do the update here.
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory.
           OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3105,8 +3118,8 @@
         }
       } else {
         // Try to use reg/memory instructions.
-        rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
-        rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+        rl_lhs = UpdateLocTyped(rl_lhs);
+        rl_rhs = UpdateLocTyped(rl_rhs);
         // We can't optimize with FP registers.
         if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
           // Something is difficult, so fall back to the standard case.
@@ -3178,7 +3191,7 @@
     Mir2Lir::GenIntToLong(rl_dest, rl_src);
     return;
   }
-  rl_src = UpdateLocTyped(rl_src, kCoreReg);
+  rl_src = UpdateLocTyped(rl_src);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
     NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
@@ -3278,7 +3291,7 @@
   LoadValueDirectFixed(rl_shift, t_reg);
   if (is_two_addr) {
     // Can we do this directly into memory?
-    rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocWideTyped(rl_dest);
     if (rl_result.location != kLocPhysReg) {
       // Okay, we can do this into memory
       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
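Several x86 stubs above now follow LOG(FATAL) with UNREACHABLE() instead of returning a placeholder value. A stand-in sketch of that pattern (LOG_FATAL_SKETCH and UNREACHABLE_SKETCH are illustrative macros, not ART's real ones), showing why the dead return lines can go:

#include <cstdlib>
#include <iostream>

#define LOG_FATAL_SKETCH(msg) (std::cerr << (msg) << std::endl, std::abort())
#define UNREACHABLE_SKETCH() __builtin_unreachable()

struct LirSketch {};  // Stand-in for LIR.

LirSketch* OpVldmSketch(int /*count*/) {
  LOG_FATAL_SKETCH("Unexpected use of OpVldm for x86");
  UNREACHABLE_SKETCH();  // abort() never returns, so no dummy "return nullptr;" is needed.
}

int main() {
  // OpVldmSketch(8);  // Would abort by design: the operation is invalid on x86.
  return 0;
}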
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eeb..9616d8f 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@
 
 RegStorage rs_rX86_SP;
 
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
 RegStorage rs_rX86_ARG0;
 RegStorage rs_rX86_ARG1;
 RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@
 }
 
 RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+  UNUSED(reg);
   LOG(FATAL) << "Do not use this function!!!";
-  return RegStorage::InvalidReg();
+  UNREACHABLE();
 }
 
 /*
@@ -795,14 +777,11 @@
   class_type_address_insns_.reserve(100);
   call_method_insns_.reserve(100);
   store_method_addr_used_ = false;
-  if (kIsDebugBuild) {
-    for (int i = 0; i < kX86Last; i++) {
-      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
-        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
-                   << " is wrong: expecting " << i << ", seeing "
-                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
-      }
-    }
+  for (int i = 0; i < kX86Last; i++) {
+    DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+        << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+        << " is wrong: expecting " << i << ", seeing "
+        << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
   }
   if (cu_->target64) {
     rs_rX86_SP = rs_rX86_SP_64;
@@ -821,20 +800,6 @@
     rs_rX86_FARG5 = rs_fr5;
     rs_rX86_FARG6 = rs_fr6;
     rs_rX86_FARG7 = rs_fr7;
-    rX86_ARG0 = rDI;
-    rX86_ARG1 = rSI;
-    rX86_ARG2 = rDX;
-    rX86_ARG3 = rCX;
-    rX86_ARG4 = r8;
-    rX86_ARG5 = r9;
-    rX86_FARG0 = fr0;
-    rX86_FARG1 = fr1;
-    rX86_FARG2 = fr2;
-    rX86_FARG3 = fr3;
-    rX86_FARG4 = fr4;
-    rX86_FARG5 = fr5;
-    rX86_FARG6 = fr6;
-    rX86_FARG7 = fr7;
     rs_rX86_INVOKE_TGT = rs_rDI;
   } else {
     rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@
     rs_rX86_FARG5 = RegStorage::InvalidReg();
     rs_rX86_FARG6 = RegStorage::InvalidReg();
     rs_rX86_FARG7 = RegStorage::InvalidReg();
-    rX86_ARG0 = rAX;
-    rX86_ARG1 = rCX;
-    rX86_ARG2 = rDX;
-    rX86_ARG3 = rBX;
-    rX86_FARG0 = rAX;
-    rX86_FARG1 = rCX;
-    rX86_FARG2 = rDX;
-    rX86_FARG3 = rBX;
     rs_rX86_INVOKE_TGT = rs_rAX;
     // TODO(64): Initialize with invalid reg
 //    rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@
   rs_rX86_RET0 = rs_rAX;
   rs_rX86_RET1 = rs_rDX;
   rs_rX86_COUNT = rs_rCX;
-  rX86_RET0 = rAX;
-  rX86_RET1 = rDX;
-  rX86_INVOKE_TGT = rAX;
-  rX86_COUNT = rCX;
 }
 
 Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@
 
 // Not used in x86(-64)
 RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+  UNUSED(trampoline);
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
-  return RegStorage::InvalidReg();
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@
       ReturnVectorRegisters(mir);
       break;
     case kMirOpConstVector:
-      GenConst128(bb, mir);
+      GenConst128(mir);
       break;
     case kMirOpMoveVector:
-      GenMoveVector(bb, mir);
+      GenMoveVector(mir);
       break;
     case kMirOpPackedMultiply:
-      GenMultiplyVector(bb, mir);
+      GenMultiplyVector(mir);
       break;
     case kMirOpPackedAddition:
-      GenAddVector(bb, mir);
+      GenAddVector(mir);
       break;
     case kMirOpPackedSubtract:
-      GenSubtractVector(bb, mir);
+      GenSubtractVector(mir);
       break;
     case kMirOpPackedShiftLeft:
-      GenShiftLeftVector(bb, mir);
+      GenShiftLeftVector(mir);
       break;
     case kMirOpPackedSignedShiftRight:
-      GenSignedShiftRightVector(bb, mir);
+      GenSignedShiftRightVector(mir);
       break;
     case kMirOpPackedUnsignedShiftRight:
-      GenUnsignedShiftRightVector(bb, mir);
+      GenUnsignedShiftRightVector(mir);
       break;
     case kMirOpPackedAnd:
-      GenAndVector(bb, mir);
+      GenAndVector(mir);
       break;
     case kMirOpPackedOr:
-      GenOrVector(bb, mir);
+      GenOrVector(mir);
       break;
     case kMirOpPackedXor:
-      GenXorVector(bb, mir);
+      GenXorVector(mir);
       break;
     case kMirOpPackedAddReduce:
-      GenAddReduceVector(bb, mir);
+      GenAddReduceVector(mir);
       break;
     case kMirOpPackedReduce:
-      GenReduceVector(bb, mir);
+      GenReduceVector(mir);
       break;
     case kMirOpPackedSet:
-      GenSetVector(bb, mir);
+      GenSetVector(mir);
       break;
     case kMirOpMemBarrier:
       GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@
   }
 }
 
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   Clobber(rs_dest);
 
@@ -1689,7 +1643,7 @@
   load->target = data_target;
 }
 
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@
   NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
 }
 
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
   // The destination does not need to be clobbered because it has already been
   // clobbered as part of the general packed shift handler (the caller of this method).
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@
   AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
 }
 
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     default:
       LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     case k64:
       // TODO Implement emulated shift algorithm.
     default:
       LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
-      break;
+      UNREACHABLE();
   }
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     default:
       LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@
   NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@
   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@
   AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
 }
 
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
   bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@
     // except the rhs is not a VR but a physical register allocated above.
     // No load of source VR is done because it assumes that rl_result will
     // share physical register / memory location.
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
     if (rl_result.location == kLocPhysReg) {
       // Ensure res is in a core reg.
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@
   }
 }
 
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
   RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@
   } else {
     int extract_index = mir->dalvikInsn.arg[0];
     int extr_opcode = 0;
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
 
     // Handle the rest of integral types now.
     switch (opsize) {
@@ -2302,7 +2256,7 @@
         break;
       default:
         LOG(FATAL) << "Unsupported vector reduce " << opsize;
-        return;
+        UNREACHABLE();
     }
 
     if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@
   }
 }
 
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@
   }
 }
 
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
 }
 
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
 }
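The constructor hunk above folds the hand-rolled kIsDebugBuild/LOG(FATAL) check into DCHECK_EQ with a streamed message. A stand-in sketch (DCHECK_EQ_SKETCH, kDebugSketch and EncodingEntrySketch are made-up names) of why a debug-only check macro makes the explicit guard redundant:

#include <cstdlib>
#include <iostream>

static constexpr bool kDebugSketch = true;  // Stand-in for kIsDebugBuild.

// Stand-in for DCHECK_EQ: the whole body folds away when kDebugSketch is false.
#define DCHECK_EQ_SKETCH(a, b)                                        \
  do {                                                                \
    if (kDebugSketch && (a) != (b)) {                                 \
      std::cerr << "Check failed: " << (a) << " != " << (b) << '\n';  \
      std::abort();                                                   \
    }                                                                 \
  } while (false)

struct EncodingEntrySketch {
  int opcode;
  const char* name;
};

static const EncodingEntrySketch kEncodingSketch[] = { {0, "Add"}, {1, "Sub"} };

int main() {
  for (int i = 0; i < 2; i++) {
    // The macro guards itself, so no outer if (kDebugSketch) wrapper is needed.
    DCHECK_EQ_SKETCH(kEncodingSketch[i].opcode, i);
  }
  return 0;
}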
 
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc..cb9a24a 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@
 }
 
 bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+  UNUSED(value);
   return true;
 }
 
 bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
-  return false;
+  return value == 0;
 }
 
 bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+  UNUSED(value);
   return true;
 }
 
@@ -934,13 +936,14 @@
 
 LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                    int offset, int check_value, LIR* target, LIR** compare) {
-    LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
-            offset, check_value);
-    if (compare != nullptr) {
-        *compare = inst;
-    }
-    LIR* branch = OpCondBranch(cond, target);
-    return branch;
+  UNUSED(temp_reg);  // Comparison performed directly with memory.
+  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+      offset, check_value);
+  if (compare != nullptr) {
+    *compare = inst;
+  }
+  LIR* branch = OpCondBranch(cond, target);
+  return branch;
 }
 
 void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +968,13 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     // Ignore dead blocks
     return;
   }
 
-  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
     if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
       AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +985,7 @@
 }
 
 
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
   switch (opcode) {
     // Instructions referencing doubles.
     case kMirOpFusedCmplDouble:
@@ -1009,7 +1012,7 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
   // Looking for
   // - Do we need a pointer to the code (used for packed switches and double lits)?
 
@@ -1046,7 +1049,8 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   // Look at all the uses, and see if they are double constants.
   uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
   int next_sreg = 0;
@@ -1080,7 +1084,7 @@
   }
 }
 
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
   loc = UpdateLoc(loc);
   if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
     if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1098,7 @@
   return loc;
 }
 
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
   loc = UpdateLocWide(loc);
   if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
     if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1112,8 @@
   return loc;
 }
 
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+  UNUSED(opcode, bb);
   // For now this is only relevant for x86-32.
   if (cu_->target64) {
     return;
@@ -1132,6 +1137,7 @@
 }
 
 LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  UNUSED(r_tgt);  // Call to absolute memory location doesn't need a temporary target register.
   if (cu_->target64) {
     return OpThreadMem(op, GetThreadOffset<8>(trampoline));
   } else {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30..afdc244 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,25 +313,6 @@
 constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
 constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
 
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
 extern RegStorage rs_rX86_ARG0;
 extern RegStorage rs_rX86_ARG1;
 extern RegStorage rs_rX86_ARG2;
@@ -674,6 +655,7 @@
   kX86RepneScasw,       // repne scasw
   kX86Last
 };
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
 
 /* Instruction assembly field_loc kind */
 enum X86EncodingKind {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 706933a..4a84ff2 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_DEX_REG_STORAGE_H_
 
 #include "base/logging.h"
+#include "base/value_object.h"
 #include "compiler_enums.h"  // For WideKind
 
 namespace art {
@@ -72,7 +73,7 @@
  * records.
  */
 
-class RegStorage {
+class RegStorage : public ValueObject {
  public:
   enum RegStorageKind {
     kValidMask     = 0x8000,
@@ -112,7 +113,7 @@
   }
   constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
       : reg_(
-          DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+          DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
           DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
                            << low_reg << ", " << high_reg, 0u)
           DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
@@ -331,9 +332,8 @@
       case k256BitSolo: return 32;
       case k512BitSolo: return 64;
       case k1024BitSolo: return 128;
-      default: LOG(FATAL) << "Unexpected shape";
+      default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
     }
-    return 0;
   }
 
  private:
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a8e6b3c..4929b5b 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,6 +106,8 @@
   if (use_sea) {
     return true;
   }
+#else
+  UNUSED(method_ref);
 #endif
   if (!compiler_options_->IsCompilationEnabled()) {
     return false;