Merge "Fixing gtest failure due to VerifyClassUsingOatFile change" into dalvik-dev
diff --git a/Android.mk b/Android.mk
index 971eb2f..ff08dca 100644
--- a/Android.mk
+++ b/Android.mk
@@ -334,7 +334,7 @@
 .PHONY: cpplint-art
 cpplint-art:
 	./art/tools/cpplint.py \
-	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens \
+	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens,+build/namespaces,+readability/fn_size \
 	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/)
 
 # "mm cpplint-art-aspirational" to see warnings we would like to fix
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index de30859..2a05b77 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -83,7 +83,7 @@
                    OatBitMapKind kind = kBitMapMisc);
     ~ArenaBitVector() {};
 
-    static void* operator new( size_t size, ArenaAllocator* arena) {
+    static void* operator new(size_t size, ArenaAllocator* arena) {
       return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
     }
     static void operator delete(void* p) {};  // Nop.
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index e427862..847a614 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -137,7 +137,7 @@
       AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
           : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
         all_nodes_iterator_ =
-            new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator (mir_graph->GetBlockList());
+            new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
       }
 
       void Reset() {
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 0803914..ae160d6 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -53,7 +53,7 @@
   llvm_module_ = new ::llvm::Module("art", *llvm_context_);
   ::llvm::StructType::create(*llvm_context_, "JavaObject");
   art::llvm::makeLLVMModuleContents(llvm_module_);
-  intrinsic_helper_.reset( new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
+  intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
   ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
 }
 
@@ -276,7 +276,7 @@
 #if defined(ART_USE_PORTABLE_COMPILER)
                        , llvm_compilation_unit
 #endif
-                       );
+                       );  // NOLINT(whitespace/parens)
 }
 
 }  // namespace art
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 9632388..be19d5a 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1122,9 +1122,9 @@
   size_t num_dalvik_reg = cu_->num_dalvik_registers;
 
   ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
-                                            kGrowableArraySSAtoDalvikMap);
+                                                    kGrowableArraySSAtoDalvikMap);
   ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
-                                            kGrowableArraySSAtoDalvikMap);
+                                                    kGrowableArraySSAtoDalvikMap);
   /*
    * Initial number of SSA registers is equal to the number of Dalvik
    * registers.
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0b3fa46..634c576 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -410,7 +410,7 @@
       (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
       kPackedSwitch : kSparseSwitch;
   cur_block->successor_block_list.blocks =
-      new (arena_)GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
+      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
 
   for (i = 0; i < size; i++) {
     BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
@@ -427,8 +427,8 @@
   }
 
   /* Fall-through case */
-  BasicBlock* fallthrough_block = FindBlock( cur_offset +  width, /* split */ false,
-                                           /* create */ true, /* immed_pred_block_p */ NULL);
+  BasicBlock* fallthrough_block = FindBlock(cur_offset +  width, /* split */ false,
+                                            /* create */ true, /* immed_pred_block_p */ NULL);
   cur_block->fall_through = fallthrough_block;
   fallthrough_block->predecessors->Insert(cur_block);
 }
@@ -1146,8 +1146,9 @@
   bb->block_type = block_type;
   bb->id = block_id;
   // TUNING: better estimate of the exit block predecessors?
-  bb->predecessors = new (arena_)
-      GrowableArray<BasicBlock*>(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors);
+  bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
+                                                             (block_type == kExitBlock) ? 2048 : 2,
+                                                             kGrowableArrayPredecessors);
   bb->successor_block_list.block_list_type = kNotUsed;
   block_id_map_.Put(block_id, block_id);
   return bb;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 882b81a..f83bbb2 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -228,7 +228,7 @@
             MIR* mir_next = mir->next;
             Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
             ConditionCode ccode = kCondNv;
-            switch(br_opcode) {
+            switch (br_opcode) {
               case Instruction::IF_EQZ:
                 ccode = kCondEq;
                 break;
@@ -255,7 +255,7 @@
                 (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
                 (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
               mir_next->dalvikInsn.arg[0] = ccode;
-              switch(opcode) {
+              switch (opcode) {
                 case Instruction::CMPL_FLOAT:
                   mir_next->dalvikInsn.opcode =
                       static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index cfd3daf..ad7a6a8 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -297,7 +297,7 @@
                                    ::llvm::Value* src1, ::llvm::Value* src2) {
   ::llvm::Value* res = NULL;
   DCHECK_EQ(src1->getType(), src2->getType());
-  switch(cc) {
+  switch (cc) {
     case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
     case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
     case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
@@ -369,7 +369,7 @@
 ::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
                                ::llvm::Value* src1, ::llvm::Value* src2) {
   ::llvm::Value* res = NULL;
-  switch(op) {
+  switch (op) {
     case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
     case kOpSub: res = irb_->CreateSub(src1, src2); break;
     case kOpRsub: res = irb_->CreateSub(src2, src1); break;
@@ -393,7 +393,7 @@
   ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   ::llvm::Value* res = NULL;
-  switch(op) {
+  switch (op) {
     case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
     case kOpSub: res = irb_->CreateFSub(src1, src2); break;
     case kOpMul: res = irb_->CreateFMul(src1, src2); break;
@@ -1500,7 +1500,7 @@
       res = true;
   }
   return res;
-}
+}  // NOLINT(readability/fn_size)
 
 void MirConverter::SetDexOffset(int32_t offset) {
   current_dalvik_offset_ = offset;
@@ -1781,7 +1781,7 @@
    * types (which is valid so long as we always do a real expansion of passed
    * arguments and field loads).
    */
-  switch(shorty_type) {
+  switch (shorty_type) {
     case 'Z' : shorty_type = 'I'; break;
     case 'B' : shorty_type = 'I'; break;
     case 'S' : shorty_type = 'I'; break;
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 2c626a0..8f73f0c 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -193,7 +193,7 @@
   }
   NewLIR0(kThumb2Fmstat);
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
-  switch(ccode) {
+  switch (ccode) {
     case kCondEq:
     case kCondNe:
       break;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e12df6c..3a367c9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -129,7 +129,7 @@
   int32_t low_reg = rl_src1.low_reg;
   int32_t high_reg = rl_src1.high_reg;
 
-  switch(ccode) {
+  switch (ccode) {
     case kCondEq:
     case kCondNe:
       LIR* target;
@@ -270,7 +270,7 @@
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
   OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
-  switch(ccode) {
+  switch (ccode) {
     case kCondEq:
       OpCondBranch(kCondNe, not_taken);
       break;
@@ -436,7 +436,7 @@
   int r_hi = AllocTemp();
   int r_lo = AllocTemp();
   NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
-  switch(pattern) {
+  switch (pattern) {
     case Divide3:
       OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
                rl_src.low_reg, EncodeShift(kArmAsr, 31));
@@ -1002,7 +1002,7 @@
     return;
   }
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  switch(opcode) {
+  switch (opcode) {
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
       if (shift_amount == 1) {
@@ -1090,7 +1090,7 @@
   int32_t mod_imm_hi = ModifiedImmediate(val_hi);
 
   // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
-  switch(opcode) {
+  switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
     case Instruction::SUB_LONG:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 80f597d..305a147 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -549,7 +549,7 @@
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdd:
-      if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
         DCHECK_EQ((value & 0x3), 0);
         return NewLIR1(kThumbAddSpI7, value >> 2);
       } else if (short_form) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 8698b1f..7a59644 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -55,7 +55,7 @@
 }
 
 /* Convert an instruction to a NOP */
-void Mir2Lir::NopLIR( LIR* lir) {
+void Mir2Lir::NopLIR(LIR* lir) {
   lir->flags.is_nop = true;
 }
 
@@ -190,10 +190,10 @@
   }
 
   if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use"));
+    DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use"));
   }
   if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def"));
+    DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def"));
   }
 }
 
@@ -336,10 +336,10 @@
 }
 
 static void PushWord(std::vector<uint8_t>&buf, int data) {
-  buf.push_back( data & 0xff);
-  buf.push_back( (data >> 8) & 0xff);
-  buf.push_back( (data >> 16) & 0xff);
-  buf.push_back( (data >> 24) & 0xff);
+  buf.push_back(data & 0xff);
+  buf.push_back((data >> 8) & 0xff);
+  buf.push_back((data >> 16) & 0xff);
+  buf.push_back((data >> 24) & 0xff);
 }
 
 static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
@@ -454,8 +454,8 @@
     if (tab_rec == NULL) break;
     AlignBuffer(code_buffer_, tab_rec->offset);
     for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
-      code_buffer_.push_back( tab_rec->table[i] & 0xFF);
-      code_buffer_.push_back( (tab_rec->table[i] >> 8) & 0xFF);
+      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
     }
   }
 }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index a34d2a9..d1bfd2d 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -279,7 +279,7 @@
     int r_dst = AllocTemp();
     int r_idx = AllocTemp();
     int r_val = INVALID_REG;
-    switch(cu_->instruction_set) {
+    switch (cu_->instruction_set) {
       case kThumb2:
         r_val = TargetReg(kLr);
         break;
@@ -1311,7 +1311,7 @@
         GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
       }
       // NOTE: callout here is not a safepoint
-      CallHelper(r_tgt, func_offset, false /* not a safepoint */ );
+      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
       if (op == kOpDiv)
         rl_result = GetReturn(false);
       else
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8510006..127d191 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -107,7 +107,7 @@
 }
 
 LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ );
+  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
   res->target = target;
   return res;
 }
@@ -642,8 +642,8 @@
   return NULL;
 }
 
-LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement,
-                                        int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                       int r_src, int r_src_hi, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
   return NULL;
 }
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index b758fb5..74eaa66 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -655,7 +655,7 @@
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-}
+}  // NOLINT(readability/fn_size)
 
 // Process extended MIR instructions
 void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 41e5a2d..7765eaa 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -233,7 +233,7 @@
 
     RegisterClass oat_reg_class_by_size(OpSize size) {
       return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
-              size == kSignedByte ) ? kCoreReg : kAnyReg;
+              size == kSignedByte) ? kCoreReg : kAnyReg;
     }
 
     size_t CodeBufferSizeInBytes() {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 4c91223..bc3740a 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1021,8 +1021,7 @@
 
   if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
     // Promote FpRegs
-    for (int i = 0; (i < num_regs) &&
-            (FpRegs[i].count >= promotion_threshold ); i++) {
+    for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) {
       int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
       if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
         int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 6376e3b..75367a3 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -100,7 +100,7 @@
 }
 
 LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ );
+  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
   res->target = target;
   return res;
 }
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 3a0cbcc..7739e29 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -266,7 +266,7 @@
 void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
   int num_total_blocks = GetBasicBlockListCount();
 
-  if (bb->dominators == NULL ) {
+  if (bb->dominators == NULL) {
     bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  false /* expandable */, kBitMapDominators);
     bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 10bbd1f..f361dd7 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -160,7 +160,7 @@
       if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
           (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
           (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
-        switch(cu_->shorty[0]) {
+        switch (cu_->shorty[0]) {
             case 'I':
               changed |= SetCore(ssa_rep->uses[0], true);
               break;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7fd1a7c..f395428 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -52,7 +52,14 @@
 #include "UniquePtr.h"
 #include "utils.h"
 
-using namespace art::mirror;
+using ::art::mirror::AbstractMethod;
+using ::art::mirror::Class;
+using ::art::mirror::DexCache;
+using ::art::mirror::EntryPointFromInterpreter;
+using ::art::mirror::Field;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::String;
 
 namespace art {
 
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 57b8a31..0e58378 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -41,7 +41,13 @@
 namespace art {
 namespace llvm {
 
-using namespace runtime_support;
+using ::art::llvm::runtime_support::JniMethodEnd;
+using ::art::llvm::runtime_support::JniMethodEndSynchronized;
+using ::art::llvm::runtime_support::JniMethodEndWithReference;
+using ::art::llvm::runtime_support::JniMethodEndWithReferenceSynchronized;
+using ::art::llvm::runtime_support::JniMethodStart;
+using ::art::llvm::runtime_support::JniMethodStartSynchronized;
+using ::art::llvm::runtime_support::RuntimeId;
 
 JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
                          const CompilerDriver& driver,
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 94cc973..a7793ae 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -30,8 +30,6 @@
 #include "dex/compiler_ir.h"
 #include "dex/mir_graph.h"
 #include "dex/quick/mir_to_lir.h"
-using art::kMIRIgnoreNullCheck;
-using art::kMIRIgnoreRangeCheck;
 
 #include <llvm/ADT/STLExtras.h>
 #include <llvm/IR/Intrinsics.h>
@@ -44,13 +42,78 @@
 #include <map>
 #include <utility>
 
-using namespace art::llvm;
-
-using art::llvm::IntrinsicHelper;
+using ::art::kMIRIgnoreNullCheck;
+using ::art::kMIRIgnoreRangeCheck;
+using ::art::llvm::IRBuilder;
+using ::art::llvm::IntrinsicHelper;
+using ::art::llvm::JType;
+using ::art::llvm::RuntimeSupportBuilder;
+using ::art::llvm::kBoolean;
+using ::art::llvm::kByte;
+using ::art::llvm::kChar;
+using ::art::llvm::kDouble;
+using ::art::llvm::kFloat;
+using ::art::llvm::kInt;
+using ::art::llvm::kLikely;
+using ::art::llvm::kLong;
+using ::art::llvm::kObject;
+using ::art::llvm::kShort;
+using ::art::llvm::kTBAAConstJObject;
+using ::art::llvm::kTBAAHeapArray;
+using ::art::llvm::kTBAAHeapInstance;
+using ::art::llvm::kTBAAHeapStatic;
+using ::art::llvm::kTBAARegister;
+using ::art::llvm::kTBAARuntimeInfo;
+using ::art::llvm::kTBAAShadowFrame;
+using ::art::llvm::kUnlikely;
+using ::art::llvm::kVoid;
+using ::art::llvm::runtime_support::AllocArray;
+using ::art::llvm::runtime_support::AllocArrayWithAccessCheck;
+using ::art::llvm::runtime_support::AllocObject;
+using ::art::llvm::runtime_support::AllocObjectWithAccessCheck;
+using ::art::llvm::runtime_support::CheckAndAllocArray;
+using ::art::llvm::runtime_support::CheckAndAllocArrayWithAccessCheck;
+using ::art::llvm::runtime_support::CheckCast;
+using ::art::llvm::runtime_support::CheckPutArrayElement;
+using ::art::llvm::runtime_support::FillArrayData;
+using ::art::llvm::runtime_support::FindCatchBlock;
+using ::art::llvm::runtime_support::FindDirectMethodWithAccessCheck;
+using ::art::llvm::runtime_support::FindInterfaceMethod;
+using ::art::llvm::runtime_support::FindInterfaceMethodWithAccessCheck;
+using ::art::llvm::runtime_support::FindStaticMethodWithAccessCheck;
+using ::art::llvm::runtime_support::FindSuperMethodWithAccessCheck;
+using ::art::llvm::runtime_support::FindVirtualMethodWithAccessCheck;
+using ::art::llvm::runtime_support::Get32Instance;
+using ::art::llvm::runtime_support::Get32Static;
+using ::art::llvm::runtime_support::Get64Instance;
+using ::art::llvm::runtime_support::Get64Static;
+using ::art::llvm::runtime_support::GetObjectInstance;
+using ::art::llvm::runtime_support::GetObjectStatic;
+using ::art::llvm::runtime_support::InitializeStaticStorage;
+using ::art::llvm::runtime_support::InitializeType;
+using ::art::llvm::runtime_support::InitializeTypeAndVerifyAccess;
+using ::art::llvm::runtime_support::IsAssignable;
+using ::art::llvm::runtime_support::ResolveString;
+using ::art::llvm::runtime_support::RuntimeId;
+using ::art::llvm::runtime_support::Set32Instance;
+using ::art::llvm::runtime_support::Set32Static;
+using ::art::llvm::runtime_support::Set64Instance;
+using ::art::llvm::runtime_support::Set64Static;
+using ::art::llvm::runtime_support::SetObjectInstance;
+using ::art::llvm::runtime_support::SetObjectStatic;
+using ::art::llvm::runtime_support::ThrowDivZeroException;
+using ::art::llvm::runtime_support::ThrowException;
+using ::art::llvm::runtime_support::ThrowIndexOutOfBounds;
+using ::art::llvm::runtime_support::ThrowNullPointerException;
+using ::art::llvm::runtime_support::ThrowStackOverflowException;
+using ::art::llvm::runtime_support::art_d2i;
+using ::art::llvm::runtime_support::art_d2l;
+using ::art::llvm::runtime_support::art_f2i;
+using ::art::llvm::runtime_support::art_f2l;
 
 namespace art {
 extern char RemapShorty(char shortyType);
-};
+}  // namespace art
 
 namespace {
 
@@ -101,8 +164,7 @@
   // Helper function for GBC expansion
   //----------------------------------------------------------------------------
 
-  llvm::Value* ExpandToRuntime(runtime_support::RuntimeId rt,
-                               llvm::CallInst& inst);
+  llvm::Value* ExpandToRuntime(RuntimeId rt, llvm::CallInst& inst);
 
   uint64_t LV2UInt(llvm::Value* lv) {
     return llvm::cast<llvm::ConstantInt>(lv)->getZExtValue();
@@ -580,8 +642,7 @@
   }
 }
 
-llvm::Value* GBCExpanderPass::ExpandToRuntime(runtime_support::RuntimeId rt,
-                                              llvm::CallInst& inst) {
+llvm::Value* GBCExpanderPass::ExpandToRuntime(RuntimeId rt, llvm::CallInst& inst) {
   // Some GBC intrinsic can directly replace with IBC runtime. "Directly" means
   // the arguments passed to the GBC intrinsic are as the same as IBC runtime
   // function, therefore only called function is needed to change.
@@ -633,7 +694,7 @@
 
   // If stack overflow, throw exception.
   irb_.SetInsertPoint(block_exception);
-  irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowStackOverflowException));
+  irb_.CreateCall(irb_.GetRuntime(ThrowStackOverflowException));
 
   // Unwind.
   llvm::Type* ret_type = func->getReturnType();
@@ -1541,7 +1602,7 @@
   llvm::Value* array_elem_addr = EmitArrayGEP(array_addr, index_value, elem_jty);
 
   if (elem_jty == kObject) { // If put an object, check the type, and mark GC card table.
-    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::CheckPutArrayElement);
+    llvm::Function* runtime_func = irb_.GetRuntime(CheckPutArrayElement);
 
     irb_.CreateCall2(runtime_func, new_value, array_addr);
 
@@ -1575,11 +1636,11 @@
     llvm::Function* runtime_func;
 
     if (field_jty == kObject) {
-      runtime_func = irb_.GetRuntime(runtime_support::GetObjectInstance);
+      runtime_func = irb_.GetRuntime(GetObjectInstance);
     } else if (field_jty == kLong || field_jty == kDouble) {
-      runtime_func = irb_.GetRuntime(runtime_support::Get64Instance);
+      runtime_func = irb_.GetRuntime(Get64Instance);
     } else {
-      runtime_func = irb_.GetRuntime(runtime_support::Get32Instance);
+      runtime_func = irb_.GetRuntime(Get32Instance);
     }
 
     llvm::ConstantInt* field_idx_value = irb_.getInt32(field_idx);
@@ -1643,11 +1704,11 @@
     }
 
     if (field_jty == kObject) {
-      runtime_func = irb_.GetRuntime(runtime_support::SetObjectInstance);
+      runtime_func = irb_.GetRuntime(SetObjectInstance);
     } else if (field_jty == kLong || field_jty == kDouble) {
-      runtime_func = irb_.GetRuntime(runtime_support::Set64Instance);
+      runtime_func = irb_.GetRuntime(Set64Instance);
     } else {
-      runtime_func = irb_.GetRuntime(runtime_support::Set32Instance);
+      runtime_func = irb_.GetRuntime(Set32Instance);
     }
 
     llvm::Value* field_idx_value = irb_.getInt32(field_idx);
@@ -1701,8 +1762,7 @@
 
     llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
 
-    llvm::Function* runtime_func =
-      irb_.GetRuntime(runtime_support::InitializeTypeAndVerifyAccess);
+    llvm::Function* runtime_func = irb_.GetRuntime(InitializeTypeAndVerifyAccess);
 
     EmitUpdateDexPC(dex_pc);
 
@@ -1741,7 +1801,7 @@
     // Failback routine to load the class object
     irb_.SetInsertPoint(block_load_class);
 
-    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::InitializeType);
+    llvm::Function* runtime_func = irb_.GetRuntime(InitializeType);
 
     llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
 
@@ -1796,7 +1856,7 @@
   // Failback routine to load the class object
   irb_.SetInsertPoint(block_load_static);
 
-  llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::InitializeStaticStorage);
+  llvm::Function* runtime_func = irb_.GetRuntime(InitializeStaticStorage);
 
   llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
 
@@ -1846,11 +1906,11 @@
     llvm::Function* runtime_func;
 
     if (field_jty == kObject) {
-      runtime_func = irb_.GetRuntime(runtime_support::GetObjectStatic);
+      runtime_func = irb_.GetRuntime(GetObjectStatic);
     } else if (field_jty == kLong || field_jty == kDouble) {
-      runtime_func = irb_.GetRuntime(runtime_support::Get64Static);
+      runtime_func = irb_.GetRuntime(Get64Static);
     } else {
-      runtime_func = irb_.GetRuntime(runtime_support::Get32Static);
+      runtime_func = irb_.GetRuntime(Get32Static);
     }
 
     llvm::Constant* field_idx_value = irb_.getInt32(field_idx);
@@ -1928,11 +1988,11 @@
     llvm::Function* runtime_func;
 
     if (field_jty == kObject) {
-      runtime_func = irb_.GetRuntime(runtime_support::SetObjectStatic);
+      runtime_func = irb_.GetRuntime(SetObjectStatic);
     } else if (field_jty == kLong || field_jty == kDouble) {
-      runtime_func = irb_.GetRuntime(runtime_support::Set64Static);
+      runtime_func = irb_.GetRuntime(Set64Static);
     } else {
-      runtime_func = irb_.GetRuntime(runtime_support::Set32Static);
+      runtime_func = irb_.GetRuntime(Set32Static);
     }
 
     if (field_jty == kFloat) {
@@ -2029,7 +2089,7 @@
     // String is not resolved yet, resolve it now.
     irb_.SetInsertPoint(block_str_resolve);
 
-    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::ResolveString);
+    llvm::Function* runtime_func = irb_.GetRuntime(ResolveString);
 
     llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
 
@@ -2141,7 +2201,7 @@
 
   EmitUpdateDexPC(dex_pc);
 
-  irb_.CreateCall2(irb_.GetRuntime(runtime_support::CheckCast),
+  irb_.CreateCall2(irb_.GetRuntime(CheckCast),
                    type_object_addr, object_type_object_addr);
 
   EmitGuard_ExceptionLandingPad(dex_pc);
@@ -2209,7 +2269,7 @@
   // Test: Is the object instantiated from the subclass of the given class?
   irb_.SetInsertPoint(block_test_sub_class);
   llvm::Value* result =
-    irb_.CreateCall2(irb_.GetRuntime(runtime_support::IsAssignable),
+    irb_.CreateCall2(irb_.GetRuntime(IsAssignable),
                      type_object_addr, object_type_object_addr);
   irb_.CreateBr(block_cont);
 
@@ -2232,9 +2292,9 @@
   if (driver_->CanAccessInstantiableTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
                                                       *dex_compilation_unit_->GetDexFile(),
                                                       type_idx)) {
-    runtime_func = irb_.GetRuntime(runtime_support::AllocObject);
+    runtime_func = irb_.GetRuntime(AllocObject);
   } else {
-    runtime_func = irb_.GetRuntime(runtime_support::AllocObjectWithAccessCheck);
+    runtime_func = irb_.GetRuntime(AllocObjectWithAccessCheck);
   }
 
   llvm::Constant* type_index_value = irb_.getInt32(type_idx);
@@ -2373,7 +2433,7 @@
 
     // NOTE: We will check for the NullPointerException in the runtime.
 
-    llvm::Function* runtime_func = irb_.GetRuntime(runtime_support::FillArrayData);
+    llvm::Function* runtime_func = irb_.GetRuntime(FillArrayData);
 
     llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
 
@@ -2402,12 +2462,12 @@
 
   if (is_filled_new_array) {
     runtime_func = skip_access_check ?
-      irb_.GetRuntime(runtime_support::CheckAndAllocArray) :
-      irb_.GetRuntime(runtime_support::CheckAndAllocArrayWithAccessCheck);
+      irb_.GetRuntime(CheckAndAllocArray) :
+      irb_.GetRuntime(CheckAndAllocArrayWithAccessCheck);
   } else {
     runtime_func = skip_access_check ?
-      irb_.GetRuntime(runtime_support::AllocArray) :
-      irb_.GetRuntime(runtime_support::AllocArrayWithAccessCheck);
+      irb_.GetRuntime(AllocArray) :
+      irb_.GetRuntime(AllocArrayWithAccessCheck);
   }
 
   llvm::Constant* type_index_value = irb_.getInt32(type_idx);
@@ -2437,26 +2497,26 @@
 
   switch (invoke_type) {
   case art::kStatic:
-    runtime_func = irb_.GetRuntime(runtime_support::FindStaticMethodWithAccessCheck);
+    runtime_func = irb_.GetRuntime(FindStaticMethodWithAccessCheck);
     break;
 
   case art::kDirect:
-    runtime_func = irb_.GetRuntime(runtime_support::FindDirectMethodWithAccessCheck);
+    runtime_func = irb_.GetRuntime(FindDirectMethodWithAccessCheck);
     break;
 
   case art::kVirtual:
-    runtime_func = irb_.GetRuntime(runtime_support::FindVirtualMethodWithAccessCheck);
+    runtime_func = irb_.GetRuntime(FindVirtualMethodWithAccessCheck);
     break;
 
   case art::kSuper:
-    runtime_func = irb_.GetRuntime(runtime_support::FindSuperMethodWithAccessCheck);
+    runtime_func = irb_.GetRuntime(FindSuperMethodWithAccessCheck);
     break;
 
   case art::kInterface:
     if (is_fast_path) {
-      runtime_func = irb_.GetRuntime(runtime_support::FindInterfaceMethod);
+      runtime_func = irb_.GetRuntime(FindInterfaceMethod);
     } else {
-      runtime_func = irb_.GetRuntime(runtime_support::FindInterfaceMethodWithAccessCheck);
+      runtime_func = irb_.GetRuntime(FindInterfaceMethodWithAccessCheck);
     }
     break;
   }
@@ -2518,7 +2578,7 @@
 
   irb_.SetInsertPoint(block_exception);
   EmitUpdateDexPC(dex_pc);
-  irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowDivZeroException));
+  irb_.CreateCall(irb_.GetRuntime(ThrowDivZeroException));
   EmitBranchExceptionLandingPad(dex_pc);
 
   irb_.SetInsertPoint(block_continue);
@@ -2557,7 +2617,7 @@
 
     irb_.SetInsertPoint(block_exception);
     EmitUpdateDexPC(dex_pc);
-    irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowNullPointerException),
+    irb_.CreateCall(irb_.GetRuntime(ThrowNullPointerException),
                     irb_.getInt32(dex_pc));
     EmitBranchExceptionLandingPad(dex_pc);
 
@@ -2603,7 +2663,7 @@
     irb_.SetInsertPoint(block_exception);
 
     EmitUpdateDexPC(dex_pc);
-    irb_.CreateCall2(irb_.GetRuntime(runtime_support::ThrowIndexOutOfBounds), index, array_len);
+    irb_.CreateCall2(irb_.GetRuntime(ThrowIndexOutOfBounds), index, array_len);
     EmitBranchExceptionLandingPad(dex_pc);
 
     irb_.SetInsertPoint(block_continue);
@@ -2720,7 +2780,7 @@
   llvm::Value* ti_offset_value = irb_.getInt32(ti_offset);
 
   llvm::Value* catch_handler_index_value =
-    irb_.CreateCall2(irb_.GetRuntime(runtime_support::FindCatchBlock),
+    irb_.CreateCall2(irb_.GetRuntime(FindCatchBlock),
                      method_object_addr, ti_offset_value);
 
   // Switch instruction (Go to unwind basic block by default)
@@ -2825,14 +2885,14 @@
 
     //==- Exception --------------------------------------------------------==//
     case IntrinsicHelper::ThrowException: {
-      return ExpandToRuntime(runtime_support::ThrowException, call_inst);
+      return ExpandToRuntime(ThrowException, call_inst);
     }
     case IntrinsicHelper::HLThrowException: {
       uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
 
       EmitUpdateDexPC(dex_pc);
 
-      irb_.CreateCall(irb_.GetRuntime(runtime_support::ThrowException),
+      irb_.CreateCall(irb_.GetRuntime(ThrowException),
                       call_inst.getArgOperand(0));
 
       EmitGuard_ExceptionLandingPad(dex_pc);
@@ -2845,16 +2905,16 @@
       return irb_.Runtime().EmitIsExceptionPending();
     }
     case IntrinsicHelper::FindCatchBlock: {
-      return ExpandToRuntime(runtime_support::FindCatchBlock, call_inst);
+      return ExpandToRuntime(FindCatchBlock, call_inst);
     }
     case IntrinsicHelper::ThrowDivZeroException: {
-      return ExpandToRuntime(runtime_support::ThrowDivZeroException, call_inst);
+      return ExpandToRuntime(ThrowDivZeroException, call_inst);
     }
     case IntrinsicHelper::ThrowNullPointerException: {
-      return ExpandToRuntime(runtime_support::ThrowNullPointerException, call_inst);
+      return ExpandToRuntime(ThrowNullPointerException, call_inst);
     }
     case IntrinsicHelper::ThrowIndexOutOfBounds: {
-      return ExpandToRuntime(runtime_support::ThrowIndexOutOfBounds, call_inst);
+      return ExpandToRuntime(ThrowIndexOutOfBounds, call_inst);
     }
 
     //==- Const String -----------------------------------------------------==//
@@ -2865,7 +2925,7 @@
       return Expand_LoadStringFromDexCache(call_inst.getArgOperand(0));
     }
     case IntrinsicHelper::ResolveString: {
-      return ExpandToRuntime(runtime_support::ResolveString, call_inst);
+      return ExpandToRuntime(ResolveString, call_inst);
     }
 
     //==- Const Class ------------------------------------------------------==//
@@ -2873,13 +2933,13 @@
       return Expand_ConstClass(call_inst);
     }
     case IntrinsicHelper::InitializeTypeAndVerifyAccess: {
-      return ExpandToRuntime(runtime_support::InitializeTypeAndVerifyAccess, call_inst);
+      return ExpandToRuntime(InitializeTypeAndVerifyAccess, call_inst);
     }
     case IntrinsicHelper::LoadTypeFromDexCache: {
       return Expand_LoadTypeFromDexCache(call_inst.getArgOperand(0));
     }
     case IntrinsicHelper::InitializeType: {
-      return ExpandToRuntime(runtime_support::InitializeType, call_inst);
+      return ExpandToRuntime(InitializeType, call_inst);
     }
 
     //==- Lock -------------------------------------------------------------==//
@@ -2894,22 +2954,22 @@
 
     //==- Cast -------------------------------------------------------------==//
     case IntrinsicHelper::CheckCast: {
-      return ExpandToRuntime(runtime_support::CheckCast, call_inst);
+      return ExpandToRuntime(CheckCast, call_inst);
     }
     case IntrinsicHelper::HLCheckCast: {
       Expand_HLCheckCast(call_inst);
       return NULL;
     }
     case IntrinsicHelper::IsAssignable: {
-      return ExpandToRuntime(runtime_support::IsAssignable, call_inst);
+      return ExpandToRuntime(IsAssignable, call_inst);
     }
 
     //==- Alloc ------------------------------------------------------------==//
     case IntrinsicHelper::AllocObject: {
-      return ExpandToRuntime(runtime_support::AllocObject, call_inst);
+      return ExpandToRuntime(AllocObject, call_inst);
     }
     case IntrinsicHelper::AllocObjectWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::AllocObjectWithAccessCheck, call_inst);
+      return ExpandToRuntime(AllocObjectWithAccessCheck, call_inst);
     }
 
     //==- Instance ---------------------------------------------------------==//
@@ -2931,17 +2991,17 @@
       return EmitLoadArrayLength(call_inst.getArgOperand(0));
     }
     case IntrinsicHelper::AllocArray: {
-      return ExpandToRuntime(runtime_support::AllocArray, call_inst);
+      return ExpandToRuntime(AllocArray, call_inst);
     }
     case IntrinsicHelper::AllocArrayWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::AllocArrayWithAccessCheck,
+      return ExpandToRuntime(AllocArrayWithAccessCheck,
                              call_inst);
     }
     case IntrinsicHelper::CheckAndAllocArray: {
-      return ExpandToRuntime(runtime_support::CheckAndAllocArray, call_inst);
+      return ExpandToRuntime(CheckAndAllocArray, call_inst);
     }
     case IntrinsicHelper::CheckAndAllocArrayWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::CheckAndAllocArrayWithAccessCheck,
+      return ExpandToRuntime(CheckAndAllocArrayWithAccessCheck,
                              call_inst);
     }
     case IntrinsicHelper::ArrayGet: {
@@ -3029,14 +3089,14 @@
       return NULL;
     }
     case IntrinsicHelper::CheckPutArrayElement: {
-      return ExpandToRuntime(runtime_support::CheckPutArrayElement, call_inst);
+      return ExpandToRuntime(CheckPutArrayElement, call_inst);
     }
     case IntrinsicHelper::FilledNewArray: {
       Expand_FilledNewArray(call_inst);
       return NULL;
     }
     case IntrinsicHelper::FillArrayData: {
-      return ExpandToRuntime(runtime_support::FillArrayData, call_inst);
+      return ExpandToRuntime(FillArrayData, call_inst);
     }
     case IntrinsicHelper::HLFillArrayData: {
       Expand_HLFillArrayData(call_inst);
@@ -3052,13 +3112,13 @@
     case IntrinsicHelper::InstanceFieldGetByte:
     case IntrinsicHelper::InstanceFieldGetChar:
     case IntrinsicHelper::InstanceFieldGetShort: {
-      return ExpandToRuntime(runtime_support::Get32Instance, call_inst);
+      return ExpandToRuntime(Get32Instance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldGetWide: {
-      return ExpandToRuntime(runtime_support::Get64Instance, call_inst);
+      return ExpandToRuntime(Get64Instance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldGetObject: {
-      return ExpandToRuntime(runtime_support::GetObjectInstance, call_inst);
+      return ExpandToRuntime(GetObjectInstance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldGetFast: {
       return Expand_IGetFast(call_inst.getArgOperand(0),
@@ -3107,13 +3167,13 @@
     case IntrinsicHelper::InstanceFieldPutByte:
     case IntrinsicHelper::InstanceFieldPutChar:
     case IntrinsicHelper::InstanceFieldPutShort: {
-      return ExpandToRuntime(runtime_support::Set32Instance, call_inst);
+      return ExpandToRuntime(Set32Instance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldPutWide: {
-      return ExpandToRuntime(runtime_support::Set64Instance, call_inst);
+      return ExpandToRuntime(Set64Instance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldPutObject: {
-      return ExpandToRuntime(runtime_support::SetObjectInstance, call_inst);
+      return ExpandToRuntime(SetObjectInstance, call_inst);
     }
     case IntrinsicHelper::InstanceFieldPutFast: {
       Expand_IPutFast(call_inst.getArgOperand(0),
@@ -3178,13 +3238,13 @@
     case IntrinsicHelper::StaticFieldGetByte:
     case IntrinsicHelper::StaticFieldGetChar:
     case IntrinsicHelper::StaticFieldGetShort: {
-      return ExpandToRuntime(runtime_support::Get32Static, call_inst);
+      return ExpandToRuntime(Get32Static, call_inst);
     }
     case IntrinsicHelper::StaticFieldGetWide: {
-      return ExpandToRuntime(runtime_support::Get64Static, call_inst);
+      return ExpandToRuntime(Get64Static, call_inst);
     }
     case IntrinsicHelper::StaticFieldGetObject: {
-      return ExpandToRuntime(runtime_support::GetObjectStatic, call_inst);
+      return ExpandToRuntime(GetObjectStatic, call_inst);
     }
     case IntrinsicHelper::StaticFieldGetFast: {
       return Expand_SGetFast(call_inst.getArgOperand(0),
@@ -3233,13 +3293,13 @@
     case IntrinsicHelper::StaticFieldPutByte:
     case IntrinsicHelper::StaticFieldPutChar:
     case IntrinsicHelper::StaticFieldPutShort: {
-      return ExpandToRuntime(runtime_support::Set32Static, call_inst);
+      return ExpandToRuntime(Set32Static, call_inst);
     }
     case IntrinsicHelper::StaticFieldPutWide: {
-      return ExpandToRuntime(runtime_support::Set64Static, call_inst);
+      return ExpandToRuntime(Set64Static, call_inst);
     }
     case IntrinsicHelper::StaticFieldPutObject: {
-      return ExpandToRuntime(runtime_support::SetObjectStatic, call_inst);
+      return ExpandToRuntime(SetObjectStatic, call_inst);
     }
     case IntrinsicHelper::StaticFieldPutFast: {
       Expand_SPutFast(call_inst.getArgOperand(0),
@@ -3304,7 +3364,7 @@
       return Expand_LoadClassSSBFromDexCache(call_inst.getArgOperand(0));
     }
     case IntrinsicHelper::InitializeAndLoadClassSSB: {
-      return ExpandToRuntime(runtime_support::InitializeStaticStorage, call_inst);
+      return ExpandToRuntime(InitializeStaticStorage, call_inst);
     }
 
     //==- High-level Array -------------------------------------------------==//
@@ -3449,19 +3509,19 @@
 
     //==- Invoke -----------------------------------------------------------==//
     case IntrinsicHelper::FindStaticMethodWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::FindStaticMethodWithAccessCheck, call_inst);
+      return ExpandToRuntime(FindStaticMethodWithAccessCheck, call_inst);
     }
     case IntrinsicHelper::FindDirectMethodWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::FindDirectMethodWithAccessCheck, call_inst);
+      return ExpandToRuntime(FindDirectMethodWithAccessCheck, call_inst);
     }
     case IntrinsicHelper::FindVirtualMethodWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::FindVirtualMethodWithAccessCheck, call_inst);
+      return ExpandToRuntime(FindVirtualMethodWithAccessCheck, call_inst);
     }
     case IntrinsicHelper::FindSuperMethodWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::FindSuperMethodWithAccessCheck, call_inst);
+      return ExpandToRuntime(FindSuperMethodWithAccessCheck, call_inst);
     }
     case IntrinsicHelper::FindInterfaceMethodWithAccessCheck: {
-      return ExpandToRuntime(runtime_support::FindInterfaceMethodWithAccessCheck, call_inst);
+      return ExpandToRuntime(FindInterfaceMethodWithAccessCheck, call_inst);
     }
     case IntrinsicHelper::GetSDCalleeMethodObjAddrFast: {
       return Expand_GetSDCalleeMethodObjAddrFast(call_inst.getArgOperand(0));
@@ -3471,7 +3531,7 @@
                 call_inst.getArgOperand(0), call_inst.getArgOperand(1));
     }
     case IntrinsicHelper::GetInterfaceCalleeMethodObjAddrFast: {
-      return ExpandToRuntime(runtime_support::FindInterfaceMethod, call_inst);
+      return ExpandToRuntime(FindInterfaceMethod, call_inst);
     }
     case IntrinsicHelper::InvokeRetVoid:
     case IntrinsicHelper::InvokeRetBoolean:
@@ -3500,16 +3560,16 @@
       return Expand_DivRem(call_inst, /* is_div */false, kLong);
     }
     case IntrinsicHelper::D2L: {
-      return ExpandToRuntime(runtime_support::art_d2l, call_inst);
+      return ExpandToRuntime(art_d2l, call_inst);
     }
     case IntrinsicHelper::D2I: {
-      return ExpandToRuntime(runtime_support::art_d2i, call_inst);
+      return ExpandToRuntime(art_d2i, call_inst);
     }
     case IntrinsicHelper::F2L: {
-      return ExpandToRuntime(runtime_support::art_f2l, call_inst);
+      return ExpandToRuntime(art_f2l, call_inst);
     }
     case IntrinsicHelper::F2I: {
-      return ExpandToRuntime(runtime_support::art_f2i, call_inst);
+      return ExpandToRuntime(art_f2i, call_inst);
     }
 
     //==- High-level Static ------------------------------------------------==//
@@ -3731,7 +3791,7 @@
   }
   UNIMPLEMENTED(FATAL) << "Unexpected GBC intrinsic: " << static_cast<int>(intr_id);
   return NULL;
-}
+}  // NOLINT(readability/fn_size)
 
 } // anonymous namespace
 
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 1f2b977..592059e 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -114,7 +114,7 @@
   irb_.reset(new IRBuilder(*context_, *module_, *intrinsic_helper_));
 
   // We always need a switch case, so just use a normal function.
-  switch(GetInstructionSet()) {
+  switch (GetInstructionSet()) {
   default:
     runtime_support_.reset(new RuntimeSupportBuilder(*context_, *module_, *irb_));
     break;
diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc
index 976aa8f..19ccc36 100644
--- a/compiler/llvm/runtime_support_builder.cc
+++ b/compiler/llvm/runtime_support_builder.cc
@@ -27,14 +27,14 @@
 #include <llvm/IR/Module.h>
 #include <llvm/IR/Type.h>
 
-using namespace llvm;
+using ::llvm::BasicBlock;
+using ::llvm::CallInst;
+using ::llvm::Function;
+using ::llvm::Value;
 
 namespace art {
 namespace llvm {
 
-using namespace runtime_support;
-
-
 RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context,
                                              ::llvm::Module& module,
                                              IRBuilder& irb)
diff --git a/compiler/llvm/runtime_support_builder_arm.cc b/compiler/llvm/runtime_support_builder_arm.cc
index 57a9971..5a9d2b8 100644
--- a/compiler/llvm/runtime_support_builder_arm.cc
+++ b/compiler/llvm/runtime_support_builder_arm.cc
@@ -28,11 +28,17 @@
 
 #include <vector>
 
-using namespace llvm;
+using ::llvm::CallInst;
+using ::llvm::Function;
+using ::llvm::FunctionType;
+using ::llvm::InlineAsm;
+using ::llvm::IntegerType;
+using ::llvm::Type;
+using ::llvm::Value;
 
 namespace {
 
-char LDRSTRSuffixByType(art::llvm::IRBuilder& irb, ::llvm::Type* type) {
+char LDRSTRSuffixByType(art::llvm::IRBuilder& irb, Type* type) {
   int width = type->isPointerTy() ?
               irb.getSizeOfPtrEquivInt()*8 :
               ::llvm::cast<IntegerType>(type)->getBitWidth();
@@ -53,7 +59,7 @@
 
 /* Thread */
 
-::llvm::Value* RuntimeSupportBuilderARM::EmitGetCurrentThread() {
+Value* RuntimeSupportBuilderARM::EmitGetCurrentThread() {
   Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
   InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), "mov $0, r9", "=r", false);
   CallInst* thread = irb_.CreateCall(func);
@@ -62,8 +68,8 @@
   return thread;
 }
 
-::llvm::Value* RuntimeSupportBuilderARM::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
-                                                                TBAASpecialType s_ty) {
+Value* RuntimeSupportBuilderARM::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
+                                                          TBAASpecialType s_ty) {
   FunctionType* func_ty = FunctionType::get(/*Result=*/type,
                                             /*isVarArg=*/false);
   std::string inline_asm(StringPrintf("ldr%c $0, [r9, #%d]",
@@ -76,7 +82,7 @@
   return result;
 }
 
-void RuntimeSupportBuilderARM::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+void RuntimeSupportBuilderARM::EmitStoreToThreadOffset(int64_t offset, Value* value,
                                                        TBAASpecialType s_ty) {
   FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
                                             /*Params=*/value->getType(),
@@ -89,8 +95,7 @@
   irb_.SetTBAA(call_inst, s_ty);
 }
 
-::llvm::Value*
-RuntimeSupportBuilderARM::EmitSetCurrentThread(::llvm::Value* thread) {
+Value* RuntimeSupportBuilderARM::EmitSetCurrentThread(Value* thread) {
   // Separate to two InlineAsm: The first one produces the return value, while the second,
   // sets the current thread.
   // LLVM can delete the first one if the caller in LLVM IR doesn't use the return value.
@@ -114,7 +119,7 @@
 
 /* Monitor */
 
-void RuntimeSupportBuilderARM::EmitLockObject(::llvm::Value* object) {
+void RuntimeSupportBuilderARM::EmitLockObject(Value* object) {
   RuntimeSupportBuilder::EmitLockObject(object);
   FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
                                             /*isVarArg=*/false);
@@ -122,7 +127,7 @@
   irb_.CreateCall(func);
 }
 
-void RuntimeSupportBuilderARM::EmitUnlockObject(::llvm::Value* object) {
+void RuntimeSupportBuilderARM::EmitUnlockObject(Value* object) {
   RuntimeSupportBuilder::EmitUnlockObject(object);
   FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
                                             /*isVarArg=*/false);
diff --git a/compiler/llvm/runtime_support_builder_thumb2.cc b/compiler/llvm/runtime_support_builder_thumb2.cc
index 2b9170c..b8a5f05 100644
--- a/compiler/llvm/runtime_support_builder_thumb2.cc
+++ b/compiler/llvm/runtime_support_builder_thumb2.cc
@@ -31,13 +31,18 @@
 #include <inttypes.h>
 #include <vector>
 
-using namespace llvm;
+using ::llvm::BasicBlock;
+using ::llvm::Function;
+using ::llvm::FunctionType;
+using ::llvm::InlineAsm;
+using ::llvm::Type;
+using ::llvm::Value;
 
 namespace art {
 namespace llvm {
 
 
-void RuntimeSupportBuilderThumb2::EmitLockObject(::llvm::Value* object) {
+void RuntimeSupportBuilderThumb2::EmitLockObject(Value* object) {
   FunctionType* func_ty = FunctionType::get(/*Result=*/irb_.getInt32Ty(),
                                             /*Params=*/irb_.getJObjectTy(),
                                             /*isVarArg=*/false);
@@ -58,10 +63,10 @@
 
   InlineAsm* func = InlineAsm::get(func_ty, asms, "=&l,l,~l,~l", true);
 
-  ::llvm::Value* retry_slow_path = irb_.CreateCall(func, object);
+  Value* retry_slow_path = irb_.CreateCall(func, object);
   retry_slow_path = irb_.CreateICmpNE(retry_slow_path, irb_.getJInt(0));
 
-  ::llvm::Function* parent_func = irb_.GetInsertBlock()->getParent();
+  Function* parent_func = irb_.GetInsertBlock()->getParent();
   BasicBlock* basic_block_lock = BasicBlock::Create(context_, "lock", parent_func);
   BasicBlock* basic_block_cont = BasicBlock::Create(context_, "lock_cont", parent_func);
   irb_.CreateCondBr(retry_slow_path, basic_block_lock, basic_block_cont, kUnlikely);
diff --git a/compiler/llvm/runtime_support_builder_x86.cc b/compiler/llvm/runtime_support_builder_x86.cc
index eed0b63..c056e58 100644
--- a/compiler/llvm/runtime_support_builder_x86.cc
+++ b/compiler/llvm/runtime_support_builder_x86.cc
@@ -29,13 +29,19 @@
 
 #include <vector>
 
-using namespace llvm;
+using ::llvm::CallInst;
+using ::llvm::Function;
+using ::llvm::FunctionType;
+using ::llvm::InlineAsm;
+using ::llvm::Type;
+using ::llvm::UndefValue;
+using ::llvm::Value;
 
 namespace art {
 namespace llvm {
 
 
-::llvm::Value* RuntimeSupportBuilderX86::EmitGetCurrentThread() {
+Value* RuntimeSupportBuilderX86::EmitGetCurrentThread() {
   Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
   std::string inline_asm(StringPrintf("mov %%fs:%d, $0", Thread::SelfOffset().Int32Value()));
   InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), inline_asm, "=r", false);
@@ -45,8 +51,8 @@
   return thread;
 }
 
-::llvm::Value* RuntimeSupportBuilderX86::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
-                                                                TBAASpecialType s_ty) {
+Value* RuntimeSupportBuilderX86::EmitLoadFromThreadOffset(int64_t offset, Type* type,
+                                                          TBAASpecialType s_ty) {
   FunctionType* func_ty = FunctionType::get(/*Result=*/type,
                                             /*isVarArg=*/false);
   std::string inline_asm(StringPrintf("mov %%fs:%d, $0", static_cast<int>(offset)));
@@ -57,7 +63,7 @@
   return result;
 }
 
-void RuntimeSupportBuilderX86::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
+void RuntimeSupportBuilderX86::EmitStoreToThreadOffset(int64_t offset, Value* value,
                                                        TBAASpecialType s_ty) {
   FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
                                             /*Params=*/value->getType(),
@@ -68,9 +74,9 @@
   irb_.SetTBAA(call_inst, s_ty);
 }
 
-::llvm::Value* RuntimeSupportBuilderX86::EmitSetCurrentThread(::llvm::Value*) {
+Value* RuntimeSupportBuilderX86::EmitSetCurrentThread(Value*) {
   /* Nothing to be done. */
-  return ::llvm::UndefValue::get(irb_.getJObjectTy());
+  return UndefValue::get(irb_.getJObjectTy());
 }
 
 
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4c32506..da05c49 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -399,8 +399,7 @@
                          fp_spill_mask,
                          mapping_table_offset,
                          vmap_table_offset,
-                         gc_map_offset
-                         );
+                         gc_map_offset);
 
   if (compiler_driver_->IsImage()) {
     ClassLinker* linker = Runtime::Current()->GetClassLinker();
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fd0bf2f..85eb89b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1431,6 +1431,10 @@
   std::string oat_option;
   std::string boot_image_option;
   std::string boot_oat_option;
+
+  // We are more like a compiler than a run-time. We don't want to execute code.
+  options.push_back(std::make_pair("compiler", reinterpret_cast<void*>(NULL)));
+
   if (boot_image_filename != NULL) {
     boot_image_option += "-Ximage:";
     boot_image_option += boot_image_filename;
diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc
index ea3e35f..9f3587a 100644
--- a/runtime/base/histogram_test.cc
+++ b/runtime/base/histogram_test.cc
@@ -20,7 +20,7 @@
 
 #include <sstream>
 
-using namespace art;
+namespace art {
 
 //Simple usage:
 //  Histogram *hist = new Histogram("SimplePercentiles");
@@ -266,3 +266,5 @@
   hist->PrintConfidenceIntervals(stream, 0.99);
   EXPECT_EQ(expected, stream.str());
 }
+
+}  // namespace art
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 07157da..b3f5092 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -148,7 +148,7 @@
       }
       android_atomic_dec(&num_pending_readers_);
     }
-  } while(!done);
+  } while (!done);
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
 #endif
@@ -176,7 +176,7 @@
     } else {
       LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
     }
-  } while(!done);
+  } while (!done);
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
 #endif
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 25c0b9e..1df0207 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -178,7 +178,7 @@
     do {
       slot = cur_content_log_entry_;
       new_slot = (slot + 1) % kContentionLogSize;
-    } while(!cur_content_log_entry_.CompareAndSwap(slot, new_slot));
+    } while (!cur_content_log_entry_.CompareAndSwap(slot, new_slot));
     contention_log_[new_slot].blocked_tid = blocked_tid;
     contention_log_[new_slot].owner_tid = owner_tid;
     contention_log_[new_slot].count = 1;
@@ -312,7 +312,7 @@
         }
         android_atomic_dec(&num_contenders_);
       }
-    } while(!done);
+    } while (!done);
     DCHECK_EQ(state_, 1);
     exclusive_owner_ = SafeGetTid(self);
 #else
@@ -344,7 +344,7 @@
       } else {
         return false;
       }
-    } while(!done);
+    } while (!done);
     DCHECK_EQ(state_, 1);
     exclusive_owner_ = SafeGetTid(self);
 #else
@@ -404,7 +404,7 @@
         _exit(1);
       }
     }
-  } while(!done);
+  } while (!done);
 #else
     CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
 #endif
@@ -513,7 +513,7 @@
       }
       android_atomic_dec(&num_pending_writers_);
     }
-  } while(!done);
+  } while (!done);
   DCHECK_EQ(state_, -1);
   exclusive_owner_ = SafeGetTid(self);
 #else
@@ -545,7 +545,7 @@
     } else {
       LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
     }
-  } while(!done);
+  } while (!done);
 #else
   CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
 #endif
@@ -583,7 +583,7 @@
       }
       android_atomic_dec(&num_pending_writers_);
     }
-  } while(!done);
+  } while (!done);
   exclusive_owner_ = SafeGetTid(self);
 #else
   timespec ts;
@@ -616,7 +616,7 @@
       // Owner holds it exclusively.
       return false;
     }
-  } while(!done);
+  } while (!done);
 #else
   int result = pthread_rwlock_tryrdlock(&rwlock_);
   if (result == EBUSY) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 403a2eb..7429ab1 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -401,7 +401,7 @@
    *
    * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
    */
-  void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) {
+  void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     va_list ap;
 
     const mirror::AbstractMethod* traceMethod = NULL;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e5844b0..75886cf 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -35,7 +35,30 @@
 #include "runtime_support.h"
 #include "sirt_ref.h"
 
-using namespace art::mirror;
+using ::art::mirror::AbstractMethod;
+using ::art::mirror::AbstractMethodClass;
+using ::art::mirror::CharArray;
+using ::art::mirror::Class;
+using ::art::mirror::ClassClass;
+using ::art::mirror::ClassLoader;
+using ::art::mirror::Constructor;
+using ::art::mirror::DexCache;
+using ::art::mirror::DoubleArray;
+using ::art::mirror::Field;
+using ::art::mirror::FieldClass;
+using ::art::mirror::IfTable;
+using ::art::mirror::IntArray;
+using ::art::mirror::LongArray;
+using ::art::mirror::Method;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::Proxy;
+using ::art::mirror::ShortArray;
+using ::art::mirror::StackTraceElement;
+using ::art::mirror::StaticStorageBase;
+using ::art::mirror::String;
+using ::art::mirror::StringClass;
+using ::art::mirror::Throwable;
 
 namespace art {
 
@@ -937,14 +960,14 @@
 
   // TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
   // http://code.google.com/p/googletest/issues/detail?id=322
-  EXPECT_FALSE(                   s0->GetBoolean(statics));
-  EXPECT_EQ(6,                    s1->GetByte(statics));
-  EXPECT_EQ('b',                  s2->GetChar(statics));
-  EXPECT_EQ(-535,                 s3->GetShort(statics));
-  EXPECT_EQ(2000000001,           s4->GetInt(statics));
+  EXPECT_FALSE(s0->GetBoolean(statics));
+  EXPECT_EQ(6, s1->GetByte(statics));
+  EXPECT_EQ('b', s2->GetChar(statics));
+  EXPECT_EQ(-535, s3->GetShort(statics));
+  EXPECT_EQ(2000000001, s4->GetInt(statics));
   EXPECT_EQ(0x34567890abcdef12LL, s5->GetLong(statics));
-  EXPECT_EQ(0.75,                 s6->GetFloat(statics));
-  EXPECT_EQ(16777219,             s7->GetDouble(statics));
+  EXPECT_EQ(0.75, s6->GetFloat(statics));
+  EXPECT_EQ(16777219, s7->GetDouble(statics));
   EXPECT_TRUE(s8->GetObject(statics)->AsString()->Equals("robot"));
 }
 
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 73c47b5..e735e27 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -178,8 +178,7 @@
                                 fp_spill_mask,
                                 reinterpret_cast<uint32_t>(mapping_table),
                                 reinterpret_cast<uint32_t>(vmap_table),
-                                reinterpret_cast<uint32_t>(gc_map)
-                                );
+                                reinterpret_cast<uint32_t>(gc_map));
   }
 
   void MakeExecutable(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -308,7 +307,7 @@
     options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast<void*>(NULL)));
     options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
     options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
-    if(!Runtime::Create(options, false)) {
+    if (!Runtime::Create(options, false)) {
       LOG(FATAL) << "Failed to create runtime";
       return;
     }
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b502c9a..4fbee51 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -184,7 +184,7 @@
 static ObjectRegistry* gRegistry = NULL;
 
 // Recent allocation tracking.
-static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER ("AllocTracker lock");
+static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER("AllocTracker lock");
 AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer<AllocRecord>
 static size_t gAllocRecordMax GUARDED_BY(gAllocTrackerLock) = 0;
 static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0;
@@ -2761,7 +2761,7 @@
     VLOG(jdwp) << "    Control has returned from event thread";
 
     /* wait for thread to re-suspend itself */
-    SuspendThread(thread_id, false /* request_suspension */ );
+    SuspendThread(thread_id, false /* request_suspension */);
     self->TransitionFromSuspendedToRunnable();
   }
 
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 28e06cc..8edeb18 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1039,7 +1039,7 @@
   }
   InvokeType GetMethodInvokeType(const DexFile::ClassDef& class_def) const {
     if (HasNextDirectMethod()) {
-      if ((GetMemberAccessFlags() & kAccStatic) != 0 ) {
+      if ((GetMemberAccessFlags() & kAccStatic) != 0) {
         return kStatic;
       } else {
         return kDirect;
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 427baf2..6b41511 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -56,12 +56,11 @@
 
 int const Instruction::kInstructionSizeInCodeUnits[] = {
 #define INSTRUCTION_SIZE(opcode, c, p, format, r, i, a, v) \
-  (( opcode == NOP                      ) ? -1 : \
+  ((opcode == NOP)                        ? -1 : \
    ((format >= k10x) && (format <= k10t)) ? 1 : \
    ((format >= k20t) && (format <= k22c)) ? 2 : \
    ((format >= k32x) && (format <= k3rc)) ? 3 : \
-   ( format == k51l                     ) ? 5 : -1 \
-  ),
+    (format == k51l)                      ? 5 : -1),
 #include "dex_instruction_list.h"
   DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE)
 #undef DEX_INSTRUCTION_LIST
diff --git a/runtime/disassembler_arm.cc b/runtime/disassembler_arm.cc
index 172bef8..a7319a5 100644
--- a/runtime/disassembler_arm.cc
+++ b/runtime/disassembler_arm.cc
@@ -1034,7 +1034,7 @@
 
   os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
   return 4;
-}
+}  // NOLINT(readability/fn_size)
 
 size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr) {
   uint16_t instr = ReadU16(instr_ptr);
@@ -1184,7 +1184,7 @@
         ThumbRegister Rm(instr, 6);
         ThumbRegister Rn(instr, 3);
         ThumbRegister Rt(instr, 0);
-        switch(opB) {
+        switch (opB) {
           case 0: opcode << "str"; break;
           case 1: opcode << "strh"; break;
           case 2: opcode << "strb"; break;
@@ -1206,7 +1206,7 @@
         uint16_t opB = (instr >> 11) & 1;
         ThumbRegister Rn(instr, 3);
         ThumbRegister Rt(instr, 0);
-        switch(opA) {
+        switch (opA) {
           case 6:
             imm5 <<= 2;
             opcode << (opB == 0 ? "str" : "ldr");
diff --git a/runtime/disassembler_x86.cc b/runtime/disassembler_x86.cc
index bda162a..48f7b6b 100644
--- a/runtime/disassembler_x86.cc
+++ b/runtime/disassembler_x86.cc
@@ -25,8 +25,7 @@
 namespace art {
 namespace x86 {
 
-DisassemblerX86::DisassemblerX86() {
-}
+DisassemblerX86::DisassemblerX86() {}
 
 size_t DisassemblerX86::Dump(std::ostream& os, const uint8_t* begin) {
   return DumpInstruction(os, begin);
@@ -745,7 +744,7 @@
                      prefixed_opcode.str().c_str())
      << args.str() << '\n';
   return instr - begin_instr;
-}
+}  // NOLINT(readability/fn_size)
 
 }  // namespace x86
 }  // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index da122e6..6ce36e8 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -366,7 +366,7 @@
   const unsigned char *name = (const unsigned char *) _name;
   unsigned h = 0, g;
 
-  while(*name) {
+  while (*name) {
     h = (h << 4) + *name++;
     g = h & 0xf0000000;
     h ^= g;
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index d677ade..92d9ea2 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -66,7 +66,7 @@
         // Stack overflow.
         return false;
       }
-    } while(!back_index_.compare_and_swap(index, index + 1));
+    } while (!back_index_.compare_and_swap(index, index + 1));
     begin_[index] = value;
     return true;
   }
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index aa02f82..940ed13 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -30,7 +30,7 @@
 #include "thread.h"
 #include "UniquePtr.h"
 
-using namespace art::mirror;
+using ::art::mirror::Object;
 
 namespace art {
 namespace gc {
@@ -44,8 +44,8 @@
   }
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
-                   bool /* is_static */) const {
+  void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                  bool /* is_static */) const {
     // TODO: Optimize?
     // TODO: C++0x auto
     const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
@@ -70,7 +70,7 @@
       bitmap_(bitmap) {
   }
 
-  void operator ()(const Object* obj) const
+  void operator()(const Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_) {
     DCHECK(obj != NULL);
@@ -90,7 +90,7 @@
     : cleared_cards_(cleared_cards) {
   }
 
-  inline void operator ()(byte* card, byte expected_value, byte new_value) const {
+  inline void operator()(byte* card, byte expected_value, byte new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       cleared_cards_->insert(card);
     }
@@ -106,7 +106,7 @@
     : cleared_cards_(cleared_cards) {
   }
 
-  void operator ()(byte* card, byte expected_card, byte new_card) const {
+  void operator()(byte* card, byte expected_card, byte new_card) const {
     if (expected_card == CardTable::kCardDirty) {
       cleared_cards_->push_back(card);
     }
@@ -120,7 +120,7 @@
   explicit ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep)
       : mark_sweep_(mark_sweep) {}
 
-  void operator ()(const Object* root) const
+  void operator()(const Object* root) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(root != NULL);
@@ -141,14 +141,14 @@
 class AddToReferenceArrayVisitor {
  public:
   explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
-                                      std::vector<const mirror::Object*>* references)
+                                      std::vector<const Object*>* references)
     : mod_union_table_(mod_union_table),
       references_(references) {
   }
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
-                     bool /* is_static */) const {
+  void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                  bool /* is_static */) const {
     // Only add the reference if it is non null and fits our criteria.
     if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
       references_->push_back(ref);
@@ -157,18 +157,18 @@
 
  private:
   ModUnionTableReferenceCache* const mod_union_table_;
-  std::vector<const mirror::Object*>* const references_;
+  std::vector<const Object*>* const references_;
 };
 
 class ModUnionReferenceVisitor {
  public:
   explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
-                                    std::vector<const mirror::Object*>* references)
+                                    std::vector<const Object*>* references)
     : mod_union_table_(mod_union_table),
       references_(references) {
   }
 
-  void operator ()(const Object* obj) const
+  void operator()(const Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     DCHECK(obj != NULL);
     // We don't have an early exit since we use the visitor pattern, an early
@@ -178,7 +178,7 @@
   }
  private:
   ModUnionTableReferenceCache* const mod_union_table_;
-  std::vector<const mirror::Object*>* const references_;
+  std::vector<const Object*>* const references_;
 };
 
 class CheckReferenceVisitor {
@@ -191,8 +191,8 @@
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
   // TODO: Fixme when anotatalysis works with visitors.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
-                   bool /* is_static */) const
+  void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                  bool /* is_static */) const
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     Heap* heap = mod_union_table_->GetHeap();
     if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
@@ -216,13 +216,13 @@
 
 class ModUnionCheckReferences {
  public:
-  explicit ModUnionCheckReferences (ModUnionTableReferenceCache* mod_union_table,
-                                    const std::set<const Object*>& references)
+  explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
+                                   const std::set<const Object*>& references)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       : mod_union_table_(mod_union_table), references_(references) {
   }
 
-  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
     DCHECK(obj != NULL);
     CheckReferenceVisitor visitor(mod_union_table_, references_);
@@ -237,8 +237,8 @@
 void ModUnionTableReferenceCache::Verify() {
   // Start by checking that everything in the mod union table is marked.
   Heap* heap = GetHeap();
-  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
-  typedef std::vector<const mirror::Object*>::const_iterator It2;
+  typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
+  typedef std::vector<const Object*>::const_iterator It2;
   for (It it = references_.begin(), end = references_.end(); it != end; ++it) {
     for (It2 it_ref = it->second.begin(), end_ref = it->second.end(); it_ref != end_ref;
         ++it_ref ) {
@@ -277,13 +277,13 @@
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
   }
   os << "]\nModUnionTable references: [";
-  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It2;
+  typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It2;
   for (It2 it = references_.begin(); it != references_.end(); ++it) {
     const byte* card = &*it->first;
     uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
     uintptr_t end = start + CardTable::kCardSize;
     os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
-    typedef std::vector<const mirror::Object*>::const_iterator It3;
+    typedef std::vector<const Object*>::const_iterator It3;
     for (It3 itr = it->second.begin(); itr != it->second.end();++itr) {
       os << reinterpret_cast<const void*>(*itr) << ",";
     }
@@ -295,7 +295,7 @@
   Heap* heap = GetHeap();
   CardTable* card_table = heap->GetCardTable();
 
-  std::vector<const mirror::Object*> cards_references;
+  std::vector<const Object*> cards_references;
   ModUnionReferenceVisitor visitor(this, &cards_references);
 
   typedef std::set<byte*>::iterator It;
@@ -311,7 +311,7 @@
 
     // Update the corresponding references for the card.
     // TODO: C++0x auto
-    SafeMap<const byte*, std::vector<const mirror::Object*> >::iterator
+    SafeMap<const byte*, std::vector<const Object*> >::iterator
         found = references_.find(card);
     if (found == references_.end()) {
       if (cards_references.empty()) {
@@ -330,10 +330,10 @@
   // TODO: C++0x auto
   size_t count = 0;
 
-  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
+  typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
   for (It it = references_.begin(); it != references_.end(); ++it) {
-    typedef std::vector<const mirror::Object*>::const_iterator It2;
-    for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+    typedef std::vector<const Object*>::const_iterator It2;
+    for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) {
       mark_sweep->MarkRoot(*it_ref);
       ++count;
     }
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index bf4c1ed..77f93a2 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -103,7 +103,7 @@
         : bitmap_(bitmap) {
     }
 
-    void operator ()(mirror::Object* obj) const {
+    void operator()(mirror::Object* obj) const {
       bitmap_->Clear(obj);
     }
    private:
@@ -112,7 +112,7 @@
 
   template <typename Visitor>
   void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
-    for (; visit_begin < visit_end; visit_begin += kAlignment ) {
+    for (; visit_begin < visit_end; visit_begin += kAlignment) {
       visitor(reinterpret_cast<mirror::Object*>(visit_begin));
     }
   }
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index d00d7c2..516a449 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -46,7 +46,7 @@
       begin_(begin),
       end_(end) {}
 
-  void operator ()(const mirror::Object* obj) {
+  void operator()(const mirror::Object* obj) {
     EXPECT_TRUE(obj >= begin_);
     EXPECT_TRUE(obj <= end_);
     EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 7725215..3cc64e9 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -50,17 +50,16 @@
 #include "utils.h"
 #include <sys/mman.h>
 
-using namespace art;
 extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
   // Is this chunk in use?
   if (used_bytes != 0) {
     return;
   }
   // Do we have any whole pages to give back?
-  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
-  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
+  start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));
+  end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));
   if (end > start) {
-    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
+    size_t length = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
     int rc = madvise(start, length, MADV_DONTNEED);
     if (UNLIKELY(rc != 0)) {
       errno = rc;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 865ee13..92ce4ef 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -50,7 +50,10 @@
 #include "thread_list.h"
 #include "verifier/method_verifier.h"
 
-using namespace art::mirror;
+using ::art::mirror::Class;
+using ::art::mirror::Field;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
 
 namespace art {
 namespace gc {
@@ -72,7 +75,7 @@
  public:
   explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  void operator ()(void* finger) const {
+  void operator()(void* finger) const {
     mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
   }
 
@@ -522,7 +525,7 @@
  public:
   explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
+  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
       NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
       Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
@@ -563,7 +566,7 @@
   explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
 
   // TODO: Fixme when anotatalysis works with visitors.
-  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -607,7 +610,7 @@
  public:
   explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
       Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
     }
@@ -1079,8 +1082,8 @@
   explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
 
   // TODO: Fixme when anotatalysis works with visitors.
-  void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
-                   bool /* is_static */) const
+  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
+                  bool /* is_static */) const
       NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1146,8 +1149,8 @@
    public:
     explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}
 
-    void operator ()(const Object* /* obj */, const Object* ref,
-                     const MemberOffset& /* offset */, bool /* is_static */) const {
+    void operator()(const Object* /* obj */, const Object* ref,
+                    const MemberOffset& /* offset */, bool /* is_static */) const {
       if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
         chunk_task_->MarkStackPush(ref);
       }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6423a0d..021d8e7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -232,7 +232,7 @@
 
 // Sort spaces based on begin address
 struct ContinuousSpaceSorter {
-  bool operator ()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
+  bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
     return a->Begin() < b->Begin();
   }
 };
@@ -895,8 +895,8 @@
   }
 
   // For MarkSweep::VisitObjectReferences.
-  void operator ()(const mirror::Object* referrer, const mirror::Object* object,
-                   const MemberOffset&, bool) const {
+  void operator()(const mirror::Object* referrer, const mirror::Object* object,
+                  const MemberOffset&, bool) const {
     if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
       referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
     }
@@ -1166,7 +1166,7 @@
 
 class ScanVisitor {
  public:
-  void operator ()(const mirror::Object* obj) const {
+  void operator()(const mirror::Object* obj) const {
     LOG(INFO) << "Would have rescanned object " << obj;
   }
 };
@@ -1184,8 +1184,8 @@
 
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
   // analysis on visitors.
-  void operator ()(const mirror::Object* obj, const mirror::Object* ref,
-                   const MemberOffset& offset, bool /* is_static */) const
+  void operator()(const mirror::Object* obj, const mirror::Object* ref,
+                  const MemberOffset& offset, bool /* is_static */) const
       NO_THREAD_SAFETY_ANALYSIS {
     // Verify that the reference is live.
     if (UNLIKELY(ref != NULL && !IsLive(ref))) {
@@ -1265,7 +1265,7 @@
  public:
   explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
 
-  void operator ()(const mirror::Object* obj) const
+  void operator()(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
@@ -1312,8 +1312,8 @@
 
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
   // annotalysis on visitors.
-  void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
-                   bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
+                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
     // Filter out class references since changing an object's class does not mark the card as dirty.
     // Also handles large objects, since the only reference they hold is a class reference.
     if (ref != NULL && !ref->IsClass()) {
@@ -1379,7 +1379,7 @@
       : heap_(heap),
         failed_(false) {}
 
-  void operator ()(const mirror::Object* obj) const
+  void operator()(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
     collector::MarkSweep::VisitObjectReferences(obj, visitor);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index aaf449b..feccba3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -70,7 +70,7 @@
 
 class AgeCardVisitor {
  public:
-  byte operator ()(byte card) const {
+  byte operator()(byte card) const {
     if (card == accounting::CardTable::kCardDirty) {
       return card - 1;
     } else {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 45314c2..09a0fc7 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -40,7 +40,21 @@
 #include "scoped_thread_state_change.h"
 #include "thread.h"
 
-using namespace art::mirror;
+using ::art::mirror::AbstractMethod;
+using ::art::mirror::Array;
+using ::art::mirror::BooleanArray;
+using ::art::mirror::ByteArray;
+using ::art::mirror::CharArray;
+using ::art::mirror::Class;
+using ::art::mirror::ClassLoader;
+using ::art::mirror::Field;
+using ::art::mirror::IntArray;
+using ::art::mirror::LongArray;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::ShortArray;
+using ::art::mirror::String;
+using ::art::mirror::Throwable;
 
 namespace art {
 
@@ -969,11 +983,11 @@
       return JValue(); /* Handled in caller. */ \
     } \
   } else { \
-    inst = inst-> next_function (); \
+    inst = inst->next_function(); \
   }
 
 static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
-  __attribute__ ((cold, noreturn, noinline));
+  __attribute__((cold, noreturn, noinline));
 
 static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -989,7 +1003,7 @@
 template<bool do_access_check>
 static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
                       ShadowFrame& shadow_frame, JValue result_register)
-    NO_THREAD_SAFETY_ANALYSIS __attribute__ ((hot));
+    NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
 
 template<bool do_access_check>
 static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
@@ -1254,7 +1268,7 @@
         if (UNLIKELY(s == NULL)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference( inst->VRegA_21c(), s);
+          shadow_frame.SetVRegReference(inst->VRegA_21c(), s);
           inst = inst->Next_2xx();
         }
         break;
@@ -1265,7 +1279,7 @@
         if (UNLIKELY(s == NULL)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference( inst->VRegA_31c(), s);
+          shadow_frame.SetVRegReference(inst->VRegA_31c(), s);
           inst = inst->Next_3xx();
         }
         break;
@@ -2950,7 +2964,7 @@
         UnexpectedOpcode(inst, mh);
     }
   }
-}
+}  // NOLINT(readability/fn_size)
 
 static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
                       ShadowFrame& shadow_frame, JValue result_register)
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index c6047cd..72d9aea 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -49,7 +49,23 @@
 #include "UniquePtr.h"
 #include "well_known_classes.h"
 
-using namespace art::mirror;
+using ::art::mirror::AbstractMethod;
+using ::art::mirror::Array;
+using ::art::mirror::BooleanArray;
+using ::art::mirror::ByteArray;
+using ::art::mirror::CharArray;
+using ::art::mirror::Class;
+using ::art::mirror::ClassLoader;
+using ::art::mirror::DoubleArray;
+using ::art::mirror::Field;
+using ::art::mirror::FloatArray;
+using ::art::mirror::IntArray;
+using ::art::mirror::LongArray;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::ShortArray;
+using ::art::mirror::String;
+using ::art::mirror::Throwable;
 
 namespace art {
 
diff --git a/runtime/oat.h b/runtime/oat.h
index fb28962..4bd1871 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -97,8 +97,7 @@
                    uint32_t fp_spill_mask,
                    uint32_t mapping_table_offset,
                    uint32_t vmap_table_offset,
-                   uint32_t gc_map_offset
-                   );
+                   uint32_t gc_map_offset);
 
   ~OatMethodOffsets();
 
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 467575c..359b539 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -206,8 +206,7 @@
     ThrowClassCastException(throw_location,
                             StringPrintf("Couldn't convert result of type %s to %s",
                                          PrettyDescriptor(srcType).c_str(),
-                                         PrettyDescriptor(dstType).c_str()
-                                         ).c_str());
+                                         PrettyDescriptor(dstType).c_str()).c_str());
   }
   return false;
 }
@@ -297,8 +296,7 @@
         ThrowClassCastException(throw_location,
                                 StringPrintf("Couldn't convert result of type %s to %s",
                                              PrettyTypeOf(o).c_str(),
-                                             PrettyDescriptor(dst_class).c_str()
-                                             ).c_str());
+                                             PrettyDescriptor(dst_class).c_str()).c_str());
       }
       return false;
     }
@@ -359,8 +357,7 @@
                                   StringPrintf("%s has type %s, got %s",
                                                UnboxingFailureKind(m, index, f).c_str(),
                                                PrettyDescriptor(dst_class).c_str(),
-                                               PrettyDescriptor(src_descriptor.c_str()).c_str()
-                                               ).c_str());
+                                               PrettyDescriptor(src_descriptor.c_str()).c_str()).c_str());
     return false;
   }
 
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
index d703db2..7b4c3ca 100644
--- a/runtime/runtime_support_llvm.cc
+++ b/runtime/runtime_support_llvm.cc
@@ -47,9 +47,10 @@
 #include <stdint.h>
 #include <stdlib.h>
 
-using namespace art;
+namespace art {
 
-extern "C" {
+using ::art::mirror::AbstractMethod;
+
 class ShadowFrameCopyVisitor : public StackVisitor {
  public:
   explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
@@ -59,7 +60,7 @@
     if (IsShadowFrame()) {
       ShadowFrame* cur_frame = GetCurrentShadowFrame();
       size_t num_regs = cur_frame->NumberOfVRegs();
-      mirror::AbstractMethod* method = cur_frame->GetMethod();
+      AbstractMethod* method = cur_frame->GetMethod();
       uint32_t dex_pc = cur_frame->GetDexPC();
       ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
 
@@ -101,6 +102,61 @@
   ShadowFrame* top_frame_;
 };
 
+}  // namespace art
+
+extern "C" {
+
+using ::art::CatchHandlerIterator;
+using ::art::DexFile;
+using ::art::FindFieldFast;
+using ::art::FindMethodFast;
+using ::art::InstanceObjectRead;
+using ::art::InstanceObjectWrite;
+using ::art::InstancePrimitiveRead;
+using ::art::InstancePrimitiveWrite;
+using ::art::Instruction;
+using ::art::InvokeType;
+using ::art::JNIEnvExt;
+using ::art::JValue;
+using ::art::Locks;
+using ::art::MethodHelper;
+using ::art::PrettyClass;
+using ::art::PrettyMethod;
+using ::art::Primitive;
+using ::art::ResolveStringFromCode;
+using ::art::Runtime;
+using ::art::ScopedJniEnvLocalRefState;
+using ::art::ScopedObjectAccessUnchecked;
+using ::art::ShadowFrame;
+using ::art::ShadowFrameCopyVisitor;
+using ::art::StaticObjectRead;
+using ::art::StaticObjectWrite;
+using ::art::StaticPrimitiveRead;
+using ::art::StaticPrimitiveWrite;
+using ::art::Thread;
+using ::art::ThrowArithmeticExceptionDivideByZero;
+using ::art::ThrowArrayIndexOutOfBoundsException;
+using ::art::ThrowArrayStoreException;
+using ::art::ThrowClassCastException;
+using ::art::ThrowLocation;
+using ::art::ThrowNoSuchMethodError;
+using ::art::ThrowNullPointerException;
+using ::art::ThrowNullPointerExceptionFromDexPC;
+using ::art::ThrowStackOverflowError;
+using ::art::kDirect;
+using ::art::kInterface;
+using ::art::kNative;
+using ::art::kStatic;
+using ::art::kSuper;
+using ::art::kVirtual;
+using ::art::mirror::AbstractMethod;
+using ::art::mirror::Array;
+using ::art::mirror::Class;
+using ::art::mirror::Field;
+using ::art::mirror::Object;
+using ::art::mirror::Throwable;
+
 //----------------------------------------------------------------------------
 // Thread
 //----------------------------------------------------------------------------
@@ -118,7 +174,7 @@
   return NULL;
 }
 
-void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread)
+void art_portable_lock_object_from_code(Object* obj, Thread* thread)
     EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
   DCHECK(obj != NULL);        // Assumed to have been checked before entry
   obj->MonitorEnter(thread);  // May block
@@ -127,7 +183,7 @@
   DCHECK(!thread->IsExceptionPending());
 }
 
-void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread)
+void art_portable_unlock_object_from_code(Object* obj, Thread* thread)
     UNLOCK_FUNCTION(monitor_lock_) {
   DCHECK(obj != NULL);  // Assumed to have been checked before entry
   // MonitorExit may throw exception
@@ -143,14 +199,14 @@
     visitor.WalkStack(true);
     self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
     self->SetDeoptimizationReturnValue(JValue());
-    self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+    self->SetException(ThrowLocation(), reinterpret_cast<Throwable*>(-1));
   }
 }
 
 ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
-                                              ShadowFrame* new_shadow_frame,
-                                              mirror::AbstractMethod* method,
-                                              uint32_t num_vregs) {
+                                                      ShadowFrame* new_shadow_frame,
+                                                      AbstractMethod* method,
+                                                      uint32_t num_vregs) {
   ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
   new_shadow_frame->SetMethod(method);
   new_shadow_frame->SetNumberOfVRegs(num_vregs);
@@ -201,7 +257,7 @@
   ThrowStackOverflowError(Thread::Current());
 }
 
-void art_portable_throw_exception_from_code(mirror::Throwable* exception)
+void art_portable_throw_exception_from_code(Throwable* exception)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -216,21 +272,22 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   DCHECK(self->IsExceptionPending());
   // TODO: make this inline.
-  mirror::Throwable* exception = self->GetException(NULL);
+  Throwable* exception = self->GetException(NULL);
   self->ClearException();
   return exception;
 }
 
-int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method, uint32_t ti_offset)
+int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method,
+                                                uint32_t ti_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Thread* self = Thread::Current();  // TODO: make an argument.
   ThrowLocation throw_location;
-  mirror::Throwable* exception = self->GetException(&throw_location);
+  Throwable* exception = self->GetException(&throw_location);
   // Check for special deoptimization exception.
   if (UNLIKELY(reinterpret_cast<int32_t>(exception) == -1)) {
     return -1;
   }
-  mirror::Class* exception_type = exception->GetClass();
+  Class* exception_type = exception->GetClass();
   MethodHelper mh(current_method);
   const DexFile::CodeItem* code_item = mh.GetCodeItem();
   DCHECK_LT(ti_offset, code_item->tries_size_);
@@ -249,7 +306,7 @@
       break;
     }
     // Does this catch exception type apply?
-    mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
+    Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
     if (UNLIKELY(iter_exception_type == NULL)) {
       // TODO: check, the verifier (class linker?) should take care of resolving all exception
       //       classes early.
@@ -278,57 +335,62 @@
 // Object Space
 //----------------------------------------------------------------------------
 
-mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx,
-                                            mirror::AbstractMethod* referrer,
-                                            Thread* thread)
+Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return AllocObjectFromCode(type_idx, referrer, thread, false);
 }
 
-mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
-                                                              mirror::AbstractMethod* referrer,
+Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
+                                                              AbstractMethod* referrer,
                                                               Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return AllocObjectFromCode(type_idx, referrer, thread, true);
 }
 
-mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx,
-                                           mirror::AbstractMethod* referrer,
+Object* art_portable_alloc_array_from_code(uint32_t type_idx,
+                                           AbstractMethod* referrer,
                                            uint32_t length,
                                            Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return AllocArrayFromCode(type_idx, referrer, length, self, false);
 }
 
-mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
-                                                             mirror::AbstractMethod* referrer,
+Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
+                                                             AbstractMethod* referrer,
                                                              uint32_t length,
                                                              Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return AllocArrayFromCode(type_idx, referrer, length, self, true);
 }
 
-mirror::Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
-                                                     mirror::AbstractMethod* referrer,
+Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
+                                                     AbstractMethod* referrer,
                                                      uint32_t length,
                                                      Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
 }
 
-mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
-                                                                       mirror::AbstractMethod* referrer,
+Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
+                                                                       AbstractMethod* referrer,
                                                                        uint32_t length,
                                                                        Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
 }
 
-static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
-                                        mirror::AbstractMethod* caller_method, bool access_check,
-                                        InvokeType type, Thread* thread)
+static AbstractMethod* FindMethodHelper(uint32_t method_idx,
+                                        Object* this_object,
+                                        AbstractMethod* caller_method,
+                                        bool access_check,
+                                        InvokeType type,
+                                        Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
+  AbstractMethod* method = FindMethodFast(method_idx,
+                                          this_object,
+                                          caller_method,
+                                          access_check,
+                                          type);
   if (UNLIKELY(method == NULL)) {
     method = FindMethodFromCode(method_idx, this_object, caller_method,
                                 thread, access_check, type);
@@ -349,71 +411,70 @@
   return method;
 }
 
-mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
-                                                                    mirror::Object* this_object,
-                                                                    mirror::AbstractMethod* referrer,
+Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
+                                                                    Object* this_object,
+                                                                    AbstractMethod* referrer,
                                                                     Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
 }
 
-mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
-                                                                    mirror::Object* this_object,
-                                                                    mirror::AbstractMethod* referrer,
+Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
+                                                                    Object* this_object,
+                                                                    AbstractMethod* referrer,
                                                                     Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
 }
 
-mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
-                                                                     mirror::Object* this_object,
-                                                                     mirror::AbstractMethod* referrer,
+Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
+                                                                     Object* this_object,
+                                                                     AbstractMethod* referrer,
                                                                      Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
 }
 
-mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
-                                                                   mirror::Object* this_object,
-                                                                   mirror::AbstractMethod* referrer,
+Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
+                                                                   Object* this_object,
+                                                                   AbstractMethod* referrer,
                                                                    Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
 }
 
-mirror::Object*
-art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
-                                                       mirror::Object* this_object,
-                                                       mirror::AbstractMethod* referrer,
-                                                       Thread* thread)
+Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
+                                                                       Object* this_object,
+                                                                       AbstractMethod* referrer,
+                                                                       Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
 }
 
-mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
-                                                     mirror::Object* this_object,
-                                                     mirror::AbstractMethod* referrer,
+Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
+                                                     Object* this_object,
+                                                     AbstractMethod* referrer,
                                                      Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
 }
 
-mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
-                                                         mirror::AbstractMethod* referrer,
+Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
+                                                         AbstractMethod* referrer,
                                                          Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
 }
 
-mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx,
-                                               mirror::AbstractMethod* referrer,
+Object* art_portable_initialize_type_from_code(uint32_t type_idx,
+                                               AbstractMethod* referrer,
                                                Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
 }
 
-mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
-                                                                 mirror::AbstractMethod* referrer,
+Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
+                                                                 AbstractMethod* referrer,
                                                                  Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when caller isn't guaranteed to have access to a type and the dex cache may be
@@ -421,20 +482,29 @@
   return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
 }
 
-mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer, uint32_t string_idx)
+Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return ResolveStringFromCode(referrer, string_idx);
 }
 
-int32_t art_portable_set32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int32_t new_value)
+int32_t art_portable_set32_static_from_code(uint32_t field_idx,
+                                            AbstractMethod* referrer,
+                                            int32_t new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t));
+  Field* field = FindFieldFast(field_idx,
+                               referrer,
+                               StaticPrimitiveWrite,
+                               sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     field->Set32(field->GetDeclaringClass(), new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticPrimitiveWrite, sizeof(uint32_t), true);
+  field = FindFieldFromCode(field_idx,
+                            referrer,
+                            Thread::Current(),
+                            StaticPrimitiveWrite,
+                            sizeof(uint32_t),
+                            true);
   if (LIKELY(field != NULL)) {
     field->Set32(field->GetDeclaringClass(), new_value);
     return 0;
@@ -442,15 +512,21 @@
   return -1;
 }
 
-int32_t art_portable_set64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, int64_t new_value)
+int32_t art_portable_set64_static_from_code(uint32_t field_idx,
+                                            AbstractMethod* referrer,
+                                            int64_t new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
+  Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     field->Set64(field->GetDeclaringClass(), new_value);
     return 0;
   }
-  field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticPrimitiveWrite, sizeof(uint64_t), true);
+  field = FindFieldFromCode(field_idx,
+                            referrer,
+                            Thread::Current(),
+                            StaticPrimitiveWrite,
+                            sizeof(uint64_t),
+                            true);
   if (LIKELY(field != NULL)) {
     field->Set64(field->GetDeclaringClass(), new_value);
     return 0;
@@ -458,15 +534,17 @@
   return -1;
 }
 
-int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* new_value)
+int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
+                                              AbstractMethod* referrer,
+                                              Object* new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(mirror::Object*));
+  Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
   if (LIKELY(field != NULL)) {
     field->SetObj(field->GetDeclaringClass(), new_value);
     return 0;
   }
   field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticObjectWrite, sizeof(mirror::Object*), true);
+                            StaticObjectWrite, sizeof(Object*), true);
   if (LIKELY(field != NULL)) {
     field->SetObj(field->GetDeclaringClass(), new_value);
     return 0;
@@ -474,9 +552,9 @@
   return -1;
 }
 
-int32_t art_portable_get32_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
+int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
+  Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     return field->Get32(field->GetDeclaringClass());
   }
@@ -488,9 +566,9 @@
   return 0;
 }
 
-int64_t art_portable_get64_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
+int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
+  Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     return field->Get64(field->GetDeclaringClass());
   }
@@ -502,24 +580,24 @@
   return 0;
 }
 
-mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer)
+Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(mirror::Object*));
+  Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
   if (LIKELY(field != NULL)) {
     return field->GetObj(field->GetDeclaringClass());
   }
   field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            StaticObjectRead, sizeof(mirror::Object*), true);
+                            StaticObjectRead, sizeof(Object*), true);
   if (LIKELY(field != NULL)) {
     return field->GetObj(field->GetDeclaringClass());
   }
   return 0;
 }
 
-int32_t art_portable_set32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
-                                     mirror::Object* obj, uint32_t new_value)
+int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
+                                              Object* obj, uint32_t new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
+  Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     field->Set32(obj, new_value);
     return 0;
@@ -533,10 +611,10 @@
   return -1;
 }
 
-int32_t art_portable_set64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
-                                      mirror::Object* obj, int64_t new_value)
+int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
+                                              Object* obj, int64_t new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
+  Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     field->Set64(obj, new_value);
     return 0;
@@ -550,16 +628,16 @@
   return -1;
 }
 
-int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer,
-                                        mirror::Object* obj, mirror::Object* new_value)
+int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
+                                                Object* obj, Object* new_value)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(mirror::Object*));
+  Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*));
   if (LIKELY(field != NULL)) {
     field->SetObj(obj, new_value);
     return 0;
   }
   field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstanceObjectWrite, sizeof(mirror::Object*), true);
+                            InstanceObjectWrite, sizeof(Object*), true);
   if (LIKELY(field != NULL)) {
     field->SetObj(obj, new_value);
     return 0;
@@ -567,9 +645,9 @@
   return -1;
 }
 
-int32_t art_portable_get32_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
+int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
+  Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
   if (LIKELY(field != NULL)) {
     return field->Get32(obj);
   }
@@ -581,9 +659,9 @@
   return 0;
 }
 
-int64_t art_portable_get64_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
+int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
+  Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
   if (LIKELY(field != NULL)) {
     return field->Get64(obj);
   }
@@ -595,22 +673,22 @@
   return 0;
 }
 
-mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, mirror::AbstractMethod* referrer, mirror::Object* obj)
+Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*));
+  Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*));
   if (LIKELY(field != NULL)) {
     return field->GetObj(obj);
   }
   field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
-                            InstanceObjectRead, sizeof(mirror::Object*), true);
+                            InstanceObjectRead, sizeof(Object*), true);
   if (LIKELY(field != NULL)) {
     return field->GetObj(obj);
   }
   return 0;
 }
 
-void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method, uint32_t dex_pc,
-                                    mirror::Array* array, uint32_t payload_offset)
+void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc,
+                                            Array* array, uint32_t payload_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Test: Is array equal to null? (Guard NullPointerException)
   if (UNLIKELY(array == NULL)) {
@@ -650,14 +728,14 @@
 // Type checking, in the nature of casting
 //----------------------------------------------------------------------------
 
-int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type, const mirror::Class* src_type)
+int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   DCHECK(dest_type != NULL);
   DCHECK(src_type != NULL);
   return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
 }
 
-void art_portable_check_cast_from_code(const mirror::Class* dest_type, const mirror::Class* src_type)
+void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
   DCHECK(src_type->IsClass()) << PrettyClass(src_type);
@@ -666,17 +744,17 @@
   }
 }
 
-void art_portable_check_put_array_element_from_code(const mirror::Object* element,
-                                                    const mirror::Object* array)
+void art_portable_check_put_array_element_from_code(const Object* element,
+                                                    const Object* array)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (element == NULL) {
     return;
   }
   DCHECK(array != NULL);
-  mirror::Class* array_class = array->GetClass();
+  Class* array_class = array->GetClass();
   DCHECK(array_class != NULL);
-  mirror::Class* component_type = array_class->GetComponentType();
-  mirror::Class* element_class = element->GetClass();
+  Class* component_type = array_class->GetComponentType();
+  Class* element_class = element->GetClass();
   if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
     ThrowArrayStoreException(element_class, array_class);
   }
@@ -717,19 +795,20 @@
 
 
 void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
-                                      jobject locked,
-                                      Thread* self)
+                                              jobject locked,
+                                              Thread* self)
     SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
   self->TransitionFromSuspendedToRunnable();
   UnlockJniSynchronizedMethod(locked, self);  // Must decode before pop.
   PopLocalReferences(saved_local_ref_cookie, self);
 }
 
-mirror::Object* art_portable_jni_method_end_with_reference(jobject result, uint32_t saved_local_ref_cookie,
+Object* art_portable_jni_method_end_with_reference(jobject result,
+                                                   uint32_t saved_local_ref_cookie,
                                                    Thread* self)
     SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
   self->TransitionFromSuspendedToRunnable();
-  mirror::Object* o = self->DecodeJObject(result);  // Must decode before pop.
+  Object* o = self->DecodeJObject(result);  // Must decode before pop.
   PopLocalReferences(saved_local_ref_cookie, self);
   // Process result.
   if (UNLIKELY(self->GetJniEnv()->check_jni)) {
@@ -741,13 +820,14 @@
   return o;
 }
 
-mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
+Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
                                                                 uint32_t saved_local_ref_cookie,
-                                                                jobject locked, Thread* self)
+                                                                jobject locked,
+                                                                Thread* self)
     SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
   self->TransitionFromSuspendedToRunnable();
   UnlockJniSynchronizedMethod(locked, self);  // Must decode before pop.
-  mirror::Object* o = self->DecodeJObject(result);
+  Object* o = self->DecodeJObject(result);
   PopLocalReferences(saved_local_ref_cookie, self);
   // Process result.
   if (UNLIKELY(self->GetJniEnv()->check_jni)) {
@@ -762,12 +842,12 @@
 // Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation
 // handler which is a field within the proxy object receiver. The var args encode the arguments
 // with the last argument being a pointer to a JValue to store the result in.
-void art_portable_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_method, ...)
+void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   va_list ap;
   va_start(ap, proxy_method);
 
-  mirror::Object* receiver = va_arg(ap, mirror::Object*);
+  Object* receiver = va_arg(ap, Object*);
   Thread* self = va_arg(ap, Thread*);
   MethodHelper proxy_mh(proxy_method);
 
@@ -785,19 +865,19 @@
   jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
 
   // Convert proxy method into expected interface method.
-  mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+  AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
   DCHECK(interface_method != NULL);
   DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
   jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
 
-  // Record arguments and turn mirror::Object* arguments into jobject to survive GC.
+  // Record arguments and turn Object* arguments into jobject to survive GC.
   std::vector<jvalue> args;
   const size_t num_params = proxy_mh.NumArgs();
   for (size_t i = 1; i < num_params; ++i) {
     jvalue val;
     switch (proxy_mh.GetParamPrimitiveType(i)) {
       case Primitive::kPrimNot:
-        val.l = soa.AddLocalReference<jobject>(va_arg(ap, mirror::Object*));
+        val.l = soa.AddLocalReference<jobject>(va_arg(ap, Object*));
         break;
       case Primitive::kPrimBoolean:  // Fall-through.
       case Primitive::kPrimByte:     // Fall-through.
@@ -843,4 +923,5 @@
 void art_portable_constructor_barrier() {
   LOG(FATAL) << "Implemented by IRBuilder.";
 }
+
 }  // extern "C"
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f4ae81d..35cd895 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -185,12 +185,12 @@
 }
 
 uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
-  DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine";
+  DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
   return context_->GetGPR(reg);
 }
 
 void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
-  DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine";
+  DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
   context_->SetGPR(reg, value);
 }
 
@@ -341,7 +341,7 @@
         }
         cur_depth_++;
         cur_shadow_frame_ = cur_shadow_frame_->GetLink();
-      } while(cur_shadow_frame_ != NULL);
+      } while (cur_shadow_frame_ != NULL);
     }
     if (include_transitions) {
       bool should_continue = VisitFrame();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f0d5417..d1e33b8 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1779,7 +1779,7 @@
                                       m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
     verifier.Verify();
     std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
-    for(uint16_t reg = 0; reg < num_regs; reg++) {
+    for (uint16_t reg = 0; reg < num_regs; reg++) {
       VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
       switch (kind) {
         case kUndefined:
diff --git a/runtime/thread.h b/runtime/thread.h
index 3b66943..388178f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -104,7 +104,7 @@
   static Thread* Current() {
     // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
     // that we can replace this with a direct %fs access on x86.
-    if(!is_started_) {
+    if (!is_started_) {
       return NULL;
     } else {
       void* thread = pthread_getspecific(Thread::pthread_key_self_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 59c38b4..7aa835a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -468,7 +468,7 @@
       // Wait for another thread to exit before re-checking.
       thread_exit_cond_.Wait(self);
     }
-  } while(!all_threads_are_daemons);
+  } while (!all_threads_are_daemons);
 }
 
 void ThreadList::SuspendAllDaemonThreads() {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 3293290..2227b8d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -230,7 +230,7 @@
   // Create Trace object.
   {
     MutexLock mu(self, *Locks::trace_lock_);
-    if(the_trace_ != NULL) {
+    if (the_trace_ != NULL) {
       LOG(ERROR) << "Trace already in progress, ignoring this request";
     } else {
       the_trace_ = new Trace(trace_file.release(), buffer_size, flags);
diff --git a/runtime/utils.h b/runtime/utils.h
index a08e465..72597f5 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -352,18 +352,18 @@
 class VoidFunctor {
  public:
   template <typename A>
-  inline void operator () (A a) const {
+  inline void operator() (A a) const {
     UNUSED(a);
   }
 
   template <typename A, typename B>
-  inline void operator () (A a, B b) const {
+  inline void operator() (A a, B b) const {
     UNUSED(a);
     UNUSED(b);
   }
 
   template <typename A, typename B, typename C>
-  inline void operator () (A a, B b, C c) const {
+  inline void operator() (A a, B b, C c) const {
     UNUSED(a);
     UNUSED(b);
     UNUSED(c);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index ff7f594..f414b79 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1014,12 +1014,12 @@
   verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
 
   MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
-  if(method_to_safe_casts != NULL ) {
+  if (method_to_safe_casts != NULL) {
     SetSafeCastMap(ref, method_to_safe_casts);
   }
 
   MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
-  if(pc_to_concrete_method != NULL ) {
+  if (pc_to_concrete_method != NULL) {
     SetDevirtMap(ref, pc_to_concrete_method);
   }
   return true;
@@ -1824,7 +1824,7 @@
       uint32_t instance_of_idx = 0;
       if (0 != work_insn_idx_) {
         instance_of_idx = work_insn_idx_ - 1;
-        while(0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+        while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
           instance_of_idx--;
         }
         CHECK(insn_flags_[instance_of_idx].IsOpcode());
@@ -1854,7 +1854,7 @@
         // which is not done because of the multiple inheritance implications.
         const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
 
-        if(!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
+        if (!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
           RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this);
           if (inst->Opcode() == Instruction::IF_EQZ) {
             fallthrough_line.reset(update_line);
@@ -1868,7 +1868,7 @@
             // register encoding space of instance-of, and propagate type information to the source
             // of the move-object.
             uint32_t move_idx = instance_of_idx - 1;
-            while(0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+            while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
               move_idx--;
             }
             CHECK(insn_flags_[move_idx].IsOpcode());
@@ -2696,7 +2696,7 @@
   DCHECK(insn_flags_[*start_guess].IsOpcode());
 
   return true;
-}
+} // NOLINT(readability/fn_size)
 
 const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
@@ -3766,7 +3766,7 @@
     bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
         (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
 
-    if(!is_interface && !is_virtual) {
+    if (!is_interface && !is_virtual) {
       continue;
     }
     // Get reg type for register holding the reference to the object that will be dispatched upon.
@@ -3792,7 +3792,7 @@
     }
     mirror::AbstractMethod* abstract_method =
         dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
-    if(abstract_method == NULL) {
+    if (abstract_method == NULL) {
       // If the method is not found in the cache this means that it was never found
       // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
       continue;
@@ -3986,7 +3986,7 @@
 
   // Look up the PC in the map, get the concrete method to execute and return its reference.
   MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc);
-  if(pc_to_concrete_method != it->second->end()) {
+  if (pc_to_concrete_method != it->second->end()) {
     return &(pc_to_concrete_method->second);
   } else {
     return NULL;
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 1c61a29..8418928 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -211,7 +211,7 @@
 }
 
 LongLoType* LongLoType::GetInstance() {
-  CHECK (instance_ != NULL);
+  CHECK(instance_ != NULL);
   return instance_;
 }
 
@@ -355,7 +355,7 @@
 }
 
 void BooleanType::Destroy() {
-  if(BooleanType::instance != NULL) {
+  if (BooleanType::instance != NULL) {
     delete instance;
     instance = NULL;
   }
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 6013250..22c585c 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -377,7 +377,7 @@
     entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size());
   } else {
     mirror::Class* klass = uninit_type.GetClass();
-    if(uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
+    if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
       // For uninitialized "this reference" look for reference types that are not precise.
       for (size_t i = primitive_count_; i < entries_.size(); i++) {
         RegType* cur_entry = entries_[i];
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 814dff7..2411758 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -44,7 +44,7 @@
   }
   ~RegTypeCache();
   static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if(!RegTypeCache::primitive_initialized_) {
+    if (!RegTypeCache::primitive_initialized_) {
       CHECK_EQ(RegTypeCache::primitive_count_, 0);
       CreatePrimitiveTypes();
       CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index d2c9dd6..a24c3c9 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -405,7 +405,7 @@
   std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
   EXPECT_EQ(expected, unresolved_ref.Dump());
   expected = "Precise Reference: java.lang.String";
-  EXPECT_EQ( expected, resolved_ref.Dump());
+  EXPECT_EQ(expected, resolved_ref.Dump());
   expected ="Uninitialized Reference: java.lang.String Allocation PC: 10";
   EXPECT_EQ(expected, resolved_unintialiesd.Dump());
   expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12";
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 492916e..ccdbffd 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -33,8 +33,8 @@
 namespace art {
 
 #define IS_IN_REF_BITMAP(mh, ref_bitmap, reg) \
-  ( ((reg) < mh.GetCodeItem()->registers_size_) &&                       \
-    (( *((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) )
+  (((reg) < mh.GetCodeItem()->registers_size_) && \
+   ((*((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
 
 #define CHECK_REGS_CONTAIN_REFS(...)     \
   do {                                   \
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index fc156b1..d100c10 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -31,8 +31,8 @@
 namespace art {
 
 #define REG(mh, reg_bitmap, reg) \
-  ( ((reg) < mh.GetCodeItem()->registers_size_) &&                       \
-    (( *((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) )
+  (((reg) < mh.GetCodeItem()->registers_size_) && \
+   ((*((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
 
 #define CHECK_REGS(...) if (!IsShadowFrame()) { \
     int t[] = {__VA_ARGS__}; \
diff --git a/tools/cpplint.py b/tools/cpplint.py
index 30c7128..4f069b7 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -645,6 +645,11 @@
       filename: The name of the current file.
       linenum: The number of the line to check.
     """
+    # BEGIN android-added
+    if not self.in_a_function:
+      return
+    # END android-added
+
     if Match(r'T(EST|est)', self.current_function):
       base_trigger = self._TEST_TRIGGER
     else:
@@ -1526,7 +1531,10 @@
   # Note that we assume the contents of [] to be short enough that
   # they'll never need to wrap.
   if (  # Ignore control structures.
-      not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+      # BEGIN android-changed
+      # not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+      not Search(r'\b(if|for|while|switch|return|delete|new)\b', fncall) and
+      # END android-changed
       # Ignore pointers/references to functions.
       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
       # Ignore pointers/references to arrays.