Merge "ART: Generate path to entrypoints in VisitLoadString for x86"
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 92e9cd9..f07f8a0 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2378,13 +2378,8 @@
 
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble:
-      // TODO: don't use branches.
-      GenerateFpCompareAndBranch(instruction->GetCondition(),
-                                 instruction->IsGtBias(),
-                                 type,
-                                 locations,
-                                 &true_label);
-      break;
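+      // The FP compare materializes its result directly, so return instead of
+      // falling through to the branch-to-result conversion below.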
+      GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
+      return;
   }
 
   // Convert the branches into the result.
@@ -3177,6 +3172,230 @@
   }
 }
 
+void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
+                                                     bool gt_bias,
+                                                     Primitive::Type type,
+                                                     LocationSummary* locations) {
+  Register dst = locations->Out().AsRegister<Register>();
+  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
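+  // On R6, CMP.cond.fmt writes all ones or all zeros into FTMP; the value is
+  // moved to `dst` and masked down to 0 or 1. Before R6, C.cond.fmt sets FP
+  // condition flag 0, and Movf/Movt conditionally clear a preloaded 1 in `dst`.
+  // `gt_bias` selects how unordered (NaN) operands resolve: with gt_bias a NaN
+  // behaves as if lhs > rhs, so lt/le use ordered compares and gt/ge unordered
+  // ones; without it the choice is reversed.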
+  if (type == Primitive::kPrimFloat) {
+    if (isR6) {
+      switch (cond) {
+        case kCondEQ:
+          __ CmpEqS(FTMP, lhs, rhs);
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondNE:
+          __ CmpEqS(FTMP, lhs, rhs);
+          __ Mfc1(dst, FTMP);
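+          // dst is all ones when equal; adding 1 wraps it to 0, and 0 becomes 1.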
+          __ Addiu(dst, dst, 1);
+          break;
+        case kCondLT:
+          if (gt_bias) {
+            __ CmpLtS(FTMP, lhs, rhs);
+          } else {
+            __ CmpUltS(FTMP, lhs, rhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondLE:
+          if (gt_bias) {
+            __ CmpLeS(FTMP, lhs, rhs);
+          } else {
+            __ CmpUleS(FTMP, lhs, rhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondGT:
+          if (gt_bias) {
+            __ CmpUltS(FTMP, rhs, lhs);
+          } else {
+            __ CmpLtS(FTMP, rhs, lhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondGE:
+          if (gt_bias) {
+            __ CmpUleS(FTMP, rhs, lhs);
+          } else {
+            __ CmpLeS(FTMP, rhs, lhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+          UNREACHABLE();
+      }
+    } else {
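+      // Pre-R6: the compare sets FP condition flag 0, `dst` is preloaded with 1
+      // and reset to 0 by Movf/Movt when the condition does not hold.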
+      switch (cond) {
+        case kCondEQ:
+          __ CeqS(0, lhs, rhs);
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondNE:
+          __ CeqS(0, lhs, rhs);
+          __ LoadConst32(dst, 1);
+          __ Movt(dst, ZERO, 0);
+          break;
+        case kCondLT:
+          if (gt_bias) {
+            __ ColtS(0, lhs, rhs);
+          } else {
+            __ CultS(0, lhs, rhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondLE:
+          if (gt_bias) {
+            __ ColeS(0, lhs, rhs);
+          } else {
+            __ CuleS(0, lhs, rhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondGT:
+          if (gt_bias) {
+            __ CultS(0, rhs, lhs);
+          } else {
+            __ ColtS(0, rhs, lhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondGE:
+          if (gt_bias) {
+            __ CuleS(0, rhs, lhs);
+          } else {
+            __ ColeS(0, rhs, lhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+          UNREACHABLE();
+      }
+    }
+  } else {
+    DCHECK_EQ(type, Primitive::kPrimDouble);
+    if (isR6) {
+      switch (cond) {
+        case kCondEQ:
+          __ CmpEqD(FTMP, lhs, rhs);
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondNE:
+          __ CmpEqD(FTMP, lhs, rhs);
+          __ Mfc1(dst, FTMP);
+          __ Addiu(dst, dst, 1);
+          break;
+        case kCondLT:
+          if (gt_bias) {
+            __ CmpLtD(FTMP, lhs, rhs);
+          } else {
+            __ CmpUltD(FTMP, lhs, rhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondLE:
+          if (gt_bias) {
+            __ CmpLeD(FTMP, lhs, rhs);
+          } else {
+            __ CmpUleD(FTMP, lhs, rhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondGT:
+          if (gt_bias) {
+            __ CmpUltD(FTMP, rhs, lhs);
+          } else {
+            __ CmpLtD(FTMP, rhs, lhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        case kCondGE:
+          if (gt_bias) {
+            __ CmpUleD(FTMP, rhs, lhs);
+          } else {
+            __ CmpLeD(FTMP, rhs, lhs);
+          }
+          __ Mfc1(dst, FTMP);
+          __ Andi(dst, dst, 1);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+          UNREACHABLE();
+      }
+    } else {
+      switch (cond) {
+        case kCondEQ:
+          __ CeqD(0, lhs, rhs);
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondNE:
+          __ CeqD(0, lhs, rhs);
+          __ LoadConst32(dst, 1);
+          __ Movt(dst, ZERO, 0);
+          break;
+        case kCondLT:
+          if (gt_bias) {
+            __ ColtD(0, lhs, rhs);
+          } else {
+            __ CultD(0, lhs, rhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondLE:
+          if (gt_bias) {
+            __ ColeD(0, lhs, rhs);
+          } else {
+            __ CuleD(0, lhs, rhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondGT:
+          if (gt_bias) {
+            __ CultD(0, rhs, lhs);
+          } else {
+            __ ColtD(0, rhs, lhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        case kCondGE:
+          if (gt_bias) {
+            __ CuleD(0, rhs, lhs);
+          } else {
+            __ ColeD(0, rhs, lhs);
+          }
+          __ LoadConst32(dst, 1);
+          __ Movf(dst, ZERO, 0);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+          UNREACHABLE();
+      }
+    }
+  }
+}
+
 void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
                                                               bool gt_bias,
                                                               Primitive::Type type,
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 7ba6c0d..0039981 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -243,6 +243,10 @@
   void GenerateLongCompareAndBranch(IfCondition cond,
                                     LocationSummary* locations,
                                     MipsLabel* label);
+  void GenerateFpCompare(IfCondition cond,
+                         bool gt_bias,
+                         Primitive::Type type,
+                         LocationSummary* locations);
   void GenerateFpCompareAndBranch(IfCondition cond,
                                   bool gt_bias,
                                   Primitive::Type type,
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index cea4a7e..eda0971 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -38,7 +38,7 @@
 static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
               "Unexpected values for register codes.");
 
-static inline int VIXLRegCodeFromART(int code) {
+inline int VIXLRegCodeFromART(int code) {
   if (code == SP) {
     return vixl::aarch64::kSPRegInternalCode;
   }
@@ -48,7 +48,7 @@
   return code;
 }
 
-static inline int ARTRegCodeFromVIXL(int code) {
+inline int ARTRegCodeFromVIXL(int code) {
   if (code == vixl::aarch64::kSPRegInternalCode) {
     return SP;
   }
@@ -58,85 +58,85 @@
   return code;
 }
 
-static inline vixl::aarch64::Register XRegisterFrom(Location location) {
+inline vixl::aarch64::Register XRegisterFrom(Location location) {
   DCHECK(location.IsRegister()) << location;
   return vixl::aarch64::Register::GetXRegFromCode(VIXLRegCodeFromART(location.reg()));
 }
 
-static inline vixl::aarch64::Register WRegisterFrom(Location location) {
+inline vixl::aarch64::Register WRegisterFrom(Location location) {
   DCHECK(location.IsRegister()) << location;
   return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg()));
 }
 
-static inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) {
   DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
   return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
 }
 
-static inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
+inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
   return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
 }
 
-static inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
   return RegisterFrom(instr->GetLocations()->InAt(input_index),
                       instr->InputAt(input_index)->GetType());
 }
 
-static inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
+inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
   return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
 }
 
-static inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
+inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
   return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
 }
 
-static inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
   DCHECK(Primitive::IsFloatingPointType(type)) << type;
   return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
 }
 
-static inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
+inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
   return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
 }
 
-static inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
   return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                         instr->InputAt(input_index)->GetType());
 }
 
-static inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
   return Primitive::IsFloatingPointType(type)
       ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
       : vixl::aarch64::CPURegister(RegisterFrom(location, type));
 }
 
-static inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
+inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
   return Primitive::IsFloatingPointType(instr->GetType())
       ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
       : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
 }
 
-static inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
+inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
   return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType())
       ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
       : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
 }
 
-static inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
+inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
-                                                                     int index) {
+                                                              int index) {
   HInstruction* input = instr->InputAt(index);
   Primitive::Type input_type = input->GetType();
   if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
     return (Primitive::ComponentSize(input_type) >= vixl::aarch64::kXRegSizeInBytes)
-        ?  vixl::aarch64::xzr
+        ? vixl::aarch64::xzr
         : vixl::aarch64::wzr;
   }
   return InputCPURegisterAt(instr, index);
 }
 
-static inline int64_t Int64ConstantFrom(Location location) {
+inline int64_t Int64ConstantFrom(Location location) {
   HConstant* instr = location.GetConstant();
   if (instr->IsIntConstant()) {
     return instr->AsIntConstant()->GetValue();
@@ -148,7 +148,7 @@
   }
 }
 
-static inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) {
+inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) {
   if (location.IsRegister()) {
     return vixl::aarch64::Operand(RegisterFrom(location, type));
   } else {
@@ -156,23 +156,23 @@
   }
 }
 
-static inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
   return OperandFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
 }
 
-static inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
+inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
   return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
 }
 
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
-                                                    size_t offset = 0) {
+                                             size_t offset = 0) {
   // A heap reference must be 32bit, so fit in a W register.
   DCHECK(base.IsW());
   return vixl::aarch64::MemOperand(base.X(), offset);
 }
 
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
-                                                    const vixl::aarch64::Register& regoffset,
-                                                    vixl::aarch64::Shift shift = vixl::aarch64::LSL,
-                                                    unsigned shift_amount = 0) {
+                                             const vixl::aarch64::Register& regoffset,
+                                             vixl::aarch64::Shift shift = vixl::aarch64::LSL,
+                                             unsigned shift_amount = 0) {
@@ -181,24 +181,24 @@
   return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
 }
 
-static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
-                                                    Offset offset) {
+                                             Offset offset) {
   return HeapOperand(base, offset.SizeValue());
 }
 
-static inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
+inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
   return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
 }
 
-static inline Location LocationFrom(const vixl::aarch64::Register& reg) {
+inline Location LocationFrom(const vixl::aarch64::Register& reg) {
   return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
 }
 
-static inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
+inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
   return Location::FpuRegisterLocation(fpreg.GetCode());
 }
 
-static inline vixl::aarch64::Operand OperandFromMemOperand(
+inline vixl::aarch64::Operand OperandFromMemOperand(
     const vixl::aarch64::MemOperand& mem_op) {
   if (mem_op.IsImmediateOffset()) {
     return vixl::aarch64::Operand(mem_op.GetOffset());
@@ -219,7 +219,7 @@
   }
 }
 
-static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
+inline bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
   DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant())
       << constant->DebugName();
 
@@ -258,7 +258,7 @@
   }
 }
 
-static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
+inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
-                                                        HInstruction* instr) {
+                                                 HInstruction* instr) {
   if (constant->IsConstant()
       && CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
@@ -272,7 +272,7 @@
 // codes are same, we can initialize vixl register list simply by the register masks. Currently,
 // only SP/WSP and ZXR/WZR codes are different between art and vixl.
 // Note: This function is only used for debug checks.
-static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
+inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
-                                                   size_t num_core,
-                                                   uint32_t art_fpu_registers,
-                                                   size_t num_fpu) {
+                                            size_t num_core,
+                                            uint32_t art_fpu_registers,
+                                            size_t num_fpu) {
@@ -290,7 +290,7 @@
   return true;
 }
 
-static inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
   switch (op_kind) {
     case HArm64DataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
     case HArm64DataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
@@ -302,7 +302,7 @@
   }
 }
 
-static inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
   switch (op_kind) {
     case HArm64DataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
     case HArm64DataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
@@ -317,7 +317,7 @@
   }
 }
 
-static inline bool CanFitInShifterOperand(HInstruction* instruction) {
+inline bool CanFitInShifterOperand(HInstruction* instruction) {
   if (instruction->IsTypeConversion()) {
     HTypeConversion* conversion = instruction->AsTypeConversion();
     Primitive::Type result_type = conversion->GetResultType();
@@ -332,7 +332,7 @@
   }
 }
 
-static inline bool HasShifterOperand(HInstruction* instr) {
+inline bool HasShifterOperand(HInstruction* instr) {
   // `neg` instructions are an alias of `sub` using the zero register as the
   // first register input.
   bool res = instr->IsAdd() || instr->IsAnd() || instr->IsNeg() ||
@@ -340,7 +340,7 @@
   return res;
 }
 
-static inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
+inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
   DCHECK(HasShifterOperand(instruction));
   // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
   // does *not* support extension. This is because the `extended register` form
@@ -351,7 +351,7 @@
   return instruction->IsAdd() || instruction->IsSub();
 }
 
-static inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
+inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
   return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
 }
 
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8d4d143..b8e1379 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -297,7 +297,15 @@
       DCHECK(!runtime->UseJitCompilation());
       mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
       CHECK(string != nullptr);
-      // TODO: In follow up CL, add PcRelative and Address back in.
+      if (compiler_driver_->GetSupportBootImageFixup()) {
+        DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
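+        // Strings in the boot image have addresses known at link time: use a
+        // PC-relative load when compiling PIC and a direct address otherwise.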
+        desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
+            ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
+            : HLoadString::LoadKind::kBootImageLinkTimeAddress;
+      } else {
+        // MIPS64 or compiler_driver_test. Do not sharpen.
+        DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
+      }
     } else if (runtime->UseJitCompilation()) {
       // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
       // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index e0e1e81..25aa8ce 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -19,6 +19,16 @@
 #include "arch/quick_alloc_entrypoints.S"
 
 
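+// Adjust the stack pointer and, in lockstep, the CFI description of the frame,
+// so that stack unwinding stays correct across the adjustment.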
+.macro INCREASE_FRAME frame_adjustment
+    sub sp, sp, #(\frame_adjustment)
+    .cfi_adjust_cfa_offset (\frame_adjustment)
+.endm
+
+.macro DECREASE_FRAME frame_adjustment
+    add sp, sp, #(\frame_adjustment)
+    .cfi_adjust_cfa_offset -(\frame_adjustment)
+.endm
+
 .macro SAVE_REG reg, offset
     str \reg, [sp, #(\offset)]
     .cfi_rel_offset \reg, (\offset)
@@ -70,8 +80,7 @@
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
     ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
 
-    sub sp, sp, #176
-    .cfi_adjust_cfa_offset 176
+    INCREASE_FRAME 176
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
@@ -115,8 +124,7 @@
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefOnly];
     ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
 
-    sub sp, sp, #96
-    .cfi_adjust_cfa_offset 96
+    INCREASE_FRAME 96
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
@@ -150,13 +158,11 @@
     RESTORE_TWO_REGS x27, x28, 64
     RESTORE_TWO_REGS x29, xLR, 80
 
-    add sp, sp, #96
-    .cfi_adjust_cfa_offset -96
+    DECREASE_FRAME 96
 .endm
 
 .macro POP_SAVE_REFS_ONLY_FRAME
-    add sp, sp, #96
-    .cfi_adjust_cfa_offset - 96
+    DECREASE_FRAME 96
 .endm
 
 .macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
@@ -166,8 +172,7 @@
 
 
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
-    sub sp, sp, #224
-    .cfi_adjust_cfa_offset 224
+    INCREASE_FRAME 224
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
@@ -254,8 +259,7 @@
     // x29(callee-save) and LR.
     RESTORE_TWO_REGS x29, xLR, 208
 
-    add sp, sp, #224
-    .cfi_adjust_cfa_offset -224
+    DECREASE_FRAME 224
 .endm
 
     /*
@@ -263,8 +267,7 @@
      * Runtime::CreateCalleeSaveMethod(kSaveEverything)
      */
 .macro SETUP_SAVE_EVERYTHING_FRAME
-    sub sp, sp, #512
-    .cfi_adjust_cfa_offset 512
+    INCREASE_FRAME 512
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_EVERYTHING != 512)
@@ -364,8 +367,7 @@
     RESTORE_TWO_REGS x27, x28, 480
     RESTORE_TWO_REGS x29, xLR, 496
 
-    add sp, sp, #512
-    .cfi_adjust_cfa_offset -512
+    DECREASE_FRAME 512
 .endm
 
 .macro RETURN_IF_RESULT_IS_ZERO
@@ -1268,11 +1270,11 @@
     cbz x0, .Lthrow_class_cast_exception
 
     // Restore and return
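+    // Remember the CFI state; it is restored after the `ret` so that the
+    // exception path below still unwinds with the frame intact.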
+    .cfi_remember_state
     RESTORE_REG xLR, 24
     RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
     ret
-
-    .cfi_adjust_cfa_offset 32         // Reset unwind info so following code unwinds.
+    .cfi_restore_state                // Reset unwind info so following code unwinds.
 
 .Lthrow_class_cast_exception:
     // Restore
@@ -1351,8 +1353,7 @@
     POP_REG_NE x3, 24, \xDest
     POP_REG_NE x4, 32, \xDest
     RESTORE_REG xLR, 40
-    add sp, sp, #48
-    .cfi_adjust_cfa_offset -48
+    DECREASE_FRAME 48
 .Lrb_exit\number:
 #else
     ldr \wDest, [\xObj, #\offset]   // Heap reference = 32b. This also zero-extends to \xDest.
@@ -1428,6 +1429,7 @@
     cbz x0, .Lthrow_array_store_exception
 
     // Restore
+    .cfi_remember_state
     RESTORE_TWO_REGS x2, xLR, 16
     RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
 
@@ -1439,7 +1441,7 @@
     lsr x0, x0, #7
     strb w3, [x3, x0]
     ret
-    .cfi_adjust_cfa_offset 32  // 4 restores after cbz for unwinding.
+    .cfi_restore_state            // Reset unwind info so following code unwinds.
 .Lthrow_array_store_exception:
     RESTORE_TWO_REGS x2, xLR, 16
     RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
@@ -2468,8 +2470,8 @@
     SAVE_TWO_REGS x10, x11, 80
     SAVE_TWO_REGS x12, x13, 96
     SAVE_TWO_REGS x14, x15, 112
-    SAVE_TWO_REGS   x16, x17, 128
-    SAVE_TWO_REGS   x18, x19, 144
+    SAVE_TWO_REGS x16, x17, 128
+    SAVE_TWO_REGS x18, x19, 144
     // Save all potentially live caller-save floating-point registers.
     stp   d0, d1,   [sp, #160]
     stp   d2, d3,   [sp, #176]
@@ -2522,8 +2524,7 @@
     ldp   d30, d31, [sp, #336]
     // Restore return address and remove padding.
     RESTORE_REG xLR, 360
-    add sp, sp, #368
-    .cfi_adjust_cfa_offset -368
+    DECREASE_FRAME 368
 .Lret_rb_\name:
     ret
 END \name
diff --git a/tools/bisection-search/README.md b/tools/bisection_search/README.md
similarity index 100%
rename from tools/bisection-search/README.md
rename to tools/bisection_search/README.md
diff --git a/tools/bisection_search/__init__.py b/tools/bisection_search/__init__.py
new file mode 100644
index 0000000..0a42789
--- /dev/null
+++ b/tools/bisection_search/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is intentionally left empty. It indicates that the directory is a Python package.
diff --git a/tools/bisection-search/bisection_search.py b/tools/bisection_search/bisection_search.py
similarity index 100%
rename from tools/bisection-search/bisection_search.py
rename to tools/bisection_search/bisection_search.py
diff --git a/tools/bisection-search/bisection_test.py b/tools/bisection_search/bisection_test.py
similarity index 100%
rename from tools/bisection-search/bisection_test.py
rename to tools/bisection_search/bisection_test.py
diff --git a/tools/bisection-search/common.py b/tools/bisection_search/common.py
similarity index 100%
rename from tools/bisection-search/common.py
rename to tools/bisection_search/common.py
diff --git a/tools/javafuzz/__init__.py b/tools/javafuzz/__init__.py
new file mode 100644
index 0000000..3955c71
--- /dev/null
+++ b/tools/javafuzz/__init__.py
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is intentionally left empty. It indicates that the directory is a Python package.
\ No newline at end of file