Merge "ART: Fix Quick-style LR vs PC core spill mask bug"
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 42b792c..af93aab 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -39,6 +39,9 @@
     true,   // kIntrinsicReverseBits
     true,   // kIntrinsicReverseBytes
     true,   // kIntrinsicNumberOfLeadingZeros
+    true,   // kIntrinsicNumberOfTrailingZeros
+    true,   // kIntrinsicRotateRight
+    true,   // kIntrinsicRotateLeft
     true,   // kIntrinsicAbsInt
     true,   // kIntrinsicAbsLong
     true,   // kIntrinsicAbsFloat
@@ -79,6 +82,10 @@
 static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfLeadingZeros],
               "NumberOfLeadingZeros must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfTrailingZeros],
+              "NumberOfTrailingZeros must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRotateRight], "RotateRight must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRotateLeft], "RotateLeft must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
 static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
@@ -232,6 +239,9 @@
     "putOrderedObject",      // kNameCachePutOrderedObject
     "arraycopy",             // kNameCacheArrayCopy
     "numberOfLeadingZeros",  // kNameCacheNumberOfLeadingZeros
+    "numberOfTrailingZeros",  // kNameCacheNumberOfTrailingZeros
+    "rotateRight",           // kNameCacheRotateRight
+    "rotateLeft",            // kNameCacheRotateLeft
 };
 
 const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
@@ -289,6 +299,8 @@
     { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } },
     // kProtoCacheObject_Z
     { kClassCacheBoolean, 1, { kClassCacheJavaLangObject } },
+    // kProtoCacheJI_J
+    { kClassCacheLong, 2, { kClassCacheLong, kClassCacheInt } },
     // kProtoCacheObjectJII_Z
     { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong,
         kClassCacheInt, kClassCacheInt } },
@@ -379,6 +391,8 @@
 
     INTRINSIC(JavaLangInteger, NumberOfLeadingZeros, I_I, kIntrinsicNumberOfLeadingZeros, k32),
     INTRINSIC(JavaLangLong, NumberOfLeadingZeros, J_I, kIntrinsicNumberOfLeadingZeros, k64),
+    INTRINSIC(JavaLangInteger, NumberOfTrailingZeros, I_I, kIntrinsicNumberOfTrailingZeros, k32),
+    INTRINSIC(JavaLangLong, NumberOfTrailingZeros, J_I, kIntrinsicNumberOfTrailingZeros, k64),
 
     INTRINSIC(JavaLangMath,       Abs, I_I, kIntrinsicAbsInt, 0),
     INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
@@ -468,6 +482,11 @@
     INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
               0),
 
+    INTRINSIC(JavaLangInteger, RotateRight, II_I, kIntrinsicRotateRight, k32),
+    INTRINSIC(JavaLangLong, RotateRight, JI_J, kIntrinsicRotateRight, k64),
+    INTRINSIC(JavaLangInteger, RotateLeft, II_I, kIntrinsicRotateLeft, k32),
+    INTRINSIC(JavaLangLong, RotateLeft, JI_J, kIntrinsicRotateLeft, k64),
+
 #undef INTRINSIC
 
 #define SPECIAL(c, n, p, o, d) \
@@ -631,7 +650,10 @@
     case kIntrinsicSystemArrayCopyCharArray:
       return backend->GenInlinedArrayCopyCharArray(info);
     case kIntrinsicNumberOfLeadingZeros:
-      return false;  // not implemented in quick
+    case kIntrinsicNumberOfTrailingZeros:
+    case kIntrinsicRotateRight:
+    case kIntrinsicRotateLeft:
+      return false;  // not implemented in quick.
     default:
       LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
       return false;  // avoid warning "control reaches end of non-void function"
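For reference, a host-side sketch (not part of this change) of the Java-level
semantics the new rotate intrinsics must preserve: rotate distances are taken
modulo the operand width, so only the low five bits matter for Integer and the
low six for Long.

  #include <cstdint>

  // Equivalent of java.lang.Integer.rotateLeft (JLS semantics).
  static uint32_t RotateLeft32(uint32_t x, int distance) {
    unsigned r = static_cast<unsigned>(distance) & 31u;  // Java masks the distance.
    return (x << r) | (x >> ((32u - r) & 31u));          // mask again so r == 0 never shifts by 32
  }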
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index d6c8bfb..8458806 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -208,6 +208,9 @@
       kNameCachePutOrderedObject,
       kNameCacheArrayCopy,
       kNameCacheNumberOfLeadingZeros,
+      kNameCacheNumberOfTrailingZeros,
+      kNameCacheRotateRight,
+      kNameCacheRotateLeft,
       kNameCacheLast
     };
 
@@ -245,6 +248,7 @@
       kProtoCacheJJ_V,
       kProtoCacheJS_V,
       kProtoCacheObject_Z,
+      kProtoCacheJI_J,
       kProtoCacheObjectJII_Z,
       kProtoCacheObjectJJJ_Z,
       kProtoCacheObjectJObjectObject_Z,
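The enum value added here is used as the index into kProtoCacheDefs in the .cc
file, which is why both lists are extended at the same relative position
(directly after the Object_Z entry). A minimal illustration of the invariant,
with hypothetical types:

  #include <cassert>

  enum ProtoCacheIndex { kProtoCacheObject_Z, kProtoCacheJI_J, kProtoCacheLast };

  struct ProtoDef { const char* shorty; };  // return type first, then arguments

  static const ProtoDef kProtoCacheDefs[kProtoCacheLast] = {
      {"ZL"},   // kProtoCacheObject_Z: (Object) -> boolean
      {"JJI"},  // kProtoCacheJI_J: (long, int) -> long
  };

  int main() {
    assert(kProtoCacheDefs[kProtoCacheJI_J].shorty[0] == 'J');  // returns long
    return 0;
  }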
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 41c239d..b71fdb8 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -125,6 +125,28 @@
           LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
           UNREACHABLE();
       }
+    case kIntrinsicRotateRight:
+      switch (GetType(method.d.data, true)) {
+        case Primitive::kPrimInt:
+          return Intrinsics::kIntegerRotateRight;
+        case Primitive::kPrimLong:
+          return Intrinsics::kLongRotateRight;
+        default:
+          LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+          UNREACHABLE();
+      }
+    case kIntrinsicRotateLeft:
+      switch (GetType(method.d.data, true)) {
+        case Primitive::kPrimInt:
+          return Intrinsics::kIntegerRotateLeft;
+        case Primitive::kPrimLong:
+          return Intrinsics::kLongRotateLeft;
+        default:
+          LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+          UNREACHABLE();
+      }
+
+    // Misc data processing.
     case kIntrinsicNumberOfLeadingZeros:
       switch (GetType(method.d.data, true)) {
         case Primitive::kPrimInt:
@@ -135,6 +157,16 @@
           LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
           UNREACHABLE();
       }
+    case kIntrinsicNumberOfTrailingZeros:
+      switch (GetType(method.d.data, true)) {
+        case Primitive::kPrimInt:
+          return Intrinsics::kIntegerNumberOfTrailingZeros;
+        case Primitive::kPrimLong:
+          return Intrinsics::kLongNumberOfTrailingZeros;
+        default:
+          LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
+          UNREACHABLE();
+      }
 
     // Abs.
     case kIntrinsicAbsDouble:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 6040a40..cc8ddb6 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -266,6 +266,227 @@
   GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
 }
 
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+                                     Primitive::Type type,
+                                     ArmAssembler* assembler) {
+  DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+  Register out = locations->Out().AsRegister<Register>();
+
+  if (type == Primitive::kPrimLong) {
+    Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+    Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+    Label end;
+    __ rbit(out, in_reg_lo);
+    __ clz(out, out);
+    __ CompareAndBranchIfNonZero(in_reg_lo, &end);
+    __ rbit(out, in_reg_hi);
+    __ clz(out, out);
+    __ AddConstant(out, 32);
+    __ Bind(&end);
+  } else {
+    Register in = locations->InAt(0).AsRegister<Register>();
+    __ rbit(out, in);
+    __ clz(out, out);
+  }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenIntegerRotate(LocationSummary* locations,
+                             ArmAssembler* assembler,
+                             bool is_left) {
+  Register in = locations->InAt(0).AsRegister<Register>();
+  Location rhs = locations->InAt(1);
+  Register out = locations->Out().AsRegister<Register>();
+
+  if (rhs.IsConstant()) {
+    // The Arm32 and Thumb2 assemblers require a rotation in the interval [1,31],
+    // so map all rotations to a positive equivalent in that range.
+    // (e.g. a rotation by -2 bits, left or right, equals one by 30 bits in the same direction.)
+    uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue() & 0x1F;
+    if (rot) {
+      // Rotate, mapping left rotations to right equivalents if necessary.
+      // (e.g. left by 2 bits == right by 30.)
+      __ Ror(out, in, is_left ? (0x20 - rot) : rot);
+    } else if (out != in) {
+      __ Mov(out, in);
+    }
+  } else {
+    if (is_left) {
+      __ rsb(out, rhs.AsRegister<Register>(), ShifterOperand(0));
+      __ Ror(out, in, out);
+    } else {
+      __ Ror(out, in, rhs.AsRegister<Register>());
+    }
+  }
+}
+
+// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
+// rotates: swap the input registers to absorb a whole-word (32-bit) part of the
+// rotation, or flip the direction so that a larger right/left rotation becomes
+// a sub-word rotation in the other direction, as appropriate.
+static void GenLongRotate(LocationSummary* locations,
+                          ArmAssembler* assembler,
+                          bool is_left) {
+  Register in_reg_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+  Register in_reg_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+  Location rhs = locations->InAt(1);
+  Register out_reg_lo = locations->Out().AsRegisterPairLow<Register>();
+  Register out_reg_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+  if (rhs.IsConstant()) {
+    uint32_t rot = rhs.GetConstant()->AsIntConstant()->GetValue();
+    // Map all left rotations to right equivalents.
+    if (is_left) {
+      rot = 0x40 - rot;
+    }
+    // Map all rotations to positive equivalents in the interval [0,63].
+    rot &= 0x3F;
+    // For rotations of a word or more, 'pre-rotate' by 32 bits (swapping the
+    // input registers) to keep the rotate logic below to a simple pair of
+    // shift-and-orr operations.
+    // (e.g. a rotation by 34 bits == in_reg swap + 2 bits right.)
+    if (rot >= 0x20) {
+      rot -= 0x20;
+      std::swap(in_reg_hi, in_reg_lo);
+    }
+    // Rotate, or mov to out for zero or word size rotations.
+    if (rot) {
+      __ Lsr(out_reg_hi, in_reg_hi, rot);
+      __ orr(out_reg_hi, out_reg_hi, ShifterOperand(in_reg_lo, arm::LSL, 0x20 - rot));
+      __ Lsr(out_reg_lo, in_reg_lo, rot);
+      __ orr(out_reg_lo, out_reg_lo, ShifterOperand(in_reg_hi, arm::LSL, 0x20 - rot));
+    } else {
+      __ Mov(out_reg_lo, in_reg_lo);
+      __ Mov(out_reg_hi, in_reg_hi);
+    }
+  } else {
+    Register shift_left = locations->GetTemp(0).AsRegister<Register>();
+    Register shift_right = locations->GetTemp(1).AsRegister<Register>();
+    Label end;
+    Label right;
+
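+    // Note: the Lsrs result is discarded; the shift only moves bit 5 of rhs
+    // (i.e. whether the rotation amount modulo 64 is >= 32) into the carry
+    // flag, and the rsb below uses kCcKeep so the flag survives for the
+    // conditional branch.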
+    __ and_(shift_left, rhs.AsRegister<Register>(), ShifterOperand(0x1F));
+    __ Lsrs(shift_right, rhs.AsRegister<Register>(), 6);
+    __ rsb(shift_right, shift_left, ShifterOperand(0x20), AL, kCcKeep);
+
+    if (is_left) {
+      __ b(&right, CS);
+    } else {
+      __ b(&right, CC);
+      std::swap(shift_left, shift_right);
+    }
+
+    // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
+    // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
+    __ Lsl(out_reg_hi, in_reg_hi, shift_left);
+    __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+    __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+    __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+    __ Lsr(shift_left, in_reg_hi, shift_right);
+    __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_left));
+    __ b(&end);
+
+    // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
+    // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
+    __ Bind(&right);
+    __ Lsr(out_reg_hi, in_reg_hi, shift_right);
+    __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+    __ add(out_reg_hi, out_reg_hi, ShifterOperand(out_reg_lo));
+    __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+    __ Lsl(shift_right, in_reg_hi, shift_left);
+    __ add(out_reg_lo, out_reg_lo, ShifterOperand(shift_right));
+
+    __ Bind(&end);
+  }
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateRight(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateRight(HInvoke* invoke) {
+  GenIntegerRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateRight(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (invoke->InputAt(1)->IsConstant()) {
+    locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+  } else {
+    locations->SetInAt(1, Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+  }
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateRight(HInvoke* invoke) {
+  GenLongRotate(invoke->GetLocations(), GetAssembler(), false /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitIntegerRotateLeft(HInvoke* invoke) {
+  GenIntegerRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
+void IntrinsicLocationsBuilderARM::VisitLongRotateLeft(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (invoke->InputAt(1)->IsConstant()) {
+    locations->SetInAt(1, Location::ConstantLocation(invoke->InputAt(1)->AsConstant()));
+  } else {
+    locations->SetInAt(1, Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+  }
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM::VisitLongRotateLeft(HInvoke* invoke) {
+  GenLongRotate(invoke->GetLocations(), GetAssembler(), true /* is_left */);
+}
+
 static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) {
   Location in = locations->InAt(0);
   Location out = locations->Out();
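A host-side model (an illustrative sketch, not ART code) of the constant-case
mapping in GenLongRotate above: reduce everything to a right rotation in
[0,63], absorb whole-word multiples by swapping the 32-bit halves, then handle
the remainder with the same shift/orr combination the assembly emits.

  #include <cstdint>
  #include <utility>

  static uint64_t RotateLong(uint32_t lo, uint32_t hi, uint32_t rot, bool is_left) {
    if (is_left) {
      rot = 0x40 - rot;            // map left rotations to right equivalents
    }
    rot &= 0x3F;                   // positive equivalents in [0,63]
    if (rot >= 0x20) {
      rot -= 0x20;                 // 'pre-rotate' by 32 bits...
      std::swap(lo, hi);           // ...by swapping the halves
    }
    uint32_t out_hi = hi;
    uint32_t out_lo = lo;
    if (rot != 0) {
      out_hi = (hi >> rot) | (lo << (32 - rot));
      out_lo = (lo >> rot) | (hi << (32 - rot));
    }
    return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
  }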
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1dbca34..b0cfd0d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -41,12 +41,12 @@
 using helpers::FPRegisterFrom;
 using helpers::HeapOperand;
 using helpers::LocationFrom;
+using helpers::OperandFrom;
 using helpers::RegisterFrom;
 using helpers::SRegisterFrom;
 using helpers::WRegisterFrom;
 using helpers::XRegisterFrom;
 
-
 namespace {
 
 ALWAYS_INLINE inline MemOperand AbsoluteHeapOperandFrom(Location location, size_t offset = 0) {
@@ -286,6 +286,131 @@
   GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
 }
 
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+                                     Primitive::Type type,
+                                     vixl::MacroAssembler* masm) {
+  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+  Location in = locations->InAt(0);
+  Location out = locations->Out();
+
+  __ Rbit(RegisterFrom(out, type), RegisterFrom(in, type));
+  __ Clz(RegisterFrom(out, type), RegisterFrom(out, type));
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateRight(LocationSummary* locations,
+                           Primitive::Type type,
+                           vixl::MacroAssembler* masm) {
+  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+  Location in = locations->InAt(0);
+  Location out = locations->Out();
+  Operand rhs = OperandFrom(locations->InAt(1), type);
+
+  if (rhs.IsImmediate()) {
+    uint32_t shift = rhs.immediate() & (RegisterFrom(in, type).SizeInBits() - 1);
+    __ Ror(RegisterFrom(out, type),
+           RegisterFrom(in, type),
+           shift);
+  } else {
+    DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+    __ Ror(RegisterFrom(out, type),
+           RegisterFrom(in, type),
+           rhs.reg());
+  }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateRight(HInvoke* invoke) {
+  GenRotateRight(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateRight(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateRight(HInvoke* invoke) {
+  GenRotateRight(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
+static void GenRotateLeft(LocationSummary* locations,
+                          Primitive::Type type,
+                          vixl::MacroAssembler* masm) {
+  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+  Location in = locations->InAt(0);
+  Location out = locations->Out();
+  Operand rhs = OperandFrom(locations->InAt(1), type);
+
+  if (rhs.IsImmediate()) {
+    uint32_t regsize = RegisterFrom(in, type).SizeInBits();
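+    // A left rotation by n equals a right rotation by (regsize - n), masked to
+    // the register width, so a single Ror with an adjusted immediate suffices.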
+    uint32_t shift = (regsize - rhs.immediate()) & (regsize - 1);
+    __ Ror(RegisterFrom(out, type), RegisterFrom(in, type), shift);
+  } else {
+    DCHECK(rhs.shift() == vixl::LSL && rhs.shift_amount() == 0);
+    __ Neg(RegisterFrom(out, type),
+           Operand(RegisterFrom(locations->InAt(1), type)));
+    __ Ror(RegisterFrom(out, type),
+           RegisterFrom(in, type),
+           RegisterFrom(out, type));
+  }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerRotateLeft(HInvoke* invoke) {
+  GenRotateLeft(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitLongRotateLeft(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitLongRotateLeft(HInvoke* invoke) {
+  GenRotateLeft(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
+}
+
 static void GenReverse(LocationSummary* locations,
                        Primitive::Type type,
                        vixl::MacroAssembler* masm) {
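Both the ARM and ARM64 paths above compute trailing zeros as rbit followed by
clz. A standalone check of that identity (a sketch; ReverseBits32/Clz32 are
spelled out rather than using compiler builtins):

  #include <cassert>
  #include <cstdint>

  static uint32_t ReverseBits32(uint32_t x) {
    uint32_t r = 0;
    for (int i = 0; i < 32; ++i, x >>= 1) {
      r = (r << 1) | (x & 1);
    }
    return r;
  }

  static uint32_t Clz32(uint32_t x) {
    uint32_t n = 0;
    while (n < 32 && ((x >> (31 - n)) & 1) == 0) {
      ++n;
    }
    return n;
  }

  int main() {
    assert(Clz32(ReverseBits32(0u)) == 32);       // numberOfTrailingZeros(0) == 32
    assert(Clz32(ReverseBits32(1u)) == 0);        // lowest set bit is bit 0
    assert(Clz32(ReverseBits32(0x8000u)) == 15);  // lowest set bit is bit 15
    return 0;
  }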
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 7e5339e..bfe5e55 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -29,9 +29,15 @@
   V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache) \
   V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
   V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+  V(IntegerNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+  V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+  V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
   V(LongReverse, kStatic, kNeedsEnvironmentOrCache) \
   V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
   V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
+  V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
+  V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache) \
+  V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
   V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
   V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache) \
   V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index daf56d0..c5d88d2 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1956,6 +1956,12 @@
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index f78a726..258ae9a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1774,6 +1774,12 @@
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dbcaab9..a4d1837 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -495,6 +495,7 @@
   virtual void clz(Register rd, Register rm, Condition cond = AL) = 0;
   virtual void movw(Register rd, uint16_t imm16, Condition cond = AL) = 0;
   virtual void movt(Register rd, uint16_t imm16, Condition cond = AL) = 0;
+  virtual void rbit(Register rd, Register rm, Condition cond = AL) = 0;
 
   // Multiply instructions.
   virtual void mul(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 184d8a5..f7772ae 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -735,6 +735,20 @@
 }
 
 
+void Arm32Assembler::rbit(Register rd, Register rm, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B26 | B25 | B23 | B22 | B21 | B20 | (0xf << 16) |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     (0xf << 8) | B5 | B4 | static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
 void Arm32Assembler::EmitMulOp(Condition cond, int32_t opcode,
                                Register rd, Register rn,
                                Register rm, Register rs) {
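The bit pattern assembled above is the A1 encoding of RBIT: cond 0110 1111
(1111) Rd 1111 0011 Rm. A quick host-side check (a sketch; kRdShift is 12)
against the known machine word for "rbit r1, r0":

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t cond = 0xEu;  // AL
    const uint32_t rd = 1u;      // r1
    const uint32_t rm = 0u;      // r0
    const uint32_t encoding = (cond << 28) |
                              (1u << 26) | (1u << 25) |  // B26 | B25
                              (1u << 23) | (1u << 22) | (1u << 21) | (1u << 20) |
                              (0xFu << 16) |
                              (rd << 12) |
                              (0xFu << 8) |
                              (1u << 5) | (1u << 4) |    // B5 | B4
                              rm;
    assert(encoding == 0xE6FF1F30u);  // "rbit r1, r0" per the ARM ARM
    return 0;
  }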
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 17c6747..3407369 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -87,6 +87,7 @@
   void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
   void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
+  void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
 
   // Multiply instructions.
   void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index e6412ac..2a0912e 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -883,4 +883,8 @@
   DriverStr(expected, "strexd");
 }
 
+TEST_F(AssemblerArm32Test, rbit) {
+  T3Helper(&arm::Arm32Assembler::rbit, true, "rbit{cond} {reg1}, {reg2}", "rbit");
+}
+
 }  // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index b677789..0f6c4f5 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2426,6 +2426,25 @@
 }
 
 
+void Thumb2Assembler::rbit(Register rd, Register rm, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CheckCondition(cond);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+  CHECK_NE(rd, SP);
+  CHECK_NE(rm, SP);
+  int32_t encoding = B31 | B30 | B29 | B28 | B27 |
+      B25 | B23 | B20 |
+      static_cast<uint32_t>(rm) << 16 |
+      0xf << 12 |
+      static_cast<uint32_t>(rd) << 8 |
+      B7 | B5 |
+      static_cast<uint32_t>(rm);
+  Emit32(encoding);
+}
+
+
 void Thumb2Assembler::ldrex(Register rt, Register rn, uint16_t imm, Condition cond) {
   CHECK_NE(rn, kNoRegister);
   CHECK_NE(rt, kNoRegister);
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 6dee68e..a1a8927 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -111,6 +111,7 @@
   void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
   void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
   void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
+  void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
 
   // Multiply instructions.
   void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 84f5cb1..9c08ce0 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -1019,4 +1019,12 @@
   DriverStr(expected, "clz");
 }
 
+TEST_F(AssemblerThumb2Test, rbit) {
+  __ rbit(arm::R1, arm::R0);
+
+  const char* expected = "rbit r1, r0\n";
+
+  DriverStr(expected, "rbit");
+}
+
 }  // namespace art
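For comparison, the T1 (32-bit Thumb2) encoding emitted by
Thumb2Assembler::rbit is 1111 1010 1001 Rm 1111 Rd 1010 Rm, so the test case
above assembles to a different machine word than its Arm32 counterpart (a
sketch):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t rd = 1u, rm = 0u;  // rbit r1, r0
    const uint32_t encoding = (0x1Fu << 27) |                         // B31..B27
                              (1u << 25) | (1u << 23) | (1u << 20) |  // B25 | B23 | B20
                              (rm << 16) | (0xFu << 12) | (rd << 8) |
                              (1u << 7) | (1u << 5) |                 // B7 | B5
                              rm;
    assert(encoding == 0xFA90F1A0u);
    return 0;
  }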
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 963eecb..995a1d5 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -99,6 +99,7 @@
   jit/jit.cc \
   jit/jit_code_cache.cc \
   jit/jit_instrumentation.cc \
+  jit/profiling_info.cc \
   lambda/art_lambda_method.cc \
   lambda/box_table.cc \
   lambda/closure.cc \
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index cfd7fcd..a84c20a 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -26,6 +26,7 @@
 #include "dex_file.h"
 #include "dex_file-inl.h"
 #include "gc_root-inl.h"
+#include "jit/profiling_info.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
@@ -545,6 +546,10 @@
   }
 
   visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
+  ProfilingInfo* profiling_info = GetProfilingInfo();
+  if (hotness_count_ != 0 && !IsNative() && profiling_info != nullptr) {
+    profiling_info->VisitRoots(visitor);
+  }
 }
 
 inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 64416d2..5dbea52 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
+#include "jit/profiling_info.h"
 #include "jni_internal.h"
 #include "mapping_table.h"
 #include "mirror/abstract_method.h"
@@ -579,4 +580,16 @@
   return oat_method.GetVmapTable();
 }
 
+ProfilingInfo* ArtMethod::CreateProfilingInfo() {
+  ProfilingInfo* info = ProfilingInfo::Create(this);
+  MemberOffset offset = ArtMethod::EntryPointFromJniOffset(sizeof(void*));
+  uintptr_t pointer = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
+  if (!reinterpret_cast<Atomic<ProfilingInfo*>*>(pointer)->
+          CompareExchangeStrongSequentiallyConsistent(nullptr, info)) {
+    return GetProfilingInfo();
+  } else {
+    return info;
+  }
+}
+
 }  // namespace art
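CreateProfilingInfo above uses a compare-and-swap so that racing callers agree
on a single ProfilingInfo: the winner installs its allocation and the losers
return the already-published one. The pattern in isolation (a minimal sketch
using std::atomic; the real slot is the repurposed entry_point_from_jni_
field):

  #include <atomic>

  template <typename T>
  T* PublishOnce(std::atomic<T*>* slot, T* candidate) {
    T* expected = nullptr;
    if (slot->compare_exchange_strong(expected, candidate,
                                      std::memory_order_seq_cst)) {
      return candidate;  // We won the race; our object is now the shared one.
    }
    return expected;     // Another thread won; use the object it installed.
  }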
diff --git a/runtime/art_method.h b/runtime/art_method.h
index e0b11d0..3f2161f 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -33,6 +33,7 @@
 namespace art {
 
 union JValue;
+class ProfilingInfo;
 class ScopedObjectAccessAlreadyRunnable;
 class StringPiece;
 class ShadowFrame;
@@ -389,16 +390,25 @@
         PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
   }
 
+  ProfilingInfo* CreateProfilingInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  ProfilingInfo* GetProfilingInfo() {
+    return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJni());
+  }
+
   void* GetEntryPointFromJni() {
     return GetEntryPointFromJniPtrSize(sizeof(void*));
   }
+
   ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
     return GetNativePointer<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
   }
 
   void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(IsNative());
     SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
   }
+
   ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
     SetNativePointer(EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
   }
@@ -523,6 +533,10 @@
   ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  uint16_t IncrementCounter() {
+    return ++hotness_count_;
+  }
+
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of.
@@ -544,7 +558,11 @@
   // Entry within a dispatch table for this method. For static/direct methods the index is into
   // the declaringClass.directMethods, for virtual methods the vtable and for interface methods the
   // ifTable.
-  uint32_t method_index_;
+  uint16_t method_index_;
+
+  // The hotness we measure for this method. Incremented by the interpreter. Not atomic, as we allow
+  // missing increments: if the method is hot, we will see it eventually.
+  uint16_t hotness_count_;
 
   // Fake padding field gets inserted here.
 
@@ -558,7 +576,8 @@
     // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
     GcRoot<mirror::Class>* dex_cache_resolved_types_;
 
-    // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+    // Pointer to JNI function registered to this method, or a function to resolve the JNI function,
+    // or the profiling data for non-native methods.
     void* entry_point_from_jni_;
 
     // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e1aca2f..72226af 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -218,6 +218,17 @@
                << " " << dex_pc_offset;
   }
 
+  // We only care about invokes in the JIT.
+  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
+                                mirror::Object*,
+                                ArtMethod* method,
+                                uint32_t dex_pc,
+                                ArtMethod*)
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
+               << " " << dex_pc;
+  }
+
  private:
   static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
       SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -3490,6 +3501,62 @@
   return instrumentation->IsDeoptimized(m);
 }
 
+struct NeedsDeoptimizationVisitor : public StackVisitor {
+ public:
+  explicit NeedsDeoptimizationVisitor(Thread* self)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+    : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+      needs_deoptimization_(false) {}
+
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    // The visitor is only meant to be used when handling an exception from compiled code.
+    CHECK(!IsShadowFrame()) << "We only expect to visit compiled frames: "
+                            << PrettyMethod(GetMethod());
+    ArtMethod* method = GetMethod();
+    if (method == nullptr) {
+      // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
+      // so we can stop the visit.
+      DCHECK(!needs_deoptimization_);
+      return false;
+    }
+    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+      // We found a compiled frame in the stack but instrumentation is set to interpret
+      // everything: we need to deoptimize.
+      needs_deoptimization_ = true;
+      return false;
+    }
+    if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
+      // We found a deoptimized method in the stack.
+      needs_deoptimization_ = true;
+      return false;
+    }
+    return true;
+  }
+
+  bool NeedsDeoptimization() const {
+    return needs_deoptimization_;
+  }
+
+ private:
+  // Do we need to deoptimize the stack?
+  bool needs_deoptimization_;
+
+  DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
+};
+
+// Do we need to deoptimize the stack to handle an exception?
+bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
+  const SingleStepControl* const ssc = thread->GetSingleStepControl();
+  if (ssc != nullptr) {
+    // We deopt to step into the catch handler.
+    return true;
+  }
+  // Deoptimization is required if at least one method on the stack needs it. However, we
+  // skip frames that will be unwound (and thus not executed).
+  NeedsDeoptimizationVisitor visitor(thread);
+  visitor.WalkStack(true);  // includes upcall.
+  return visitor.NeedsDeoptimization();
+}
+
 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
 // cause suspension if the thread is the current thread.
 class ScopedDebuggerThreadSuspension {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index a9fa6ce..8278fc6 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -576,6 +576,19 @@
     return IsForcedInterpreterNeededForUpcallImpl(thread, m);
   }
 
+  // Indicates whether we need to force the use of the interpreter when handling
+  // an exception. This allows us to deoptimize the stack and continue execution
+  // in the interpreter.
+  // Note: the interpreter will start by handling the exception when executing
+  // the deoptimized frames.
+  static bool IsForcedInterpreterNeededForException(Thread* thread)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!IsDebuggerActive()) {
+      return false;
+    }
+    return IsForcedInterpreterNeededForExceptionImpl(thread);
+  }
+
   // Single-stepping.
   static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
                                        JDWP::JdwpStepDepth depth)
@@ -734,6 +747,9 @@
   static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Indicates whether the debugger is making requests.
   static bool gDebuggerActive;
 
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 98d4e59..47e5c12 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1275,6 +1275,8 @@
   // pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
   // null.
   const OatDexFile* oat_dex_file_;
+
+  friend class DexFileVerifierTest;
 };
 
 struct DexFileReference {
@@ -1459,6 +1461,9 @@
   uint32_t GetMethodCodeItemOffset() const {
     return method_.code_off_;
   }
+  const uint8_t* DataPointer() const {
+    return ptr_pos_;
+  }
   const uint8_t* EndDataPointer() const {
     CHECK(!HasNext());
     return ptr_pos_;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index eec4983..09416cc 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -16,7 +16,9 @@
 
 #include "dex_file_verifier.h"
 
+#include <inttypes.h>
 #include <zlib.h>
+
 #include <memory>
 
 #include "base/stringprintf.h"
@@ -444,66 +446,86 @@
   return true;
 }
 
-bool DexFileVerifier::CheckClassDataItemField(uint32_t idx, uint32_t access_flags,
+bool DexFileVerifier::CheckClassDataItemField(uint32_t idx,
+                                              uint32_t access_flags,
+                                              uint32_t class_access_flags,
+                                              uint16_t class_type_index,
                                               bool expect_static) {
+  // Check that the index is in range.
   if (!CheckIndex(idx, header_->field_ids_size_, "class_data_item field_idx")) {
     return false;
   }
 
+  // Check that it's the right class.
+  uint16_t my_class_index =
+      (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + idx)->
+          class_idx_;
+  if (class_type_index != my_class_index) {
+    ErrorStringPrintf("Field's class index unexpected, %" PRIu16 "vs %" PRIu16,
+                      my_class_index,
+                      class_type_index);
+    return false;
+  }
+
+  // Check that it falls into the right class-data list.
   bool is_static = (access_flags & kAccStatic) != 0;
   if (UNLIKELY(is_static != expect_static)) {
     ErrorStringPrintf("Static/instance field not in expected list");
     return false;
   }
 
-  if (UNLIKELY((access_flags & ~kAccJavaFlagsMask) != 0)) {
-    ErrorStringPrintf("Bad class_data_item field access_flags %x", access_flags);
+  // Check field access flags.
+  std::string error_msg;
+  if (!CheckFieldAccessFlags(access_flags, class_access_flags, &error_msg)) {
+    ErrorStringPrintf("%s", error_msg.c_str());
     return false;
   }
 
   return true;
 }
 
-bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags,
+bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx,
+                                               uint32_t access_flags,
+                                               uint32_t class_access_flags,
+                                               uint16_t class_type_index,
                                                uint32_t code_offset,
-                                               std::unordered_set<uint32_t>& direct_method_indexes,
+                                               std::unordered_set<uint32_t>* direct_method_indexes,
                                                bool expect_direct) {
+  DCHECK(direct_method_indexes != nullptr);
+  // Check that the index is in range.
   if (!CheckIndex(idx, header_->method_ids_size_, "class_data_item method_idx")) {
     return false;
   }
 
-  bool is_direct = (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0;
-  bool expect_code = (access_flags & (kAccNative | kAccAbstract)) == 0;
-  bool is_synchronized = (access_flags & kAccSynchronized) != 0;
-  bool allow_synchronized = (access_flags & kAccNative) != 0;
-
-  if (UNLIKELY(is_direct != expect_direct)) {
-    ErrorStringPrintf("Direct/virtual method not in expected list");
+  // Check that it's the right class.
+  uint16_t my_class_index =
+      (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + idx)->
+          class_idx_;
+  if (class_type_index != my_class_index) {
+    ErrorStringPrintf("Method's class index unexpected, %" PRIu16 "vs %" PRIu16,
+                      my_class_index,
+                      class_type_index);
     return false;
   }
 
+  // Check that it's not defined as both direct and virtual.
   if (expect_direct) {
-    direct_method_indexes.insert(idx);
-  } else if (direct_method_indexes.find(idx) != direct_method_indexes.end()) {
+    direct_method_indexes->insert(idx);
+  } else if (direct_method_indexes->find(idx) != direct_method_indexes->end()) {
     ErrorStringPrintf("Found virtual method with same index as direct method: %d", idx);
     return false;
   }
 
-  constexpr uint32_t access_method_mask = kAccJavaFlagsMask | kAccConstructor |
-      kAccDeclaredSynchronized;
-  if (UNLIKELY(((access_flags & ~access_method_mask) != 0) ||
-               (is_synchronized && !allow_synchronized))) {
-    ErrorStringPrintf("Bad class_data_item method access_flags %x", access_flags);
-    return false;
-  }
-
-  if (UNLIKELY(expect_code && (code_offset == 0))) {
-    ErrorStringPrintf("Unexpected zero value for class_data_item method code_off with access "
-                      "flags %x", access_flags);
-    return false;
-  } else if (UNLIKELY(!expect_code && (code_offset != 0))) {
-    ErrorStringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
-                      " with access flags %x", code_offset, access_flags);
+  // Check method access flags.
+  bool has_code = (code_offset != 0);
+  std::string error_msg;
+  if (!CheckMethodAccessFlags(idx,
+                              access_flags,
+                              class_access_flags,
+                              has_code,
+                              expect_direct,
+                              &error_msg)) {
+    ErrorStringPrintf("%s", error_msg.c_str());
     return false;
   }
 
@@ -689,60 +711,185 @@
   return true;
 }
 
+bool DexFileVerifier::FindClassFlags(uint32_t index,
+                                     bool is_field,
+                                     uint16_t* class_type_index,
+                                     uint32_t* class_access_flags) {
+  DCHECK(class_type_index != nullptr);
+  DCHECK(class_access_flags != nullptr);
+
+  // First check if the index is valid.
+  if (index >= (is_field ? header_->field_ids_size_ : header_->method_ids_size_)) {
+    return false;
+  }
+
+  // Next get the type index.
+  if (is_field) {
+    *class_type_index =
+        (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + index)->
+            class_idx_;
+  } else {
+    *class_type_index =
+        (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + index)->
+            class_idx_;
+  }
+
+  // Check that the type index itself is valid.
+  if (*class_type_index >= header_->type_ids_size_) {
+    return false;
+  }
+
+  // Now search for the class def. This is basically a specialized version of the DexFile code, as
+  // we should not trust that this is a valid DexFile just yet.
+  const DexFile::ClassDef* class_def_begin =
+      reinterpret_cast<const DexFile::ClassDef*>(begin_ + header_->class_defs_off_);
+  for (size_t i = 0; i < header_->class_defs_size_; ++i) {
+    const DexFile::ClassDef* class_def = class_def_begin + i;
+    if (class_def->class_idx_ == *class_type_index) {
+      *class_access_flags = class_def->access_flags_;
+      return true;
+    }
+  }
+
+  // Didn't find the class-def, not defined here...
+  return false;
+}
+
+bool DexFileVerifier::CheckOrderAndGetClassFlags(bool is_field,
+                                                 const char* type_descr,
+                                                 uint32_t curr_index,
+                                                 uint32_t prev_index,
+                                                 bool* have_class,
+                                                 uint16_t* class_type_index,
+                                                 uint32_t* class_access_flags) {
+  if (curr_index < prev_index) {
+    ErrorStringPrintf("out-of-order %s indexes %" PRIu32 " and %" PRIu32,
+                      type_descr,
+                      prev_index,
+                      curr_index);
+    return false;
+  }
+
+  if (!*have_class) {
+    *have_class = FindClassFlags(curr_index, is_field, class_type_index, class_access_flags);
+    if (!*have_class) {
+      // Should have really found one.
+      ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
+                        type_descr,
+                        curr_index);
+      return false;
+    }
+  }
+  return true;
+}
+
+template <bool kStatic>
+bool DexFileVerifier::CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+                                                    bool* have_class,
+                                                    uint16_t* class_type_index,
+                                                    uint32_t* class_access_flags) {
+  DCHECK(it != nullptr);
+  // These calls use the raw access flags to check whether the whole dex field is valid.
+  uint32_t prev_index = 0;
+  for (; kStatic ? it->HasNextStaticField() : it->HasNextInstanceField(); it->Next()) {
+    uint32_t curr_index = it->GetMemberIndex();
+    if (!CheckOrderAndGetClassFlags(true,
+                                    kStatic ? "static field" : "instance field",
+                                    curr_index,
+                                    prev_index,
+                                    have_class,
+                                    class_type_index,
+                                    class_access_flags)) {
+      return false;
+    }
+    prev_index = curr_index;
+
+    if (!CheckClassDataItemField(curr_index,
+                                 it->GetRawMemberAccessFlags(),
+                                 *class_access_flags,
+                                 *class_type_index,
+                                 kStatic)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+template <bool kDirect>
+bool DexFileVerifier::CheckIntraClassDataItemMethods(
+    ClassDataItemIterator* it,
+    std::unordered_set<uint32_t>* direct_method_indexes,
+    bool* have_class,
+    uint16_t* class_type_index,
+    uint32_t* class_access_flags) {
+  uint32_t prev_index = 0;
+  for (; kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod(); it->Next()) {
+    uint32_t curr_index = it->GetMemberIndex();
+    if (!CheckOrderAndGetClassFlags(false,
+                                    kDirect ? "direct method" : "virtual method",
+                                    curr_index,
+                                    prev_index,
+                                    have_class,
+                                    class_type_index,
+                                    class_access_flags)) {
+      return false;
+    }
+    prev_index = curr_index;
+
+    if (!CheckClassDataItemMethod(curr_index,
+                                  it->GetRawMemberAccessFlags(),
+                                  *class_access_flags,
+                                  *class_type_index,
+                                  it->GetMethodCodeItemOffset(),
+                                  direct_method_indexes,
+                                  kDirect)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
 bool DexFileVerifier::CheckIntraClassDataItem() {
   ClassDataItemIterator it(*dex_file_, ptr_);
   std::unordered_set<uint32_t> direct_method_indexes;
 
-  // These calls use the raw access flags to check whether the whole dex field is valid.
-  uint32_t prev_index = 0;
-  for (; it.HasNextStaticField(); it.Next()) {
-    uint32_t curr_index = it.GetMemberIndex();
-    if (curr_index < prev_index) {
-      ErrorStringPrintf("out-of-order static field indexes %d and %d", prev_index, curr_index);
-      return false;
-    }
-    prev_index = curr_index;
-    if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), true)) {
-      return false;
-    }
+  // This code is complicated by the fact that we don't directly know which class this
+  // class_data_item belongs to, so we explicitly search using the first member we find
+  // (either field or method) and then, as the lookup is expensive, cache the result.
+  bool have_class = false;
+  uint16_t class_type_index;
+  uint32_t class_access_flags;
+
+  // Check fields.
+  if (!CheckIntraClassDataItemFields<true>(&it,
+                                           &have_class,
+                                           &class_type_index,
+                                           &class_access_flags)) {
+    return false;
   }
-  prev_index = 0;
-  for (; it.HasNextInstanceField(); it.Next()) {
-    uint32_t curr_index = it.GetMemberIndex();
-    if (curr_index < prev_index) {
-      ErrorStringPrintf("out-of-order instance field indexes %d and %d", prev_index, curr_index);
-      return false;
-    }
-    prev_index = curr_index;
-    if (!CheckClassDataItemField(curr_index, it.GetRawMemberAccessFlags(), false)) {
-      return false;
-    }
+  if (!CheckIntraClassDataItemFields<false>(&it,
+                                            &have_class,
+                                            &class_type_index,
+                                            &class_access_flags)) {
+    return false;
   }
-  prev_index = 0;
-  for (; it.HasNextDirectMethod(); it.Next()) {
-    uint32_t curr_index = it.GetMemberIndex();
-    if (curr_index < prev_index) {
-      ErrorStringPrintf("out-of-order direct method indexes %d and %d", prev_index, curr_index);
-      return false;
-    }
-    prev_index = curr_index;
-    if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
-        it.GetMethodCodeItemOffset(), direct_method_indexes, true)) {
-      return false;
-    }
+
+  // Check methods.
+  if (!CheckIntraClassDataItemMethods<true>(&it,
+                                            &direct_method_indexes,
+                                            &have_class,
+                                            &class_type_index,
+                                            &class_access_flags)) {
+    return false;
   }
-  prev_index = 0;
-  for (; it.HasNextVirtualMethod(); it.Next()) {
-    uint32_t curr_index = it.GetMemberIndex();
-    if (curr_index < prev_index) {
-      ErrorStringPrintf("out-of-order virtual method indexes %d and %d", prev_index, curr_index);
-      return false;
-    }
-    prev_index = curr_index;
-    if (!CheckClassDataItemMethod(curr_index, it.GetRawMemberAccessFlags(),
-        it.GetMethodCodeItemOffset(), direct_method_indexes, false)) {
-      return false;
-    }
+  if (!CheckIntraClassDataItemMethods<false>(&it,
+                                             &direct_method_indexes,
+                                             &have_class,
+                                             &class_type_index,
+                                             &class_access_flags)) {
+    return false;
   }
 
   ptr_ = it.EndDataPointer();
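The access-flag helpers added in the next hunk check the visibility bits by
counting them explicitly (CheckAtMostOneOfPublicProtectedPrivate). An
equivalent popcount formulation, shown as a sketch with the standard dex
access-flag values assumed inline:

  #include <bitset>
  #include <cstdint>

  constexpr uint32_t kAccPublic = 0x0001;     // standard dex/JVM values
  constexpr uint32_t kAccPrivate = 0x0002;
  constexpr uint32_t kAccProtected = 0x0004;

  static bool AtMostOneVisibilityFlag(uint32_t flags) {
    constexpr uint32_t kVisibility = kAccPublic | kAccPrivate | kAccProtected;
    return std::bitset<32>(flags & kVisibility).count() <= 1;
  }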
@@ -2149,4 +2296,259 @@
   va_end(ap);
 }
 
+// Fields and methods may have only one of public/protected/private.
+static bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) {
+  size_t count = (((flags & kAccPublic) == 0) ? 0 : 1) +
+                 (((flags & kAccProtected) == 0) ? 0 : 1) +
+                 (((flags & kAccPrivate) == 0) ? 0 : 1);
+  return count <= 1;
+}
+
+bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
+                                            uint32_t class_access_flags,
+                                            std::string* error_msg) {
+  // Generally sort out >16-bit flags.
+  if ((field_access_flags & ~kAccJavaFlagsMask) != 0) {
+    *error_msg = StringPrintf("Bad class_data_item field access_flags %x", field_access_flags);
+    return false;
+  }
+
+  // Flags allowed on fields, in general. Other lower-16-bit flags are to be ignored.
+  constexpr uint32_t kFieldAccessFlags = kAccPublic |
+                                         kAccPrivate |
+                                         kAccProtected |
+                                         kAccStatic |
+                                         kAccFinal |
+                                         kAccVolatile |
+                                         kAccTransient |
+                                         kAccSynthetic |
+                                         kAccEnum;
+
+  // Fields may have only one of public/protected/private.
+  if (!CheckAtMostOneOfPublicProtectedPrivate(field_access_flags)) {
+    *error_msg = StringPrintf("Field may have only one of public/protected/private, %x",
+                              field_access_flags);
+    return false;
+  }
+
+  // Interfaces have a pretty restricted list.
+  if ((class_access_flags & kAccInterface) != 0) {
+    // Interface fields must be public final static.
+    constexpr uint32_t kPublicFinalStatic = kAccPublic | kAccFinal | kAccStatic;
+    if ((field_access_flags & kPublicFinalStatic) != kPublicFinalStatic) {
+      *error_msg = StringPrintf("Interface field is not public final static: %x",
+                                field_access_flags);
+      return false;
+    }
+    // Interface fields may be synthetic, but may not have other flags.
+    constexpr uint32_t kDisallowed = ~(kPublicFinalStatic | kAccSynthetic);
+    if ((field_access_flags & kFieldAccessFlags & kDisallowed) != 0) {
+      *error_msg = StringPrintf("Interface field has disallowed flag: %x", field_access_flags);
+      return false;
+    }
+    return true;
+  }
+
+  // Volatile fields may not be final.
+  constexpr uint32_t kVolatileFinal = kAccVolatile | kAccFinal;
+  if ((field_access_flags & kVolatileFinal) == kVolatileFinal) {
+    *error_msg = "Fields may not be volatile and final";
+    return false;
+  }
+
+  return true;
+}
+
+// Try to find the name of the method with the given index. We do not want to rely on DexFile
+// infrastructure at this point, so do it all by hand. begin and header correspond to begin_ and
+// header_ of the DexFileVerifier. str will contain the pointer to the method name on success
+// (flagged by the return value), otherwise error_msg will contain an error string.
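+// The lookup chain is: method_ids_[method_index].name_idx_ ->
+// string_ids_[name_idx_].string_data_off_ -> uleb128 utf16 length prefix, followed by the
+// string data itself.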
+static bool FindMethodName(uint32_t method_index,
+                           const uint8_t* begin,
+                           const DexFile::Header* header,
+                           const char** str,
+                           std::string* error_msg) {
+  if (method_index >= header->method_ids_size_) {
+    *error_msg = "Method index not available for method flags verification";
+    return false;
+  }
+  uint32_t string_idx =
+      (reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) +
+          method_index)->name_idx_;
+  if (string_idx >= header->string_ids_size_) {
+    *error_msg = "String index not available for method flags verification";
+    return false;
+  }
+  uint32_t string_off =
+      (reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_) + string_idx)->
+          string_data_off_;
+  if (string_off >= header->file_size_) {
+    *error_msg = "String offset out of bounds for method flags verification";
+    return false;
+  }
+  const uint8_t* str_data_ptr = begin + string_off;
+  DecodeUnsignedLeb128(&str_data_ptr);
+  *str = reinterpret_cast<const char*>(str_data_ptr);
+  return true;
+}
+
+bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
+                                             uint32_t method_access_flags,
+                                             uint32_t class_access_flags,
+                                             bool has_code,
+                                             bool expect_direct,
+                                             std::string* error_msg) {
+  // Reject all flags above the low 16 bits, except kAccConstructor and kAccDeclaredSynchronized,
+  // which dex additionally defines.
+  constexpr uint32_t kAllMethodFlags =
+      kAccJavaFlagsMask | kAccConstructor | kAccDeclaredSynchronized;
+  if ((method_access_flags & ~kAllMethodFlags) != 0) {
+    *error_msg = StringPrintf("Bad class_data_item method access_flags %x", method_access_flags);
+    return false;
+  }
+
+  // Flags allowed on methods, in general. Other lower-16-bit flags are to be ignored.
+  constexpr uint32_t kMethodAccessFlags = kAccPublic |
+                                          kAccPrivate |
+                                          kAccProtected |
+                                          kAccStatic |
+                                          kAccFinal |
+                                          kAccSynthetic |
+                                          kAccSynchronized |
+                                          kAccBridge |
+                                          kAccVarargs |
+                                          kAccNative |
+                                          kAccAbstract |
+                                          kAccStrict;
+
+  // Methods may have only one of public/protected/private.
+  if (!CheckAtMostOneOfPublicProtectedPrivate(method_access_flags)) {
+    *error_msg = StringPrintf("Method may have only one of public/protected/private, %x",
+                              method_access_flags);
+    return false;
+  }
+
+  // Try to find the name, to check for constructor properties.
+  const char* str;
+  if (!FindMethodName(method_index, begin_, header_, &str, error_msg)) {
+    return false;
+  }
+  bool is_init_by_name = false;
+  // Note: array types (not pointers), so that sizeof yields the string size including the
+  // terminating NUL, rather than the pointer size.
+  constexpr char kInitName[] = "<init>";
+  size_t str_offset = (reinterpret_cast<const uint8_t*>(str) - begin_);
+  if (header_->file_size_ - str_offset >= sizeof(kInitName)) {
+    is_init_by_name = strcmp(kInitName, str) == 0;
+  }
+  bool is_clinit_by_name = false;
+  constexpr char kClinitName[] = "<clinit>";
+  if (header_->file_size_ - str_offset >= sizeof(kClinitName)) {
+    is_clinit_by_name = strcmp(kClinitName, str) == 0;
+  }
+  bool is_constructor = is_init_by_name || is_clinit_by_name;
+
+  // Only methods named "<clinit>" or "<init>" may be marked constructor. Note: we cannot enforce
+  // the reverse for backwards compatibility reasons.
+  if (((method_access_flags & kAccConstructor) != 0) && !is_constructor) {
+    *error_msg = StringPrintf("Method %" PRIu32 " is marked constructor, but doesn't match name",
+                              method_index);
+    return false;
+  }
+  // Check that the static constructor (= static initializer) is named "<clinit>" and that the
+  // instance constructor is called "<init>".
+  if (is_constructor) {
+    bool is_static = (method_access_flags & kAccStatic) != 0;
+    if (is_static ^ is_clinit_by_name) {
+      *error_msg = StringPrintf("Constructor %" PRIu32 " is not flagged correctly wrt/ static.",
+                                method_index);
+      return false;
+    }
+  }
+  // Check that static and private methods, as well as constructors, are in the direct methods
+  // list, and that all other methods are in the virtual methods list.
+  bool is_direct = (method_access_flags & (kAccStatic | kAccPrivate)) != 0 || is_constructor;
+  if (is_direct != expect_direct) {
+    *error_msg = StringPrintf("Direct/virtual method %" PRIu32 " not in expected list %d",
+                              method_index,
+                              expect_direct);
+    return false;
+  }
+
+  // From here on out it is easier to mask out the bits we're supposed to ignore.
+  method_access_flags &= kMethodAccessFlags;
+
+  // If there aren't any instructions, make sure that's expected.
+  if (!has_code) {
+    // Only native or abstract methods may not have code.
+    if ((method_access_flags & (kAccNative | kAccAbstract)) == 0) {
+      *error_msg = StringPrintf("Method %" PRIu32 " has no code, but is not marked native or "
+                                "abstract",
+                                method_index);
+      return false;
+    }
+    // Constructors must always have code.
+    if (is_constructor) {
+      *error_msg = StringPrintf("Constructor %u must not be abstract or native", method_index);
+      return false;
+    }
+    if ((method_access_flags & kAccAbstract) != 0) {
+      // Abstract methods are not allowed to have the following flags.
+      constexpr uint32_t kForbidden =
+          kAccPrivate | kAccStatic | kAccFinal | kAccNative | kAccStrict | kAccSynchronized;
+      if ((method_access_flags & kForbidden) != 0) {
+        *error_msg = StringPrintf("Abstract method %" PRIu32 " has disallowed access flags %x",
+                                  method_index,
+                                  method_access_flags);
+        return false;
+      }
+      // Abstract methods must be in an abstract class or interface.
+      if ((class_access_flags & (kAccInterface | kAccAbstract)) == 0) {
+        *error_msg = StringPrintf("Method %" PRIu32 " is abstract, but the declaring class "
+                                  "is neither abstract nor an interface", method_index);
+        return false;
+      }
+    }
+    // Interfaces are special.
+    if ((class_access_flags & kAccInterface) != 0) {
+      // Interface methods must be public and abstract.
+      if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
+        *error_msg = StringPrintf("Interface method %" PRIu32 " is not public and abstract",
+                                  method_index);
+        return false;
+      }
+      // At this point, we know the method is public and abstract. This means that all the
+      // checks for invalid combinations above apply. In addition, interface methods must not be
+      // protected. This is caught by the check for only-one-of-public-protected-private.
+    }
+    return true;
+  }
+
+  // When there's code, the method must not be native or abstract.
+  if ((method_access_flags & (kAccNative | kAccAbstract)) != 0) {
+    *error_msg = StringPrintf("Method %" PRIu32 " has code, but is marked native or abstract",
+                              method_index);
+    return false;
+  }
+
+  // Only the static initializer may have code in an interface.
+  if (((class_access_flags & kAccInterface) != 0) && !is_clinit_by_name) {
+    *error_msg = StringPrintf("Non-clinit interface method %" PRIu32 " should not have code",
+                              method_index);
+    return false;
+  }
+
+  // Instance constructors may only have a restricted set of flags; in particular, they must not
+  // be synchronized.
+  if (is_init_by_name) {
+    static constexpr uint32_t kInitAllowed =
+        kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic;
+    if ((method_access_flags & ~kInitAllowed) != 0) {
+      *error_msg = StringPrintf("Constructor %" PRIu32 " flagged inappropriately %x",
+                                method_index,
+                                method_access_flags);
+      return false;
+    }
+  }
+
+  return true;
+}
+
 }  // namespace art
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index ccc40d4..4f15357 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -57,16 +57,48 @@
   uint32_t ReadUnsignedLittleEndian(uint32_t size);
   bool CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
                                  uint32_t* handler_offsets, uint32_t handlers_size);
-  bool CheckClassDataItemField(uint32_t idx, uint32_t access_flags, bool expect_static);
-  bool CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags, uint32_t code_offset,
-                                std::unordered_set<uint32_t>& direct_method_indexes,
+  bool CheckClassDataItemField(uint32_t idx,
+                               uint32_t access_flags,
+                               uint32_t class_access_flags,
+                               uint16_t class_type_index,
+                               bool expect_static);
+  bool CheckClassDataItemMethod(uint32_t idx,
+                                uint32_t access_flags,
+                                uint32_t class_access_flags,
+                                uint16_t class_type_index,
+                                uint32_t code_offset,
+                                std::unordered_set<uint32_t>* direct_method_indexes,
                                 bool expect_direct);
+  bool CheckOrderAndGetClassFlags(bool is_field,
+                                  const char* type_descr,
+                                  uint32_t curr_index,
+                                  uint32_t prev_index,
+                                  bool* have_class,
+                                  uint16_t* class_type_index,
+                                  uint32_t* class_access_flags);
+
   bool CheckPadding(size_t offset, uint32_t aligned_offset);
   bool CheckEncodedValue();
   bool CheckEncodedArray();
   bool CheckEncodedAnnotation();
 
   bool CheckIntraClassDataItem();
+  // Check all fields of the given kind (static or instance) from the given iterator. Load the
+  // class' type index and access flags from the first field if they are not yet known (and
+  // return them through the out-parameters), otherwise use the given values.
+  template <bool kStatic>
+  bool CheckIntraClassDataItemFields(ClassDataItemIterator* it,
+                                     bool* have_class,
+                                     uint16_t* class_type_index,
+                                     uint32_t* class_access_flags);
+  // Check all methods of the given kind (direct or virtual) from the given iterator. Load the
+  // class' type index and access flags from the first method if they are not yet known (and
+  // return them through the out-parameters), otherwise use the given values.
+  template <bool kDirect>
+  bool CheckIntraClassDataItemMethods(ClassDataItemIterator* it,
+                                      std::unordered_set<uint32_t>* direct_method_indexes,
+                                      bool* have_class,
+                                      uint16_t* class_type_index,
+                                      uint32_t* class_access_flags);
+
   bool CheckIntraCodeItem();
   bool CheckIntraStringDataItem();
   bool CheckIntraDebugInfoItem();
@@ -112,6 +144,31 @@
   void ErrorStringPrintf(const char* fmt, ...)
       __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
 
+  // Retrieve class index and class access flags from the given member. index is the member index,
+  // which is taken as either a field or a method index (as designated by is_field). The result,
+  // if the member and declaring class could be found, is stored in class_type_index and
+  // class_access_flags.
+  // This is an expensive lookup, as we have to find the class-def by type index, which is a
+  // linear search. The output values should thus be cached by the caller.
+  bool FindClassFlags(uint32_t index,
+                      bool is_field,
+                      uint16_t* class_type_index,
+                      uint32_t* class_access_flags);
+
+  // Check validity of the given field access flags, interpreted in the context of a class with
+  // the given class access flags.
+  static bool CheckFieldAccessFlags(uint32_t field_access_flags,
+                                    uint32_t class_access_flags,
+                                    std::string* error_msg);
+  // Check validity of the given method access flags (for the method with the given index), in
+  // the context of a class with the given class access flags.
+  bool CheckMethodAccessFlags(uint32_t method_index,
+                              uint32_t method_access_flags,
+                              uint32_t class_access_flags,
+                              bool has_code,
+                              bool expect_direct,
+                              std::string* error_msg);
+
   const DexFile* const dex_file_;
   const uint8_t* const begin_;
   const size_t size_;
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 9f1ffec..1b529c9 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -18,18 +18,20 @@
 
 #include "sys/mman.h"
 #include "zlib.h"
+#include <functional>
 #include <memory>
 
 #include "base/unix_file/fd_file.h"
+#include "base/bit_utils.h"
 #include "base/macros.h"
 #include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "leb128.h"
 #include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 
 namespace art {
 
-class DexFileVerifierTest : public CommonRuntimeTest {};
-
 static const uint8_t kBase64Map[256] = {
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
   255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
@@ -101,6 +103,64 @@
   return dst.release();
 }
 
+static void FixUpChecksum(uint8_t* dex_file) {
+  DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
+  uint32_t expected_size = header->file_size_;
+  uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
+  const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
+  const uint8_t* non_sum_ptr = dex_file + non_sum;
+  adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
+  header->checksum_ = adler_checksum;
+}
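+// Note: the checksum covers everything after the magic and the checksum field itself, so it
+// must be recomputed after any mutation of the file.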
+
+// Custom deleter. Necessary to clean up the heap memory backing the DexFile, which the tests
+// need to be able to mutate.
+struct DexFileDeleter {
+  void operator()(DexFile* in) {
+    if (in != nullptr) {
+      delete[] in->Begin();  // Allocated with new[] in DecodeBase64.
+      delete in;
+    }
+  }
+};
+
+using DexFileUniquePtr = std::unique_ptr<DexFile, DexFileDeleter>;
+
+class DexFileVerifierTest : public CommonRuntimeTest {
+ protected:
+  void VerifyModification(const char* dex_file_base64_content,
+                          const char* location,
+                          std::function<void(DexFile*)> f,
+                          const char* expected_error) {
+    DexFileUniquePtr dex_file(WrapAsDexFile(dex_file_base64_content));
+    f(dex_file.get());
+    FixUpChecksum(const_cast<uint8_t*>(dex_file->Begin()));
+
+    std::string error_msg;
+    bool success = DexFileVerifier::Verify(dex_file.get(),
+                                           dex_file->Begin(),
+                                           dex_file->Size(),
+                                           location,
+                                           &error_msg);
+    if (expected_error == nullptr) {
+      EXPECT_TRUE(success) << error_msg;
+    } else {
+      EXPECT_FALSE(success) << "Expected " << expected_error;
+      if (!success) {
+        EXPECT_NE(error_msg.find(expected_error), std::string::npos) << error_msg;
+      }
+    }
+  }
+
+ private:
+  static DexFile* WrapAsDexFile(const char* dex_file_content_in_base_64) {
+    // Decode base64.
+    size_t length;
+    uint8_t* dex_bytes = DecodeBase64(dex_file_content_in_base_64, &length);
+    CHECK(dex_bytes != nullptr);
+    return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
+  }
+};
+
 static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
                                                         const char* location,
                                                         std::string* error_msg) {
@@ -133,7 +193,6 @@
   return dex_file;
 }
 
-
 // For reference.
 static const char kGoodTestDex[] =
     "ZGV4CjAzNQDrVbyVkxX1HljTznNf95AglkUAhQuFtmKkAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAN"
@@ -157,92 +216,1003 @@
   ASSERT_TRUE(raw.get() != nullptr) << error_msg;
 }
 
-static void FixUpChecksum(uint8_t* dex_file) {
-  DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
-  uint32_t expected_size = header->file_size_;
-  uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
-  const uint32_t non_sum = sizeof(DexFile::Header::magic_) + sizeof(DexFile::Header::checksum_);
-  const uint8_t* non_sum_ptr = dex_file + non_sum;
-  adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
-  header->checksum_ = adler_checksum;
-}
-
-static std::unique_ptr<const DexFile> FixChecksumAndOpen(uint8_t* bytes, size_t length,
-                                                         const char* location,
-                                                         std::string* error_msg) {
-  // Check data.
-  CHECK(bytes != nullptr);
-
-  // Fixup of checksum.
-  FixUpChecksum(bytes);
-
-  // write to provided file
-  std::unique_ptr<File> file(OS::CreateEmptyFile(location));
-  CHECK(file.get() != nullptr);
-  if (!file->WriteFully(bytes, length)) {
-    PLOG(FATAL) << "Failed to write base64 as dex file";
-  }
-  if (file->FlushCloseOrErase() != 0) {
-    PLOG(FATAL) << "Could not flush and close test file.";
-  }
-  file.reset();
-
-  // read dex file
-  ScopedObjectAccess soa(Thread::Current());
-  std::vector<std::unique_ptr<const DexFile>> tmp;
-  if (!DexFile::Open(location, location, error_msg, &tmp)) {
-    return nullptr;
-  }
-  EXPECT_EQ(1U, tmp.size());
-  std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
-  EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
-  EXPECT_TRUE(dex_file->IsReadOnly());
-  return dex_file;
-}
-
-static bool ModifyAndLoad(const char* dex_file_content, const char* location, size_t offset,
-                          uint8_t new_val, std::string* error_msg) {
-  // Decode base64.
-  size_t length;
-  std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_content, &length));
-  CHECK(dex_bytes.get() != nullptr);
-
-  // Make modifications.
-  dex_bytes.get()[offset] = new_val;
-
-  // Fixup and load.
-  std::unique_ptr<const DexFile> file(FixChecksumAndOpen(dex_bytes.get(), length, location,
-                                                         error_msg));
-  return file.get() != nullptr;
-}
-
 TEST_F(DexFileVerifierTest, MethodId) {
-  {
-    // Class error.
-    ScratchFile tmp;
-    std::string error_msg;
-    bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 220, 0xFFU, &error_msg);
-    ASSERT_TRUE(success);
-    ASSERT_NE(error_msg.find("inter_method_id_item class_idx"), std::string::npos) << error_msg;
+  // Class idx error.
+  VerifyModification(
+      kGoodTestDex,
+      "method_id_class_idx",
+      [](DexFile* dex_file) {
+        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        method_id->class_idx_ = 0xFF;
+      },
+      "could not find declaring class for direct method index 0");
+
+  // Proto idx error.
+  VerifyModification(
+      kGoodTestDex,
+      "method_id_proto_idx",
+      [](DexFile* dex_file) {
+        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        method_id->proto_idx_ = 0xFF;
+      },
+      "inter_method_id_item proto_idx");
+
+  // Name idx error.
+  VerifyModification(
+      kGoodTestDex,
+      "method_id_name_idx",
+      [](DexFile* dex_file) {
+        DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+        method_id->name_idx_ = 0xFF;
+      },
+      "String index not available for method flags verification");
+}
+
+// Method flags test class generated from the following smali code. The declared-synchronized
+// flags are there to enforce a 3-byte uLEB128 encoding so we don't have to relayout
+// the code, but we need to remove them before doing tests.
+//
+// .class public LMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+//     return-void
+// .end method
+//
+// .method public constructor <init>()V
+// .registers 1
+//     return-void
+// .end method
+//
+// .method private declared-synchronized foo()V
+// .registers 1
+//     return-void
+// .end method
+//
+// .method public declared-synchronized bar()V
+// .registers 1
+//     return-void
+// .end method
+
+static const char kMethodFlagsTestDex[] =
+    "ZGV4CjAzNQCyOQrJaDBwiIWv5MIuYKXhxlLLsQcx5SwgAgAAcAAAAHhWNBIAAAAAAAAAAJgBAAAH"
+    "AAAAcAAAAAMAAACMAAAAAQAAAJgAAAAAAAAAAAAAAAQAAACkAAAAAQAAAMQAAAA8AQAA5AAAAOQA"
+    "AADuAAAA9gAAAAUBAAAZAQAAHAEAACEBAAACAAAAAwAAAAQAAAAEAAAAAgAAAAAAAAAAAAAAAAAA"
+    "AAAAAAABAAAAAAAAAAUAAAAAAAAABgAAAAAAAAABAAAAAQAAAAAAAAD/////AAAAAHoBAAAAAAAA"
+    "CDxjbGluaXQ+AAY8aW5pdD4ADUxNZXRob2RGbGFnczsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgAD"
+    "YmFyAANmb28AAAAAAAAAAQAAAAAAAAAAAAAAAQAAAA4AAAABAAEAAAAAAAAAAAABAAAADgAAAAEA"
+    "AQAAAAAAAAAAAAEAAAAOAAAAAQABAAAAAAAAAAAAAQAAAA4AAAADAQCJgASsAgGBgATAAgKCgAjU"
+    "AgKBgAjoAgAACwAAAAAAAAABAAAAAAAAAAEAAAAHAAAAcAAAAAIAAAADAAAAjAAAAAMAAAABAAAA"
+    "mAAAAAUAAAAEAAAApAAAAAYAAAABAAAAxAAAAAIgAAAHAAAA5AAAAAMQAAABAAAAKAEAAAEgAAAE"
+    "AAAALAEAAAAgAAABAAAAegEAAAAQAAABAAAAmAEAAA==";
+
+// Find the method data for the first method with the given name (from class 0). Note: the pointer
+// is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index
+// delta.
+static const uint8_t* FindMethodData(const DexFile* dex_file, const char* name) {
+  const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
+  const uint8_t* class_data = dex_file->GetClassData(class_def);
+
+  ClassDataItemIterator it(*dex_file, class_data);
+
+  const uint8_t* trailing = class_data;
+  // Need to manually decode the four size entries (static_fields_size, instance_fields_size,
+  // direct_methods_size, virtual_methods_size). DataPointer() doesn't work for this, as the
+  // first element has already been loaded into the iterator.
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+
+  // Skip all fields.
+  while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+    trailing = it.DataPointer();
+    it.Next();
   }
 
-  {
-    // Proto error.
-    ScratchFile tmp;
-    std::string error_msg;
-    bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 222, 0xFFU, &error_msg);
-    ASSERT_TRUE(success);
-    ASSERT_NE(error_msg.find("inter_method_id_item proto_idx"), std::string::npos) << error_msg;
+  while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+    uint32_t method_index = it.GetMemberIndex();
+    uint32_t name_index = dex_file->GetMethodId(method_index).name_idx_;
+    const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+    const char* str = dex_file->GetStringData(string_id);
+    if (strcmp(name, str) == 0) {
+      DecodeUnsignedLeb128(&trailing);
+      return trailing;
+    }
+
+    trailing = it.DataPointer();
+    it.Next();
   }
 
-  {
-    // Name error.
-    ScratchFile tmp;
-    std::string error_msg;
-    bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 224, 0xFFU, &error_msg);
-    ASSERT_TRUE(success);
-    ASSERT_NE(error_msg.find("inter_method_id_item name_idx"), std::string::npos) << error_msg;
+  return nullptr;
+}
+
+// Set the method flags to the given value.
+static void SetMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+  uint8_t* method_flags_ptr = const_cast<uint8_t*>(FindMethodData(dex_file, method));
+  CHECK(method_flags_ptr != nullptr) << method;
+
+  // Unroll the encoding loop, as we only have three bytes anyway.
+  uint8_t base1 = static_cast<uint8_t>(mask & 0x7F);
+  *(method_flags_ptr++) = (base1 | 0x80);
+  mask >>= 7;
+
+  uint8_t base2 = static_cast<uint8_t>(mask & 0x7F);
+  *(method_flags_ptr++) = (base2 | 0x80);
+  mask >>= 7;
+
+  uint8_t base3 = static_cast<uint8_t>(mask & 0x7F);
+  *method_flags_ptr = base3;
+}
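+// For illustration: with the encoding above, mask == 0x0001 is written as 0x81 0x80 0x00, which
+// still decodes to 1 but always occupies the three bytes reserved by the declared-synchronized
+// trick.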
+
+static uint32_t GetMethodFlags(DexFile* dex_file, const char* method) {
+  const uint8_t* method_flags_ptr = FindMethodData(dex_file, method);
+  CHECK(method_flags_ptr != nullptr) << method;
+  return DecodeUnsignedLeb128(&method_flags_ptr);
+}
+
+// Apply the given mask to method flags.
+static void ApplyMaskToMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+  uint32_t value = GetMethodFlags(dex_file, method);
+  value &= mask;
+  SetMethodFlags(dex_file, method, value);
+}
+
+// Or the given mask into the method flags.
+static void OrMaskToMethodFlags(DexFile* dex_file, const char* method, uint32_t mask) {
+  uint32_t value = GetMethodFlags(dex_file, method);
+  value |= mask;
+  SetMethodFlags(dex_file, method, value);
+}
+
+// Set code_off to 0 for the method.
+static void RemoveCode(DexFile* dex_file, const char* method) {
+  const uint8_t* ptr = FindMethodData(dex_file, method);
+  // The first entry is the access flags; skip past it.
+  DecodeUnsignedLeb128(&ptr);
+
+  // Figure out how many bytes the code_off occupies.
+  const uint8_t* tmp = ptr;
+  DecodeUnsignedLeb128(&tmp);
+  size_t bytes = tmp - ptr;
+
+  uint8_t* mod = const_cast<uint8_t*>(ptr);
+  for (size_t i = 1; i < bytes; ++i) {
+    *(mod++) = 0x80;
   }
+  *mod = 0x00;
+}
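+// For illustration: a two-byte code_off encoding 0xAC 0x02 (= 300) becomes 0x80 0x00, which
+// still occupies two bytes but decodes to 0, so the dex file does not need to be relaid out.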
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsBase) {
+  // Check that the file verifies once the bogus declared-synchronized flags are removed.
+  VerifyModification(
+      kMethodFlagsTestDex,
+      "method_flags_ok",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+      },
+      nullptr);
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) {
+  // Make sure we still accept constructors without their flags.
+  VerifyModification(
+      kMethodFlagsTestDex,
+      "method_flags_missing_constructor_tag_ok",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccConstructor);
+        ApplyMaskToMethodFlags(dex_file, "<clinit>", ~kAccConstructor);
+      },
+      nullptr);
+
+  constexpr const char* kConstructors[] = { "<clinit>", "<init>" };
+  for (size_t i = 0; i < arraysize(kConstructors); ++i) {
+    // Constructor with code marked native.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_constructor_native",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kConstructors[i], kAccNative);
+        },
+        "has code, but is marked native or abstract");
+    // Constructor with code marked abstract.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_constructor_abstract",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kConstructors[i], kAccAbstract);
+        },
+        "has code, but is marked native or abstract");
+    // Constructor as-is without code.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_constructor_nocode",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          RemoveCode(dex_file, kConstructors[i]);
+        },
+        "has no code, but is not marked native or abstract");
+    // Constructor without code marked native.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_constructor_native_nocode",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kConstructors[i], kAccNative);
+          RemoveCode(dex_file, kConstructors[i]);
+        },
+        "must not be abstract or native");
+    // Constructor without code marked abstract.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_constructor_abstract_nocode",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kConstructors[i], kAccAbstract);
+          RemoveCode(dex_file, kConstructors[i]);
+        },
+        "must not be abstract or native");
+  }
+  // <init> may only have the following flags (modulo ignored bits):
+  // kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic
+  static constexpr uint32_t kInitAllowed[] = {
+      0,
+      kAccPrivate,
+      kAccProtected,
+      kAccPublic,
+      kAccStrict,
+      kAccVarargs,
+      kAccSynthetic
+  };
+  for (size_t i = 0; i < arraysize(kInitAllowed); ++i) {
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "init_allowed_flags",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+          OrMaskToMethodFlags(dex_file, "<init>", kInitAllowed[i]);
+        },
+        nullptr);
+  }
+  // Only one of public-private-protected.
+  for (size_t i = 1; i < 8; ++i) {
+    if (POPCOUNT(i) < 2) {
+      continue;
+    }
+    // Technically, the loop index i could serve as the mask directly, since public, private and
+    // protected occupy the low three bits, but build the mask explicitly to be defensive.
+    uint32_t mask = ((i & 1) != 0 ? kAccPrivate : 0) |
+                    ((i & 2) != 0 ? kAccProtected : 0) |
+                    ((i & 4) != 0 ? kAccPublic : 0);
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "init_one_of_ppp",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+          OrMaskToMethodFlags(dex_file, "<init>", mask);
+        },
+        "Method may have only one of public/protected/private");
+  }
+  // <init> does not allow kAccStatic | kAccFinal | kAccSynchronized | kAccBridge.
+  // Static needs to be handled separately, as it produces its own error message.
+  VerifyModification(
+      kMethodFlagsTestDex,
+      "init_not_allowed_flags",
+      [&](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+        OrMaskToMethodFlags(dex_file, "<init>", kAccStatic);
+      },
+      "Constructor 1 is not flagged correctly wrt/ static");
+  static constexpr uint32_t kInitNotAllowed[] = {
+      kAccFinal,
+      kAccSynchronized,
+      kAccBridge
+  };
+  for (size_t i = 0; i < arraysize(kInitNotAllowed); ++i) {
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "init_not_allowed_flags",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
+          OrMaskToMethodFlags(dex_file, "<init>", kInitNotAllowed[i]);
+        },
+        "Constructor 1 flagged inappropriately");
+  }
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsMethods) {
+  constexpr const char* kMethods[] = { "foo", "bar"};
+  for (size_t i = 0; i < arraysize(kMethods); ++i) {
+    // Make sure we reject non-constructors marked as constructors.
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_non_constructor",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kMethods[i], kAccConstructor);
+        },
+        "is marked constructor, but doesn't match name");
+
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_native_with_code",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kMethods[i], kAccNative);
+        },
+        "has code, but is marked native or abstract");
+
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_abstract_with_code",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract);
+        },
+        "has code, but is marked native or abstract");
+
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_non_abstract_native_no_code",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          RemoveCode(dex_file, kMethods[i]);
+        },
+        "has no code, but is not marked native or abstract");
+
+    // Abstract methods may not have the following flags.
+    constexpr uint32_t kAbstractDisallowed[] = {
+        kAccPrivate,
+        kAccStatic,
+        kAccFinal,
+        kAccNative,
+        kAccStrict,
+        kAccSynchronized,
+    };
+    for (size_t j = 0; j < arraysize(kAbstractDisallowed); ++j) {
+      VerifyModification(
+          kMethodFlagsTestDex,
+          "method_flags_abstract_and_disallowed_no_code",
+          [&](DexFile* dex_file) {
+            ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+            ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+            RemoveCode(dex_file, kMethods[i]);
+
+            // Can't check private and static on the public method ("bar"), as it's in the
+            // virtual list and would give a different (list mismatch) error.
+            if (((GetMethodFlags(dex_file, kMethods[i]) & kAccPublic) != 0) &&
+                ((kAbstractDisallowed[j] & (kAccPrivate | kAccStatic)) != 0)) {
+              // Use another breaking flag.
+              OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract | kAccFinal);
+            } else {
+              OrMaskToMethodFlags(dex_file, kMethods[i], kAccAbstract | kAbstractDisallowed[j]);
+            }
+          },
+          "has disallowed access flags");
+    }
+
+    // Only one of public-private-protected.
+    for (size_t j = 1; j < 8; ++j) {
+      if (POPCOUNT(j) < 2) {
+        continue;
+      }
+      // Technically, the loop index j could serve as the mask directly, since public, private
+      // and protected occupy the low three bits, but build the mask explicitly to be defensive.
+      uint32_t mask = ((j & 1) != 0 ? kAccPrivate : 0) |
+                      ((j & 2) != 0 ? kAccProtected : 0) |
+                      ((j & 4) != 0 ? kAccPublic : 0);
+      VerifyModification(
+          kMethodFlagsTestDex,
+          "method_flags_one_of_ppp",
+          [&](DexFile* dex_file) {
+            ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+            ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+            ApplyMaskToMethodFlags(dex_file, kMethods[i], ~kAccPublic);
+            OrMaskToMethodFlags(dex_file, kMethods[i], mask);
+          },
+          "Method may have only one of public/protected/private");
+    }
+  }
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsIgnoredOK) {
+  constexpr const char* kMethods[] = { "<clinit>", "<init>", "foo", "bar"};
+  for (size_t i = 0; i < arraysize(kMethods); ++i) {
+    // All interesting method flags; other flags are to be ignored.
+    constexpr uint32_t kAllMethodFlags =
+        kAccPublic |
+        kAccPrivate |
+        kAccProtected |
+        kAccStatic |
+        kAccFinal |
+        kAccSynchronized |
+        kAccBridge |
+        kAccVarargs |
+        kAccNative |
+        kAccAbstract |
+        kAccStrict |
+        kAccSynthetic;
+    constexpr uint32_t kIgnoredMask = ~kAllMethodFlags & 0xFFFF;
+    VerifyModification(
+        kMethodFlagsTestDex,
+        "method_flags_ignored",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToMethodFlags(dex_file, kMethods[i], kIgnoredMask);
+        },
+        nullptr);
+  }
+}
+
+// Set of dex files for interface method tests. As it's not easy to mutate method names in
+// place, the bad cases are split into separate dex files.
+
+// Interface with an instance constructor.
+//
+// .class public interface LInterfaceMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+//     return-void
+// .end method
+//
+// .method public constructor <init>()V
+// .registers 1
+//     return-void
+// .end method
+static const char kMethodFlagsInterfaceWithInit[] =
+    "ZGV4CjAzNQDRNt+hZ6X3I+xe66iVlCW7h9I38HmN4SvUAQAAcAAAAHhWNBIAAAAAAAAAAEwBAAAF"
+    "AAAAcAAAAAMAAACEAAAAAQAAAJAAAAAAAAAAAAAAAAIAAACcAAAAAQAAAKwAAAAIAQAAzAAAAMwA"
+    "AADWAAAA3gAAAPYAAAAKAQAAAgAAAAMAAAAEAAAABAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAQAA"
+    "AAAAAAABAgAAAQAAAAAAAAD/////AAAAADoBAAAAAAAACDxjbGluaXQ+AAY8aW5pdD4AFkxJbnRl"
+    "cmZhY2VNZXRob2RGbGFnczsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgAAAAAAAAAAAQAAAAAAAAAA"
+    "AAAAAQAAAA4AAAABAAEAAAAAAAAAAAABAAAADgAAAAIAAImABJQCAYGABKgCAAALAAAAAAAAAAEA"
+    "AAAAAAAAAQAAAAUAAABwAAAAAgAAAAMAAACEAAAAAwAAAAEAAACQAAAABQAAAAIAAACcAAAABgAA"
+    "AAEAAACsAAAAAiAAAAUAAADMAAAAAxAAAAEAAAAQAQAAASAAAAIAAAAUAQAAACAAAAEAAAA6AQAA"
+    "ABAAAAEAAABMAQAA";
+
+// Standard interface. Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceMethodFlags;
+// .super Ljava/lang/Object;
+//
+// .method public static constructor <clinit>()V
+// .registers 1
+//     return-void
+// .end method
+//
+// .method public abstract declared-synchronized foo()V
+// .end method
+static const char kMethodFlagsInterface[] =
+    "ZGV4CjAzNQCOM0odZ5bws1d9GSmumXaK5iE/7XxFpOm8AQAAcAAAAHhWNBIAAAAAAAAAADQBAAAF"
+    "AAAAcAAAAAMAAACEAAAAAQAAAJAAAAAAAAAAAAAAAAIAAACcAAAAAQAAAKwAAADwAAAAzAAAAMwA"
+    "AADWAAAA7gAAAAIBAAAFAQAAAQAAAAIAAAADAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAAAABAAA"
+    "AAAAAAABAgAAAQAAAAAAAAD/////AAAAACIBAAAAAAAACDxjbGluaXQ+ABZMSW50ZXJmYWNlTWV0"
+    "aG9kRmxhZ3M7ABJMamF2YS9sYW5nL09iamVjdDsAAVYAA2ZvbwAAAAAAAAABAAAAAAAAAAAAAAAB"
+    "AAAADgAAAAEBAImABJACAYGICAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAUAAABwAAAAAgAAAAMA"
+    "AACEAAAAAwAAAAEAAACQAAAABQAAAAIAAACcAAAABgAAAAEAAACsAAAAAiAAAAUAAADMAAAAAxAA"
+    "AAEAAAAMAQAAASAAAAEAAAAQAQAAACAAAAEAAAAiAQAAABAAAAEAAAA0AQAA";
+
+// To simplify generation of interesting "sub-states" of src_value, allow a "simple" mask to apply
+// to a src_value, such that mask bit 0 applies to the lowest set bit in src_value, and so on.
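+// For example, ApplyMaskShifted(0b101000, 0b01) yields 0b001000 (mask bit 0 keeps the lowest
+// set bit of src_value), while ApplyMaskShifted(0b101000, 0b10) yields 0b100000.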
+static uint32_t ApplyMaskShifted(uint32_t src_value, uint32_t mask) {
+  uint32_t result = 0;
+  uint32_t mask_index = 0;
+  while (src_value != 0) {
+    uint32_t index = CTZ(src_value);
+    if (((src_value & (1 << index)) != 0) &&
+        ((mask & (1 << mask_index)) != 0)) {
+      result |= (1 << index);
+    }
+    src_value &= ~(1 << index);
+    mask_index++;
+  }
+  return result;
+}
+
+TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
+  // Reject interface with <init>.
+  VerifyModification(
+      kMethodFlagsInterfaceWithInit,
+      "method_flags_interface_with_init",
+      [](DexFile* dex_file ATTRIBUTE_UNUSED) {},
+      "Non-clinit interface method 1 should not have code");
+
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_ok",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+      },
+      nullptr);
+
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_non_public",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+      },
+      "Interface method 1 is not public and abstract");
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_non_abstract",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccAbstract);
+      },
+      "Method 1 has no code, but is not marked native or abstract");
+
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_static",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        OrMaskToMethodFlags(dex_file, "foo", kAccStatic);
+      },
+      "Direct/virtual method 1 not in expected list 0");
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_private",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+        OrMaskToMethodFlags(dex_file, "foo", kAccPrivate);
+      },
+      "Direct/virtual method 1 not in expected list 0");
+
+  VerifyModification(
+      kMethodFlagsInterface,
+      "method_flags_interface_protected",
+      [](DexFile* dex_file) {
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+        OrMaskToMethodFlags(dex_file, "foo", kAccProtected);
+      },
+      "Interface method 1 is not public and abstract");
+
+  constexpr uint32_t kAllMethodFlags =
+      kAccPublic |
+      kAccPrivate |
+      kAccProtected |
+      kAccStatic |
+      kAccFinal |
+      kAccSynchronized |
+      kAccBridge |
+      kAccVarargs |
+      kAccNative |
+      kAccAbstract |
+      kAccStrict |
+      kAccSynthetic;
+  constexpr uint32_t kInterfaceMethodFlags =
+      kAccPublic | kAccAbstract | kAccVarargs | kAccBridge | kAccSynthetic;
+  constexpr uint32_t kInterfaceDisallowed = kAllMethodFlags &
+                                            ~kInterfaceMethodFlags &
+                                            // Already tested above; these trigger different
+                                            // errors and thus need separate handling.
+                                            ~kAccStatic &
+                                            ~kAccPrivate &
+                                            ~kAccProtected;
+  static_assert(kInterfaceDisallowed != 0, "There should be disallowed flags.");
+
+  uint32_t bits = POPCOUNT(kInterfaceDisallowed);
+  for (uint32_t i = 1; i < (1u << bits); ++i) {
+    VerifyModification(
+        kMethodFlagsInterface,
+        "method_flags_interface_non_abstract",
+        [&](DexFile* dex_file) {
+          ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+          uint32_t mask = ApplyMaskShifted(kInterfaceDisallowed, i);
+          if ((mask & kAccProtected) != 0) {
+            mask &= ~kAccProtected;
+            ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
+          }
+          OrMaskToMethodFlags(dex_file, "foo", mask);
+        },
+        "Abstract method 1 has disallowed access flags");
+  }
+}
+
+///////////////////////////////////////////////////////////////////
+
+// Field flags.
+
+// Find the field data for the first field with the given name (from class 0). Note: the pointer
+// is to the access flags, so that the caller doesn't have to handle the leb128-encoded
+// field-index delta.
+static const uint8_t* FindFieldData(const DexFile* dex_file, const char* name) {
+  const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
+  const uint8_t* class_data = dex_file->GetClassData(class_def);
+
+  ClassDataItemIterator it(*dex_file, class_data);
+
+  const uint8_t* trailing = class_data;
+  // Need to manually decode the four size entries (static_fields_size, instance_fields_size,
+  // direct_methods_size, virtual_methods_size). DataPointer() doesn't work for this, as the
+  // first element has already been loaded into the iterator.
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+  DecodeUnsignedLeb128(&trailing);
+
+  while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+    uint32_t field_index = it.GetMemberIndex();
+    uint32_t name_index = dex_file->GetFieldId(field_index).name_idx_;
+    const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+    const char* str = dex_file->GetStringData(string_id);
+    if (strcmp(name, str) == 0) {
+      DecodeUnsignedLeb128(&trailing);
+      return trailing;
+    }
+
+    trailing = it.DataPointer();
+    it.Next();
+  }
+
+  return nullptr;
+}
+
+// Set the field flags to the given value.
+static void SetFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+  uint8_t* field_flags_ptr = const_cast<uint8_t*>(FindFieldData(dex_file, field));
+  CHECK(field_flags_ptr != nullptr) << field;
+
+  // Unroll the encoding loop, as we only have three bytes anyway.
+  uint8_t base1 = static_cast<uint8_t>(mask & 0x7F);
+  *(field_flags_ptr++) = (base1 | 0x80);
+  mask >>= 7;
+
+  uint8_t base2 = static_cast<uint8_t>(mask & 0x7F);
+  *(field_flags_ptr++) = (base2 | 0x80);
+  mask >>= 7;
+
+  uint8_t base3 = static_cast<uint8_t>(mask & 0x7F);
+  *field_flags_ptr = base3;
+}
+
+static uint32_t GetFieldFlags(DexFile* dex_file, const char* field) {
+  const uint8_t* field_flags_ptr = FindFieldData(dex_file, field);
+  CHECK(field_flags_ptr != nullptr) << field;
+  return DecodeUnsignedLeb128(&field_flags_ptr);
+}
+
+// Apply the given mask to the field flags.
+static void ApplyMaskToFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+  uint32_t value = GetFieldFlags(dex_file, field);
+  value &= mask;
+  SetFieldFlags(dex_file, field, value);
+}
+
+// Or the given mask into the field flags.
+static void OrMaskToFieldFlags(DexFile* dex_file, const char* field, uint32_t mask) {
+  uint32_t value = GetFieldFlags(dex_file, field);
+  value |= mask;
+  SetFieldFlags(dex_file, field, value);
+}
+
+// Standard class. Use declared-synchronized again for 3B encoding.
+//
+// .class public LFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public foo:I
+//
+// .field declared-synchronized public static bar:I
+
+static const char kFieldFlagsTestDex[] =
+    "ZGV4CjAzNQBtLw7hydbfv4TdXidZyzAB70W7w3vnYJRwAQAAcAAAAHhWNBIAAAAAAAAAAAABAAAF"
+    "AAAAcAAAAAMAAACEAAAAAAAAAAAAAAACAAAAkAAAAAAAAAAAAAAAAQAAAKAAAACwAAAAwAAAAMAA"
+    "AADDAAAA0QAAAOUAAADqAAAAAAAAAAEAAAACAAAAAQAAAAMAAAABAAAABAAAAAEAAAABAAAAAgAA"
+    "AAAAAAD/////AAAAAPQAAAAAAAAAAUkADExGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7"
+    "AANiYXIAA2ZvbwAAAAAAAAEBAAAAiYAIAYGACAkAAAAAAAAAAQAAAAAAAAABAAAABQAAAHAAAAAC"
+    "AAAAAwAAAIQAAAAEAAAAAgAAAJAAAAAGAAAAAQAAAKAAAAACIAAABQAAAMAAAAADEAAAAQAAAPAA"
+    "AAAAIAAAAQAAAPQAAAAAEAAAAQAAAAABAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsBase) {
+  // Check that the file verifies once the bogus declared-synchronized flags are removed.
+  VerifyModification(
+      kFieldFlagsTestDex,
+      "field_flags_ok",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+      },
+      nullptr);
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsWrongList) {
+  // Mark the field so that it should appear in the opposite list (instance vs static).
+  VerifyModification(
+      kFieldFlagsTestDex,
+      "field_flags_wrong_list",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+        OrMaskToFieldFlags(dex_file, "foo", kAccStatic);
+      },
+      "Static/instance field not in expected list");
+  VerifyModification(
+      kFieldFlagsTestDex,
+      "field_flags_wrong_list",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+        ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToFieldFlags(dex_file, "bar", ~kAccStatic);
+      },
+      "Static/instance field not in expected list");
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsPPP) {
+  static const char* kFields[] = { "foo", "bar" };
+  for (size_t i = 0; i < arraysize(kFields); ++i) {
+    // Should be OK to remove public.
+    VerifyModification(
+        kFieldFlagsTestDex,
+        "field_flags_non_public",
+        [&](DexFile* dex_file) {
+          ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          ApplyMaskToFieldFlags(dex_file, kFields[i], ~kAccPublic);
+        },
+        nullptr);
+    constexpr uint32_t kAccFlags = kAccPublic | kAccPrivate | kAccProtected;
+    uint32_t bits = POPCOUNT(kAccFlags);
+    for (uint32_t j = 1; j < (1u << bits); ++j) {
+      if (POPCOUNT(j) < 2) {
+        continue;
+      }
+      VerifyModification(
+           kFieldFlagsTestDex,
+           "field_flags_ppp",
+           [&](DexFile* dex_file) {
+             ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+             ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+             ApplyMaskToFieldFlags(dex_file, kFields[i], ~kAccPublic);
+             uint32_t mask = ApplyMaskShifted(kAccFlags, j);
+             OrMaskToFieldFlags(dex_file, kFields[i], mask);
+           },
+           "Field may have only one of public/protected/private");
+    }
+  }
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsIgnoredOK) {
+  constexpr const char* kFields[] = { "foo", "bar"};
+  for (size_t i = 0; i < arraysize(kFields); ++i) {
+    // All interesting field flags; other flags are to be ignored.
+    constexpr uint32_t kAllFieldFlags =
+        kAccPublic |
+        kAccPrivate |
+        kAccProtected |
+        kAccStatic |
+        kAccFinal |
+        kAccVolatile |
+        kAccTransient |
+        kAccSynthetic |
+        kAccEnum;
+    constexpr uint32_t kIgnoredMask = ~kAllFieldFlags & 0xFFFF;
+    VerifyModification(
+        kFieldFlagsTestDex,
+        "field_flags_ignored",
+        [&](DexFile* dex_file) {
+          ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToFieldFlags(dex_file, kFields[i], kIgnoredMask);
+        },
+        nullptr);
+  }
+}
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsVolatileFinal) {
+  constexpr const char* kFields[] = { "foo", "bar"};
+  for (size_t i = 0; i < arraysize(kFields); ++i) {
+    VerifyModification(
+        kFieldFlagsTestDex,
+        "field_flags_final_and_volatile",
+        [&](DexFile* dex_file) {
+          ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+          ApplyMaskToFieldFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
+
+          OrMaskToFieldFlags(dex_file, kFields[i], kAccVolatile | kAccFinal);
+        },
+        "Fields may not be volatile and final");
+  }
+}
+
+// Standard interface. Needs to be separate from class as interfaces do not allow instance fields.
+// Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public static final foo:I
+
+static const char kFieldFlagsInterfaceTestDex[] =
+    "ZGV4CjAzNQCVMHfEimR1zZPk6hl6O9GPAYqkl3u0umFkAQAAcAAAAHhWNBIAAAAAAAAAAPQAAAAE"
+    "AAAAcAAAAAMAAACAAAAAAAAAAAAAAAABAAAAjAAAAAAAAAAAAAAAAQAAAJQAAACwAAAAtAAAALQA"
+    "AAC3AAAAzgAAAOIAAAAAAAAAAQAAAAIAAAABAAAAAwAAAAEAAAABAgAAAgAAAAAAAAD/////AAAA"
+    "AOwAAAAAAAAAAUkAFUxJbnRlcmZhY2VGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7AANm"
+    "b28AAAAAAAABAAAAAJmACAkAAAAAAAAAAQAAAAAAAAABAAAABAAAAHAAAAACAAAAAwAAAIAAAAAE"
+    "AAAAAQAAAIwAAAAGAAAAAQAAAJQAAAACIAAABAAAALQAAAADEAAAAQAAAOgAAAAAIAAAAQAAAOwA"
+    "AAAAEAAAAQAAAPQAAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsInterface) {
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+      },
+      nullptr);
+
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface_non_public",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+      },
+      "Interface field is not public final static");
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface_non_final",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccFinal);
+      },
+      "Interface field is not public final static");
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface_protected",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+        OrMaskToFieldFlags(dex_file, "foo", kAccProtected);
+      },
+      "Interface field is not public final static");
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface_private",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+        OrMaskToFieldFlags(dex_file, "foo", kAccPrivate);
+      },
+      "Interface field is not public final static");
+
+  VerifyModification(
+      kFieldFlagsInterfaceTestDex,
+      "field_flags_interface_synthetic",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+        OrMaskToFieldFlags(dex_file, "foo", kAccSynthetic);
+      },
+      nullptr);
+
+  constexpr uint32_t kAllFieldFlags =
+      kAccPublic |
+      kAccPrivate |
+      kAccProtected |
+      kAccStatic |
+      kAccFinal |
+      kAccVolatile |
+      kAccTransient |
+      kAccSynthetic |
+      kAccEnum;
+  constexpr uint32_t kInterfaceFieldFlags = kAccPublic | kAccStatic | kAccFinal | kAccSynthetic;
+  constexpr uint32_t kInterfaceDisallowed = kAllFieldFlags &
+                                            ~kInterfaceFieldFlags &
+                                            ~kAccProtected &
+                                            ~kAccPrivate;
+  static_assert(kInterfaceDisallowed != 0, "There should be disallowed flags.");
+
+  uint32_t bits = POPCOUNT(kInterfaceDisallowed);
+  for (uint32_t i = 1; i < (1u << bits); ++i) {
+    VerifyModification(
+        kFieldFlagsInterfaceTestDex,
+        "field_flags_interface_disallowed",
+        [&](DexFile* dex_file) {
+          ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+
+          uint32_t mask = ApplyMaskShifted(kInterfaceDisallowed, i);
+          if ((mask & kAccProtected) != 0) {
+            mask &= ~kAccProtected;
+            ApplyMaskToFieldFlags(dex_file, "foo", ~kAccPublic);
+          }
+          OrMaskToFieldFlags(dex_file, "foo", mask);
+        },
+        "Interface field has disallowed flag");
+  }
+}
+
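
The loop above enumerates every non-empty combination of disallowed flags: POPCOUNT gives
the number of candidate bits, and ApplyMaskShifted (not shown in this patch) is assumed to
spread the counter's low bits onto the set bits of the mask. A self-contained sketch of the
enumeration under that assumed semantics:

    #include <cstdint>
    #include <cstdio>

    // Spread the low popcount(mask) bits of value onto the set bits of mask.
    uint32_t SpreadOntoMask(uint32_t mask, uint32_t value) {
      uint32_t result = 0;
      for (uint32_t bit = 0; mask != 0; ++bit) {
        uint32_t lowest = mask & ~(mask - 1);  // Isolate the lowest set bit.
        if ((value & (1u << bit)) != 0) {
          result |= lowest;
        }
        mask &= ~lowest;
      }
      return result;
    }

    int main() {
      uint32_t disallowed = 0x40 | 0x80 | 0x4000;  // Example flag set.
      uint32_t bits = __builtin_popcount(disallowed);  // POPCOUNT in the tests.
      for (uint32_t i = 1; i < (1u << bits); ++i) {
        std::printf("0x%x\n", SpreadOntoMask(disallowed, i));  // 7 combinations.
      }
    }
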
+// Standard bad interface. Needs to be separate from class as interfaces do not allow instance
+// fields. Use declared-synchronized again for 3B encoding.
+//
+// .class public interface LInterfaceFieldFlags;
+// .super Ljava/lang/Object;
+//
+// .field declared-synchronized public final foo:I
+
+static const char kFieldFlagsInterfaceBadTestDex[] =
+    "ZGV4CjAzNQByMUnqYKHBkUpvvNp+9CnZ2VyDkKnRN6VkAQAAcAAAAHhWNBIAAAAAAAAAAPQAAAAE"
+    "AAAAcAAAAAMAAACAAAAAAAAAAAAAAAABAAAAjAAAAAAAAAAAAAAAAQAAAJQAAACwAAAAtAAAALQA"
+    "AAC3AAAAzgAAAOIAAAAAAAAAAQAAAAIAAAABAAAAAwAAAAEAAAABAgAAAgAAAAAAAAD/////AAAA"
+    "AOwAAAAAAAAAAUkAFUxJbnRlcmZhY2VGaWVsZEZsYWdzOwASTGphdmEvbGFuZy9PYmplY3Q7AANm"
+    "b28AAAAAAAAAAQAAAJGACAkAAAAAAAAAAQAAAAAAAAABAAAABAAAAHAAAAACAAAAAwAAAIAAAAAE"
+    "AAAAAQAAAIwAAAAGAAAAAQAAAJQAAAACIAAABAAAALQAAAADEAAAAQAAAOgAAAAAIAAAAQAAAOwA"
+    "AAAAEAAAAQAAAPQAAAA=";
+
+TEST_F(DexFileVerifierTest, FieldAccessFlagsInterfaceNonStatic) {
+  VerifyModification(
+      kFieldFlagsInterfaceBadTestDex,
+      "field_flags_interface_non_static",
+      [](DexFile* dex_file) {
+        ApplyMaskToFieldFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
+      },
+      "Interface field is not public final static");
 }
 
 // Generated from:
@@ -305,15 +1275,14 @@
     ASSERT_TRUE(raw.get() != nullptr) << error_msg;
   }
 
-  {
-    // Modify the debug information entry.
-    ScratchFile tmp;
-    std::string error_msg;
-    bool success = !ModifyAndLoad(kDebugInfoTestDex, tmp.GetFilename().c_str(), 416, 0x14U,
-                                  &error_msg);
-    ASSERT_TRUE(success);
-    ASSERT_NE(error_msg.find("DBG_START_LOCAL type_idx"), std::string::npos) << error_msg;
-  }
+  // Modify the debug information entry.
+  VerifyModification(
+      kDebugInfoTestDex,
+      "debug_start_type_idx",
+      [](DexFile* dex_file) {
+        *(const_cast<uint8_t*>(dex_file->Begin()) + 416) = 0x14U;
+      },
+      "DBG_START_LOCAL type_idx");
 }
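
The rewritten test uses the same VerifyModification helper as the field-flag tests above:
decode a known-good dex, mutate it in place, then check the verifier's message. A sketch of
that shape (DecodeBase64 and VerifyDex are placeholders for the fixture's helpers, not
ART's API):

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    std::vector<uint8_t> DecodeBase64(const std::string&) { return {}; }  // Placeholder.
    bool VerifyDex(std::vector<uint8_t>&, std::string* error) {           // Placeholder.
      *error = "";
      return true;
    }

    // nullptr as expected_error means the modified dex must still verify.
    void VerifyModification(const std::string& base64_dex,
                            const std::function<void(std::vector<uint8_t>*)>& mutate,
                            const char* expected_error) {
      std::vector<uint8_t> dex = DecodeBase64(base64_dex);
      mutate(&dex);
      std::string error;
      bool ok = VerifyDex(dex, &error);
      if (expected_error == nullptr) {
        assert(ok);
      } else {
        assert(!ok && error.find(expected_error) != std::string::npos);
      }
    }
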
 
 }  // namespace art
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 63c02ed..973cd7d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -407,6 +407,10 @@
     backward_branch_listeners_.push_back(listener);
     have_backward_branch_listeners_ = true;
   }
+  if (HasEvent(kInvokeVirtualOrInterface, events)) {
+    invoke_virtual_or_interface_listeners_.push_back(listener);
+    have_invoke_virtual_or_interface_listeners_ = true;
+  }
   if (HasEvent(kDexPcMoved, events)) {
     std::list<InstrumentationListener*>* modified;
     if (have_dex_pc_listeners_) {
@@ -466,13 +470,17 @@
     have_method_exit_listeners_ = !method_exit_listeners_.empty();
   }
   if (HasEvent(kMethodUnwind, events) && have_method_unwind_listeners_) {
-      method_unwind_listeners_.remove(listener);
-      have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
+    method_unwind_listeners_.remove(listener);
+    have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
   }
   if (HasEvent(kBackwardBranch, events) && have_backward_branch_listeners_) {
-      backward_branch_listeners_.remove(listener);
-      have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
-    }
+    backward_branch_listeners_.remove(listener);
+    have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
+  }
+  if (HasEvent(kInvokeVirtualOrInterface, events) && have_invoke_virtual_or_interface_listeners_) {
+    invoke_virtual_or_interface_listeners_.remove(listener);
+    have_invoke_virtual_or_interface_listeners_ = !invoke_virtual_or_interface_listeners_.empty();
+  }
   if (HasEvent(kDexPcMoved, events) && have_dex_pc_listeners_) {
     std::list<InstrumentationListener*>* modified =
         new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
@@ -908,6 +916,16 @@
   }
 }
 
+void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
+                                                   mirror::Object* this_object,
+                                                   ArtMethod* caller,
+                                                   uint32_t dex_pc,
+                                                   ArtMethod* callee) const {
+  for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
+    listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee);
+  }
+}
+
 void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
                                          ArtMethod* method, uint32_t dex_pc,
                                          ArtField* field) const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 93ff567..6711ac3 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -97,6 +97,14 @@
   // Call-back for when we get a backward branch.
   virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
       SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+
+  // Call-back for when we get an invokevirtual or an invokeinterface.
+  virtual void InvokeVirtualOrInterface(Thread* thread,
+                                        mirror::Object* this_object,
+                                        ArtMethod* caller,
+                                        uint32_t dex_pc,
+                                        ArtMethod* callee)
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 };
 
 // Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -114,6 +122,7 @@
     kFieldWritten = 0x20,
     kExceptionCaught = 0x40,
     kBackwardBranch = 0x80,
+    kInvokeVirtualOrInterface = 0x100,
   };
 
   enum class InstrumentationLevel {
@@ -257,6 +266,10 @@
     return have_backward_branch_listeners_;
   }
 
+  bool HasInvokeVirtualOrInterfaceListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return have_invoke_virtual_or_interface_listeners_;
+  }
+
   bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
         have_field_read_listeners_ || have_field_write_listeners_ ||
@@ -325,6 +338,17 @@
     }
   }
 
+  void InvokeVirtualOrInterface(Thread* thread,
+                                mirror::Object* this_object,
+                                ArtMethod* caller,
+                                uint32_t dex_pc,
+                                ArtMethod* callee) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
+      InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
+    }
+  }
+
   // Inform listeners that an exception was caught.
   void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
       SHARED_REQUIRES(Locks::mutator_lock_);
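
The new event is wired up like the existing ones: a plain boolean
(have_invoke_virtual_or_interface_listeners_) is tested inline under UNLIKELY, and the
listener iteration stays out of line in the Impl method declared just below. A
self-contained sketch of that fast-path pattern, with hypothetical names (EventSource,
Listener):

    #include <list>

    #define UNLIKELY(x) __builtin_expect(!!(x), false)  // As in ART's base/macros.h.

    struct Listener {
      virtual void OnEvent(int payload) = 0;
      virtual ~Listener() {}
    };

    class EventSource {
     public:
      void AddListener(Listener* listener) {
        listeners_.push_back(listener);
        has_listeners_ = true;  // Cached so the hot path never touches the list.
      }

      // Hot path: a single well-predicted branch when nobody is listening.
      void FireEvent(int payload) const {
        if (UNLIKELY(has_listeners_)) {
          FireEventImpl(payload);
        }
      }

     private:
      // Cold path, kept out of line so callers stay small.
      void FireEventImpl(int payload) const {
        for (Listener* listener : listeners_) {
          listener->OnEvent(payload);
        }
      }

      bool has_listeners_ = false;
      std::list<Listener*> listeners_;
    };
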
@@ -385,6 +409,12 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
   void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const
       SHARED_REQUIRES(Locks::mutator_lock_);
+  void InvokeVirtualOrInterfaceImpl(Thread* thread,
+                                    mirror::Object* this_object,
+                                    ArtMethod* caller,
+                                    uint32_t dex_pc,
+                                    ArtMethod* callee) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
                            ArtMethod* method, uint32_t dex_pc,
                            ArtField* field) const
@@ -451,6 +481,9 @@
   // Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
   bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
 
+  // Do we have any invoke listeners? Short-cut to avoid taking the instrumentation_lock_.
+  bool have_invoke_virtual_or_interface_listeners_ GUARDED_BY(Locks::mutator_lock_);
+
   // Contains the instrumentation level required by each client of the instrumentation identified
   // by a string key.
   typedef SafeMap<const char*, InstrumentationLevel> InstrumentationLevelTable;
@@ -461,6 +494,8 @@
   std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
   std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+  std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
+      GUARDED_BY(Locks::mutator_lock_);
   std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
       GUARDED_BY(Locks::mutator_lock_);
   std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 56fe9ef..c7cc68a 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -36,7 +36,8 @@
     : received_method_enter_event(false), received_method_exit_event(false),
       received_method_unwind_event(false), received_dex_pc_moved_event(false),
       received_field_read_event(false), received_field_written_event(false),
-      received_exception_caught_event(false), received_backward_branch_event(false) {}
+      received_exception_caught_event(false), received_backward_branch_event(false),
+      received_invoke_virtual_or_interface_event(false) {}
 
   virtual ~TestInstrumentationListener() {}
 
@@ -105,6 +106,15 @@
     received_backward_branch_event = true;
   }
 
+  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
+                                mirror::Object* this_object ATTRIBUTE_UNUSED,
+                                ArtMethod* caller ATTRIBUTE_UNUSED,
+                                uint32_t dex_pc ATTRIBUTE_UNUSED,
+                                ArtMethod* callee ATTRIBUTE_UNUSED)
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    received_invoke_virtual_or_interface_event = true;
+  }
+
   void Reset() {
     received_method_enter_event = false;
     received_method_exit_event = false;
@@ -114,6 +124,7 @@
     received_field_written_event = false;
     received_exception_caught_event = false;
     received_backward_branch_event = false;
+    received_invoke_virtual_or_interface_event = false;
   }
 
   bool received_method_enter_event;
@@ -124,6 +135,7 @@
   bool received_field_written_event;
   bool received_exception_caught_event;
   bool received_backward_branch_event;
+  bool received_invoke_virtual_or_interface_event;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(TestInstrumentationListener);
@@ -287,6 +299,8 @@
         return instr->HasExceptionCaughtListeners();
       case instrumentation::Instrumentation::kBackwardBranch:
         return instr->HasBackwardBranchListeners();
+      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+        return instr->HasInvokeVirtualOrInterfaceListeners();
       default:
         LOG(FATAL) << "Unknown instrumentation event " << event_type;
         UNREACHABLE();
@@ -330,6 +344,9 @@
       case instrumentation::Instrumentation::kBackwardBranch:
         instr->BackwardBranch(self, method, dex_pc);
         break;
+      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+        instr->InvokeVirtualOrInterface(self, obj, method, dex_pc, method);
+        break;
       default:
         LOG(FATAL) << "Unknown instrumentation event " << event_type;
         UNREACHABLE();
@@ -355,6 +372,8 @@
         return listener.received_exception_caught_event;
       case instrumentation::Instrumentation::kBackwardBranch:
         return listener.received_backward_branch_event;
+      case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
+        return listener.received_invoke_virtual_or_interface_event;
       default:
         LOG(FATAL) << "Unknown instrumentation event " << event_type;
         UNREACHABLE();
@@ -418,6 +437,10 @@
   TestEvent(instrumentation::Instrumentation::kBackwardBranch);
 }
 
+TEST_F(InstrumentationTest, InvokeVirtualOrInterfaceEvent) {
+  TestEvent(instrumentation::Instrumentation::kInvokeVirtualOrInterface);
+}
+
 TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
   ScopedObjectAccess soa(Thread::Current());
   jobject class_loader = LoadDex("Instrumentation");
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 6c6232c..3ac80c6 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -399,14 +399,19 @@
   JValue value;
   // Set value to last known result in case the shadow frame chain is empty.
   value.SetJ(ret_val->GetJ());
+  // Are we executing the first shadow frame?
+  bool first = true;
   while (shadow_frame != nullptr) {
     self->SetTopOfShadowStack(shadow_frame);
     const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
     const uint32_t dex_pc = shadow_frame->GetDexPC();
     uint32_t new_dex_pc;
     if (UNLIKELY(self->IsExceptionPending())) {
+      // If we deoptimize from the QuickExceptionHandler, we already reported the exception to
+      // the instrumentation. To avoid reporting it a second time, we simply pass a
+      // null Instrumentation*.
       const instrumentation::Instrumentation* const instrumentation =
-          Runtime::Current()->GetInstrumentation();
+          first ? nullptr : Runtime::Current()->GetInstrumentation();
       uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame, dex_pc,
                                                                     instrumentation);
       new_dex_pc = found_dex_pc;  // the dex pc of a matching catch handler
@@ -424,6 +429,7 @@
     ShadowFrame* old_frame = shadow_frame;
     shadow_frame = shadow_frame->GetLink();
     ShadowFrame::DeleteDeoptimizedFrame(old_frame);
+    first = false;
   }
   ret_val->SetJ(value.GetJ());
 }
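
The `first` flag exists because the exception was already reported by the code that
requested the deoptimization; only frames below the topmost one should notify listeners
again. A compilable sketch of that suppression, with stand-in types:

    #include <iostream>

    struct Instrumentation {};
    struct ShadowFrame { ShadowFrame* link; };

    Instrumentation* GetInstrumentation() {
      static Instrumentation instrumentation;
      return &instrumentation;
    }

    // As with FindNextInstructionFollowingException: a null Instrumentation*
    // means "do not report anything to listeners".
    void HandlePendingException(ShadowFrame*, const Instrumentation* instrumentation) {
      std::cout << (instrumentation == nullptr ? "suppressed\n" : "reported\n");
    }

    void UnwindShadowFrames(ShadowFrame* top) {
      bool first = true;  // The topmost frame's exception was already reported.
      for (ShadowFrame* frame = top; frame != nullptr; frame = frame->link) {
        HandlePendingException(frame, first ? nullptr : GetInstrumentation());
        first = false;
      }
    }
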
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index af67379..6602840 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -414,20 +414,21 @@
 #undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
 #undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
 
+// A null Instrumentation* means we must not report anything to the instrumentation.
 uint32_t FindNextInstructionFollowingException(
     Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc,
     const instrumentation::Instrumentation* instrumentation) {
   self->VerifyStack();
   StackHandleScope<2> hs(self);
   Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
-  if (instrumentation->HasExceptionCaughtListeners()
+  if (instrumentation != nullptr && instrumentation->HasExceptionCaughtListeners()
       && self->IsExceptionThrownByCurrentMethod(exception.Get())) {
     instrumentation->ExceptionCaughtEvent(self, exception.Get());
   }
   bool clear_exception = false;
   uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(
       hs.NewHandle(exception->GetClass()), dex_pc, &clear_exception);
-  if (found_dex_pc == DexFile::kDexNoIndex) {
+  if (found_dex_pc == DexFile::kDexNoIndex && instrumentation != nullptr) {
     // Exception is not caught by the current method. We will unwind to the
     // caller. Notify any instrumentation listener.
     instrumentation->MethodUnwindEvent(self, shadow_frame.GetThisObject(),
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index fdefb9f..7398778 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -265,6 +265,13 @@
     result->SetJ(0);
     return false;
   } else {
+    if (type == kVirtual || type == kInterface) {
+      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+      if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
+        instrumentation->InvokeVirtualOrInterface(
+            self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+      }
+    }
     return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
                                              result);
   }
@@ -297,6 +304,11 @@
     result->SetJ(0);
     return false;
   } else {
+    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+    if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
+      instrumentation->InvokeVirtualOrInterface(
+          self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
+    }
     // No need to check since we've been quickened.
     return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
   }
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 26a4fe4..683b2cf 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -39,6 +39,8 @@
       options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
   jit_options->compile_threshold_ =
       options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+  jit_options->warmup_threshold_ =
+      options.GetOrDefault(RuntimeArgumentMap::JITWarmupThreshold);
   jit_options->dump_info_on_shutdown_ =
       options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
   return jit_options;
@@ -160,17 +162,19 @@
   }
 }
 
-void Jit::CreateInstrumentationCache(size_t compile_threshold) {
+void Jit::CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold) {
   CHECK_GT(compile_threshold, 0U);
   Runtime* const runtime = Runtime::Current();
   runtime->GetThreadList()->SuspendAll(__FUNCTION__);
   // Add Jit interpreter instrumentation, tells the interpreter when to notify the jit to compile
   // something.
-  instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
+  instrumentation_cache_.reset(
+      new jit::JitInstrumentationCache(compile_threshold, warmup_threshold));
   runtime->GetInstrumentation()->AddListener(
       new jit::JitInstrumentationListener(instrumentation_cache_.get()),
       instrumentation::Instrumentation::kMethodEntered |
-      instrumentation::Instrumentation::kBackwardBranch);
+      instrumentation::Instrumentation::kBackwardBranch |
+      instrumentation::Instrumentation::kInvokeVirtualOrInterface);
   runtime->GetThreadList()->ResumeAll();
 }
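
AddListener takes an OR-ed mask of InstrumentationEvent flags, so the JIT listener is
subscribed to method entries, backward branches, and virtual/interface invokes in a single
registration. A minimal sketch of the mask arithmetic, assuming kMethodEntered is 0x1 in
the full enum (only 0x20 through 0x100 are visible in the hunk above):

    #include <cassert>
    #include <cstdint>

    enum InstrumentationEvent : uint32_t {
      kMethodEntered = 0x1,  // Assumed value; not shown in this diff.
      kBackwardBranch = 0x80,
      kInvokeVirtualOrInterface = 0x100,
    };

    int main() {
      // One registration carries every subscribed event in a single bitmask.
      uint32_t events = kMethodEntered | kBackwardBranch | kInvokeVirtualOrInterface;
      assert(events == 0x181u);
      // HasEvent-style test: is a given event included in the mask?
      assert((events & kInvokeVirtualOrInterface) != 0u);
    }
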
 
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index ca6e7ea..643bc23 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,13 +43,14 @@
 class Jit {
  public:
   static constexpr bool kStressMode = kIsDebugBuild;
-  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 1 : 1000;
+  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 1000;
+  static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
 
   virtual ~Jit();
   static Jit* Create(JitOptions* options, std::string* error_msg);
   bool CompileMethod(ArtMethod* method, Thread* self)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CreateInstrumentationCache(size_t compile_threshold);
+  void CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold);
   void CreateThreadPool();
   CompilerCallbacks* GetCompilerCallbacks() {
     return compiler_callbacks_;
@@ -95,6 +96,9 @@
   size_t GetCompileThreshold() const {
     return compile_threshold_;
   }
+  size_t GetWarmupThreshold() const {
+    return warmup_threshold_;
+  }
   size_t GetCodeCacheCapacity() const {
     return code_cache_capacity_;
   }
@@ -112,6 +116,7 @@
   bool use_jit_;
   size_t code_cache_capacity_;
   size_t compile_threshold_;
+  size_t warmup_threshold_;
   bool dump_info_on_shutdown_;
 
   JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cd5f4cb..4c53162 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -82,9 +82,19 @@
   return code_cache_ptr_ - size;
 }
 
+uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
+  MutexLock mu(self, lock_);
+  size = RoundUp(size, sizeof(void*));
+  if (size > DataCacheRemain()) {
+    return nullptr;
+  }
+  data_cache_ptr_ += size;
+  return data_cache_ptr_ - size;
+}
+
 uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
   MutexLock mu(self, lock_);
-  const size_t size = end - begin;
+  const size_t size = RoundUp(end - begin, sizeof(void*));
   if (size > DataCacheRemain()) {
     return nullptr;  // Out of space in the data cache.
   }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9707f6f..f485e4a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,6 +86,9 @@
   // Reserve a region of code of size at least "size". Returns null if there is no more room.
   uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
 
+  // Reserve a region of data of size at least "size". Returns null if there is no more room.
+  uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+
   // Add a data array of size (end - begin) with the associated contents, returns null if there
   // is no more room.
   uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 258c29d..f485682 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -26,16 +26,12 @@
 
 class JitCompileTask : public Task {
  public:
-  JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache)
-      : method_(method), cache_(cache) {
-  }
+  explicit JitCompileTask(ArtMethod* method) : method_(method) {}
 
   virtual void Run(Thread* self) OVERRIDE {
     ScopedObjectAccess soa(self);
     VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
-    if (Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
-      cache_->SignalCompiled(self, method_);
-    } else {
+    if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
       VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
     }
   }
@@ -46,13 +42,14 @@
 
  private:
   ArtMethod* const method_;
-  JitInstrumentationCache* const cache_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
 
-JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
-    : lock_("jit instrumentation lock"), hot_method_threshold_(hot_method_threshold) {
+JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold,
+                                                 size_t warm_method_threshold)
+    : hot_method_threshold_(hot_method_threshold),
+      warm_method_threshold_(warm_method_threshold) {
 }
 
 void JitInstrumentationCache::CreateThreadPool() {
@@ -60,20 +57,11 @@
 }
 
 void JitInstrumentationCache::DeleteThreadPool() {
+  DCHECK(Runtime::Current()->IsShuttingDown(Thread::Current()));
   thread_pool_.reset();
 }
 
-void JitInstrumentationCache::SignalCompiled(Thread* self, ArtMethod* method) {
-  ScopedObjectAccessUnchecked soa(self);
-  jmethodID method_id = soa.EncodeMethod(method);
-  MutexLock mu(self, lock_);
-  auto it = samples_.find(method_id);
-  if (it != samples_.end()) {
-    samples_.erase(it);
-  }
-}
-
-void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t count) {
+void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t) {
   ScopedObjectAccessUnchecked soa(self);
   // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
   // than we want resulting in samples even after the method is compiled.
@@ -81,34 +69,21 @@
       Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
     return;
   }
-  jmethodID method_id = soa.EncodeMethod(method);
-  bool is_hot = false;
-  {
-    MutexLock mu(self, lock_);
-    size_t sample_count = 0;
-    auto it = samples_.find(method_id);
-    if (it != samples_.end()) {
-      it->second += count;
-      sample_count = it->second;
-    } else {
-      sample_count = count;
-      samples_.insert(std::make_pair(method_id, count));
-    }
-    // If we have enough samples, mark as hot and request Jit compilation.
-    if (sample_count >= hot_method_threshold_ && sample_count - count < hot_method_threshold_) {
-      is_hot = true;
+  if (thread_pool_.get() == nullptr) {
+    DCHECK(Runtime::Current()->IsShuttingDown(self));
+    return;
+  }
+  uint16_t sample_count = method->IncrementCounter();
+  if (sample_count == warm_method_threshold_) {
+    ProfilingInfo* info = method->CreateProfilingInfo();
+    if (info != nullptr) {
+      VLOG(jit) << "Start profiling " << PrettyMethod(method);
     }
   }
-  if (is_hot) {
-    if (thread_pool_.get() != nullptr) {
-      thread_pool_->AddTask(self, new JitCompileTask(
-          method->GetInterfaceMethodIfProxy(sizeof(void*)), this));
-      thread_pool_->StartWorkers(self);
-    } else {
-      VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
-      Runtime::Current()->GetJit()->CompileMethod(
-          method->GetInterfaceMethodIfProxy(sizeof(void*)), self);
-    }
+  if (sample_count == hot_method_threshold_) {
+    thread_pool_->AddTask(self, new JitCompileTask(
+        method->GetInterfaceMethodIfProxy(sizeof(void*))));
+    thread_pool_->StartWorkers(self);
   }
 }
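
AddSamples now drives two one-shot transitions off a per-method counter: at the warmup
threshold the method gets a ProfilingInfo (its inline caches start filling), and at the
compile threshold it is queued for JIT compilation. A runnable sketch of that scheme, using
the stress-mode defaults from jit.h above (warmup 1, compile 2); the types are stand-ins:

    #include <cstdint>
    #include <iostream>

    constexpr uint16_t kWarmThreshold = 1;  // kDefaultCompileThreshold / 2 in stress mode.
    constexpr uint16_t kHotThreshold = 2;   // kDefaultCompileThreshold in stress mode.

    struct Method {
      uint16_t counter = 0;
      uint16_t IncrementCounter() { return ++counter; }
    };

    // Equality (not >=) makes each transition fire exactly once, even though
    // samples keep arriving until compiled code is actually installed.
    void AddSample(Method& m) {
      uint16_t count = m.IncrementCounter();
      if (count == kWarmThreshold) {
        std::cout << "warm: allocate ProfilingInfo, start collecting inline caches\n";
      }
      if (count == kHotThreshold) {
        std::cout << "hot: enqueue a JIT compile task\n";
      }
    }

    int main() {
      Method m;
      for (int i = 0; i < 4; ++i) {
        AddSample(m);  // Prints the warm line once, then the hot line once.
      }
    }
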
 
@@ -117,5 +92,17 @@
   CHECK(instrumentation_cache_ != nullptr);
 }
 
+void JitInstrumentationListener::InvokeVirtualOrInterface(Thread* thread,
+                                                          mirror::Object* this_object,
+                                                          ArtMethod* caller,
+                                                          uint32_t dex_pc,
+                                                          ArtMethod* callee ATTRIBUTE_UNUSED) {
+  DCHECK(this_object != nullptr);
+  ProfilingInfo* info = caller->GetProfilingInfo();
+  if (info != nullptr) {
+    info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
+  }
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 0deaf8a..6fdef65 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -45,18 +45,15 @@
 // Keeps track of which methods are hot.
 class JitInstrumentationCache {
  public:
-  explicit JitInstrumentationCache(size_t hot_method_threshold);
+  JitInstrumentationCache(size_t hot_method_threshold, size_t warm_method_threshold);
   void AddSamples(Thread* self, ArtMethod* method, size_t samples)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
-  void SignalCompiled(Thread* self, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateThreadPool();
   void DeleteThreadPool();
 
  private:
-  Mutex lock_;
-  std::unordered_map<jmethodID, size_t> samples_;
   size_t hot_method_threshold_;
+  size_t warm_method_threshold_;
   std::unique_ptr<ThreadPool> thread_pool_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationCache);
@@ -66,37 +63,43 @@
  public:
   explicit JitInstrumentationListener(JitInstrumentationCache* cache);
 
-  virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
-                             ArtMethod* method, uint32_t /*dex_pc*/)
+  void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
+                     ArtMethod* method, uint32_t /*dex_pc*/)
       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     instrumentation_cache_->AddSamples(thread, method, 1);
   }
-  virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
-                            ArtMethod* /*method*/, uint32_t /*dex_pc*/,
-                            const JValue& /*return_value*/)
+  void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
+                    ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+                    const JValue& /*return_value*/)
       OVERRIDE { }
-  virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
-                            ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
-  virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
-                         ArtMethod* /*method*/, uint32_t /*dex_pc*/,
-                         ArtField* /*field*/) OVERRIDE { }
-  virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
-                            ArtMethod* /*method*/, uint32_t /*dex_pc*/,
-                            ArtField* /*field*/, const JValue& /*field_value*/)
+  void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
+                    ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
+  void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
+                 ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+                 ArtField* /*field*/) OVERRIDE { }
+  void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
+                    ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+                    ArtField* /*field*/, const JValue& /*field_value*/)
       OVERRIDE { }
-  virtual void ExceptionCaught(Thread* /*thread*/,
-                               mirror::Throwable* /*exception_object*/) OVERRIDE { }
+  void ExceptionCaught(Thread* /*thread*/,
+                       mirror::Throwable* /*exception_object*/) OVERRIDE { }
 
-  virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
-                          ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
+  void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
+                  ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
 
-  // We only care about how many dex instructions were executed in the Jit.
-  virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
+  void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK_LE(dex_pc_offset, 0);
     instrumentation_cache_->AddSamples(thread, method, 1);
   }
 
+  void InvokeVirtualOrInterface(Thread* thread,
+                                mirror::Object* this_object,
+                                ArtMethod* caller,
+                                uint32_t dex_pc,
+                                ArtMethod* callee)
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+
  private:
   JitInstrumentationCache* const instrumentation_cache_;
 
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
new file mode 100644
index 0000000..0c039f2
--- /dev/null
+++ b/runtime/jit/profiling_info.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiling_info.h"
+
+#include "art_method-inl.h"
+#include "dex_instruction.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
+  // Walk over the dex instructions of the method and keep track of
+  // instructions we are interested in profiling.
+  const uint16_t* code_ptr = nullptr;
+  const uint16_t* code_end = nullptr;
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    DCHECK(!method->IsNative());
+    const DexFile::CodeItem& code_item = *method->GetCodeItem();
+    code_ptr = code_item.insns_;
+    code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
+  }
+
+  uint32_t dex_pc = 0;
+  std::vector<uint32_t> entries;
+  while (code_ptr < code_end) {
+    const Instruction& instruction = *Instruction::At(code_ptr);
+    switch (instruction.Opcode()) {
+      case Instruction::INVOKE_VIRTUAL:
+      case Instruction::INVOKE_VIRTUAL_RANGE:
+      case Instruction::INVOKE_VIRTUAL_QUICK:
+      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+      case Instruction::INVOKE_INTERFACE:
+      case Instruction::INVOKE_INTERFACE_RANGE:
+        entries.push_back(dex_pc);
+        break;
+
+      default:
+        break;
+    }
+    dex_pc += instruction.SizeInCodeUnits();
+    code_ptr += instruction.SizeInCodeUnits();
+  }
+
+  // If there are no instructions we are interested in, there is no need to create a
+  // `ProfilingInfo` object; it would never be filled.
+  if (entries.empty()) {
+    return nullptr;
+  }
+
+  // Allocate the `ProfilingInfo` object in the JIT's data space.
+  jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
+  size_t profile_info_size = sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size();
+  uint8_t* data = code_cache->ReserveData(Thread::Current(), profile_info_size);
+
+  if (data == nullptr) {
+    VLOG(jit) << "Cannot allocate profiling info anymore";
+    return nullptr;
+  }
+
+  return new (data) ProfilingInfo(entries);
+}
+
+void ProfilingInfo::AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls) {
+  InlineCache* cache = nullptr;
+  // TODO: binary search if array is too long.
+  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+    if (cache_[i].dex_pc == dex_pc) {
+      cache = &cache_[i];
+      break;
+    }
+  }
+  DCHECK(cache != nullptr);
+
+  ScopedObjectAccess soa(self);
+  for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
+    mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+    if (existing == cls) {
+      // Receiver type is already in the cache, nothing else to do.
+      return;
+    } else if (existing == nullptr) {
+      // Cache entry is empty, try to put `cls` in it.
+      GcRoot<mirror::Class> expected_root(nullptr);
+      GcRoot<mirror::Class> desired_root(cls);
+      if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
+              CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
+        // Some other thread put a class in the cache, continue iteration starting at this
+        // entry in case the entry contains `cls`.
+        --i;
+      } else {
+        // We successfully set `cls`, just return.
+        return;
+      }
+    }
+  }
+  // Unsuccessful: the cache is full, so it is now megamorphic.
+  DCHECK(cache->IsMegamorphic());
+}
+
+}  // namespace art
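
AddInvokeInfo fills each InlineCache slot with a compare-and-swap, so concurrent
interpreter threads can record receiver classes without taking a lock; a thread that loses
a race re-examines the slot, which may now already hold its class. A self-contained sketch
using std::atomic in place of Atomic<GcRoot<mirror::Class>> (Klass is a stand-in):

    #include <atomic>
    #include <cstddef>

    struct Klass {};

    constexpr size_t kIndividualCacheSize = 5;
    std::atomic<Klass*> cache[kIndividualCacheSize];  // Zero-initialized slots.

    void AddReceiver(Klass* cls) {
      for (size_t i = 0; i < kIndividualCacheSize; ++i) {
        Klass* existing = cache[i].load(std::memory_order_relaxed);
        if (existing == cls) {
          return;  // Receiver type already recorded.
        }
        if (existing == nullptr) {
          Klass* expected = nullptr;
          if (cache[i].compare_exchange_strong(expected, cls)) {
            return;  // We claimed the empty slot.
          }
          --i;  // Lost the race: re-examine this slot, it may now hold cls.
        }
      }
      // All slots hold other classes: the call site is megamorphic.
    }

    int main() {
      Klass a, b;
      AddReceiver(&a);
      AddReceiver(&b);
      AddReceiver(&a);  // Already present: no new slot consumed.
    }
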
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
new file mode 100644
index 0000000..73ca41a
--- /dev/null
+++ b/runtime/jit/profiling_info.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_PROFILING_INFO_H_
+#define ART_RUNTIME_JIT_PROFILING_INFO_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "gc_root.h"
+
+namespace art {
+
+class ArtMethod;
+
+namespace mirror {
+class Class;
+}
+
+/**
+ * Profiling info for a method, created and filled by the interpreter once the
+ * method is warm, and used by the compiler to drive optimizations.
+ */
+class ProfilingInfo {
+ public:
+  static ProfilingInfo* Create(ArtMethod* method);
+
+  // Add information from an executed INVOKE instruction to the profile.
+  void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
+
+  // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
+  template<typename RootVisitorType>
+  void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
+    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+      InlineCache* cache = &cache_[i];
+      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
+        visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
+      }
+    }
+  }
+
+ private:
+  // Structure to store the classes seen at runtime for a specific instruction.
+  // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
+  struct InlineCache {
+    bool IsMonomorphic() const {
+      DCHECK_GE(kIndividualCacheSize, 2);
+      return !classes_[0].IsNull() && classes_[1].IsNull();
+    }
+
+    bool IsMegamorphic() const {
+      for (size_t i = 0; i < kIndividualCacheSize; ++i) {
+        if (classes_[i].IsNull()) {
+          return false;
+        }
+      }
+      return true;
+    }
+
+    bool IsUninitialized() const {
+      return classes_[0].IsNull();
+    }
+
+    bool IsPolymorphic() const {
+      DCHECK_GE(kIndividualCacheSize, 3);
+      return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
+    }
+
+    static constexpr uint16_t kIndividualCacheSize = 5;
+    uint32_t dex_pc;
+    GcRoot<mirror::Class> classes_[kIndividualCacheSize];
+  };
+
+  explicit ProfilingInfo(const std::vector<uint32_t>& entries)
+      : number_of_inline_caches_(entries.size()) {
+    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+      cache_[i].dex_pc = entries[i];
+    }
+  }
+
+  // Number of instructions we are profiling in the ArtMethod.
+  const uint32_t number_of_inline_caches_;
+
+  // Dynamically allocated array of size `number_of_inline_caches_`.
+  InlineCache cache_[0];
+
+  DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_PROFILING_INFO_H_
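
ProfilingInfo is a variable-length object: a fixed header followed by
number_of_inline_caches_ trailing InlineCache entries, carved out of the JIT data cache and
constructed with placement new (cache_[0] is the GNU zero-length-array idiom). A sketch of
that allocation shape with hypothetical names (VarLen, Entry), using malloc where the real
code uses ReserveData:

    #include <cstddef>
    #include <cstdlib>
    #include <new>
    #include <vector>

    struct Entry { unsigned dex_pc; };

    struct VarLen {
      explicit VarLen(const std::vector<unsigned>& pcs) : count(pcs.size()) {
        for (size_t i = 0; i < count; ++i) {
          entries[i].dex_pc = pcs[i];
        }
      }
      const size_t count;
      Entry entries[0];  // Trailing array, sized at allocation time.
    };

    VarLen* Create(const std::vector<unsigned>& pcs) {
      size_t bytes = sizeof(VarLen) + sizeof(Entry) * pcs.size();
      void* data = std::malloc(bytes);  // ReserveData() in the real code.
      return data == nullptr ? nullptr : new (data) VarLen(pcs);
    }

    int main() {
      VarLen* v = Create({4, 12, 30});
      if (v != nullptr) {
        std::free(v);  // The real object lives as long as the code cache.
      }
    }
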
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 25b5e49..50e2053 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -158,6 +158,9 @@
       .Define("-Xjitthreshold:_")
           .WithType<unsigned int>()
           .IntoKey(M::JITCompileThreshold)
+      .Define("-Xjitwarmupthreshold:_")
+          .WithType<unsigned int>()
+          .IntoKey(M::JITWarmupThreshold)
       .Define("-XX:HspaceCompactForOOMMinIntervalMs=_")  // in ms
           .WithType<MillisecondsToNanoseconds>()  // store as ns
           .IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
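
With the parser entry above and the runtime_options.def key below, the warmup threshold can
be set next to the existing compile threshold on the command line. An illustrative
invocation (flag values are arbitrary, and -Xusejit is assumed to correspond to the UseJIT
key):

    dalvikvm -Xusejit:true -Xjitthreshold:1000 -Xjitwarmupthreshold:500 -cp classes.dex Main
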
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index dd3703c..64c2249 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -40,6 +40,9 @@
   kIntrinsicReverseBits,
   kIntrinsicReverseBytes,
   kIntrinsicNumberOfLeadingZeros,
+  kIntrinsicNumberOfTrailingZeros,
+  kIntrinsicRotateRight,
+  kIntrinsicRotateLeft,
   kIntrinsicAbsInt,
   kIntrinsicAbsLong,
   kIntrinsicAbsFloat,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 9d5ce9f..60defba 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -40,7 +40,7 @@
     handler_dex_pc_(0), clear_exception_(false), handler_frame_depth_(kInvalidFrameDepth) {
 }
 
-// Finds catch handler or prepares for deoptimization.
+// Finds catch handler.
 class CatchBlockStackVisitor FINAL : public StackVisitor {
  public:
   CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
@@ -125,7 +125,7 @@
   StackHandleScope<1> hs(self_);
   Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
 
-  // Walk the stack to find catch handler or prepare for deoptimization.
+  // Walk the stack to find catch handler.
   CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
   visitor.WalkStack(true);
 
@@ -146,16 +146,6 @@
     // Put exception back in root set with clear throw location.
     self_->SetException(exception_ref.Get());
   }
-  // The debugger may suspend this thread and walk its stack. Let's do this before popping
-  // instrumentation frames.
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  if (instrumentation->HasExceptionCaughtListeners()
-      && self_->IsExceptionThrownByCurrentMethod(exception)) {
-    instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
-    // Instrumentation may have been updated.
-    method_tracing_active_ = is_deoptimization_ ||
-        Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
-  }
 }
 
 // Prepares deoptimization.
@@ -189,6 +179,12 @@
       // Ignore callee save method.
       DCHECK(method->IsCalleeSaveMethod());
       return true;
+    } else if (method->IsNative()) {
+      // If we return from JNI with a pending exception and want to deoptimize, we need to skip
+      // the native method.
+      // The top method is a runtime method; the native method comes next.
+      CHECK_EQ(GetFrameDepth(), 1U);
+      return true;
     } else {
       return HandleDeoptimization(method);
     }
@@ -201,7 +197,7 @@
 
   bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    CHECK(code_item != nullptr);
+    CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
     uint16_t num_regs = code_item->registers_size_;
     uint32_t dex_pc = GetDexPc();
     StackHandleScope<2> hs(self_);  // Dex cache, class loader and method.
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index e934834..4db95a8 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -43,9 +43,18 @@
     UNREACHABLE();
   }
 
+  // Find the catch handler for the given exception.
   void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Deoptimize the stack to the upcall. For every compiled frame, we create a "copy"
+  // shadow frame that will be executed by the interpreter.
   void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Update the instrumentation stack by removing all methods that will be unwound
+  // by the exception being thrown.
   void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Long jump either to a catch handler or to the upcall.
   NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
@@ -83,9 +92,10 @@
  private:
   Thread* const self_;
   Context* const context_;
+  // Should we deoptimize the stack?
   const bool is_deoptimization_;
   // Is method tracing active?
-  bool method_tracing_active_;
+  const bool method_tracing_active_;
   // Quick frame with found handler or last frame if no handler found.
   ArtMethod** handler_quick_frame_;
   // PC to branch to for the handler.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4797564..7c71e13 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1749,7 +1749,8 @@
   jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
   if (jit_.get() != nullptr) {
     compiler_callbacks_ = jit_->GetCompilerCallbacks();
-    jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+    jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold(),
+                                     jit_options_->GetWarmupThreshold());
     jit_->CreateThreadPool();
   } else {
     LOG(WARNING) << "Failed to create JIT " << error_msg;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 02ed3a2..d88e84b 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -68,6 +68,7 @@
 RUNTIME_OPTIONS_KEY (bool,                EnableHSpaceCompactForOOM,      true)
 RUNTIME_OPTIONS_KEY (bool,                UseJIT,                         false)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold,            jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int,        JITWarmupThreshold,             jit::Jit::kDefaultWarmupThreshold)
 RUNTIME_OPTIONS_KEY (MemoryKiB,           JITCodeCacheCapacity,           jit::JitCodeCache::kDefaultCapacity)
 RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
                                           HSpaceCompactForOOMMinIntervalsMs,\
diff --git a/runtime/thread.cc b/runtime/thread.cc
index af5830a..86ac140 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2344,10 +2344,31 @@
   // Get exception from thread.
   mirror::Throwable* exception = GetException();
   CHECK(exception != nullptr);
+  bool is_deoptimization = (exception == GetDeoptimizationException());
+  if (!is_deoptimization) {
+    // This is a real exception: let the instrumentation know about it.
+    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+    if (instrumentation->HasExceptionCaughtListeners() &&
+        IsExceptionThrownByCurrentMethod(exception)) {
+      // Instrumentation may cause GC so keep the exception object safe.
+      StackHandleScope<1> hs(this);
+      HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
+      instrumentation->ExceptionCaughtEvent(this, exception);
+    }
+    // Does instrumentation need to deoptimize the stack?
+    // Note: we do this *after* reporting the exception to instrumentation in case it
+    // now requires deoptimization. It may happen if a debugger is attached and requests
+    // new events (single-step, breakpoint, ...) when the exception is reported.
+    is_deoptimization = Dbg::IsForcedInterpreterNeededForException(this);
+    if (is_deoptimization) {
+      // Save the exception into the deoptimization context so it can be restored
+      // before entering the interpreter.
+      PushDeoptimizationContext(JValue(), false, exception);
+    }
+  }
   // Don't leave exception visible while we try to find the handler, which may cause class
   // resolution.
   ClearException();
-  bool is_deoptimization = (exception == GetDeoptimizationException());
   QuickExceptionHandler exception_handler(this, is_deoptimization);
   if (is_deoptimization) {
     exception_handler.DeoptimizeStack();
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4ab5c0e..d629ce6 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -806,6 +806,15 @@
   LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
 }
 
+void Trace::InvokeVirtualOrInterface(Thread*,
+                                     mirror::Object*,
+                                     ArtMethod* method,
+                                     uint32_t dex_pc,
+                                     ArtMethod*) {
+  LOG(ERROR) << "Unexpected invoke event in tracing" << PrettyMethod(method)
+             << " " << dex_pc;
+}
+
 void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
   if (UseThreadCpuClock()) {
     uint64_t clock_base = thread->GetTraceClockBase();
diff --git a/runtime/trace.h b/runtime/trace.h
index 04be3dd..87a691d 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -166,6 +166,12 @@
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+  void InvokeVirtualOrInterface(Thread* thread,
+                                mirror::Object* this_object,
+                                ArtMethod* caller,
+                                uint32_t dex_pc,
+                                ArtMethod* callee)
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
   static std::vector<ArtMethod*>* AllocStackTrace();
   // Clear and store an old stack trace for later use.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index d768afd..1ed6980 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -424,6 +424,7 @@
       has_virtual_or_interface_invokes_(false),
       verify_to_dump_(verify_to_dump),
       allow_thread_suspension_(allow_thread_suspension),
+      is_constructor_(false),
       link_(nullptr) {
   self->PushVerifier(this);
   DCHECK(class_def != nullptr);
@@ -555,15 +556,124 @@
 }
 
 bool MethodVerifier::Verify() {
-  // If there aren't any instructions, make sure that's expected, then exit successfully.
-  if (code_item_ == nullptr) {
-    if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
+  // Some older code doesn't correctly mark constructors as such. Test for this case by looking at
+  // the name.
+  const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+  const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
+  bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
+  bool static_constructor_by_name = strcmp("<clinit>", method_name) == 0;
+  bool constructor_by_name = instance_constructor_by_name || static_constructor_by_name;
+  // Check that only constructors are tagged, and check for bad code that doesn't tag constructors.
+  if ((method_access_flags_ & kAccConstructor) != 0) {
+    if (!constructor_by_name) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+            << "method is marked as constructor, but not named accordingly";
       return false;
-    } else {
-      return true;
+    }
+    is_constructor_ = true;
+  } else if (constructor_by_name) {
+    LOG(WARNING) << "Method " << PrettyMethod(dex_method_idx_, *dex_file_)
+                 << " not marked as constructor.";
+    is_constructor_ = true;
+  }
+  // If it's a constructor, check whether IsStatic() matches the name.
+  // This should have been rejected by the dex file verifier. Only do this in debug builds.
+  if (kIsDebugBuild) {
+    if (IsConstructor()) {
+      if (IsStatic() ^ static_constructor_by_name) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+              << "constructor name doesn't match static flag";
+        return false;
+      }
     }
   }
+
+  // Methods may only have one of public/protected/private.
+  // This should have been rejected by the dex file verifier. Only do this in debug builds.
+  if (kIsDebugBuild) {
+    size_t access_mod_count =
+        (((method_access_flags_ & kAccPublic) == 0) ? 0 : 1) +
+        (((method_access_flags_ & kAccProtected) == 0) ? 0 : 1) +
+        (((method_access_flags_ & kAccPrivate) == 0) ? 0 : 1);
+    if (access_mod_count > 1) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "method has more than one of public/protected/private";
+      return false;
+    }
+  }
+
+  // If there aren't any instructions, make sure that's expected, then exit successfully.
+  if (code_item_ == nullptr) {
+    // This should have been rejected by the dex file verifier. Only do this in debug builds.
+    if (kIsDebugBuild) {
+      // Only native or abstract methods may not have code.
+      if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
+        return false;
+      }
+      if ((method_access_flags_ & kAccAbstract) != 0) {
+        // Abstract methods are not allowed to have the following flags.
+        static constexpr uint32_t kForbidden =
+            kAccPrivate |
+            kAccStatic |
+            kAccFinal |
+            kAccNative |
+            kAccStrict |
+            kAccSynchronized;
+        if ((method_access_flags_ & kForbidden) != 0) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+                << "method can't be abstract and private/static/final/native/strict/synchronized";
+          return false;
+        }
+      }
+      if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+        // Interface methods must be public and abstract.
+        if ((method_access_flags_ & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public and abstract";
+          return false;
+        }
+        // In addition to the above, interface methods must not be protected.
+        static constexpr uint32_t kForbidden = kAccProtected;
+        if ((method_access_flags_ & kForbidden) != 0) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods can't be protected";
+          return false;
+        }
+      }
+      // We also don't allow constructors to be abstract or native.
+      if (IsConstructor()) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be abstract or native";
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // This should have been rejected by the dex file verifier. Only do this in debug builds.
+  if (kIsDebugBuild) {
+    // When there's code, the method must not be native or abstract.
+    if ((method_access_flags_ & (kAccNative | kAccAbstract)) != 0) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "non-zero-length code in abstract or native method";
+      return false;
+    }
+
+    // Only the static initializer may have code in an interface.
+    if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+      // Interfaces may have static initializers for their fields.
+      if (!IsConstructor() || !IsStatic()) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
+        return false;
+      }
+    }
+
+    // Instance constructors must not be synchronized.
+    if (IsInstanceConstructor()) {
+      static constexpr uint32_t kForbidden = kAccSynchronized;
+      if ((method_access_flags_ & kForbidden) != 0) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be synchronized";
+        return false;
+      }
+    }
+  }
+
   // Sanity-check the register counts. ins + locals = registers, so make sure that ins <= registers.
   if (code_item_->ins_size_ > code_item_->registers_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad register counts (ins=" << code_item_->ins_size_
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b57abf5..5e661a5 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -262,20 +262,6 @@
   ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Is the method being verified a constructor?
-  bool IsConstructor() const {
-    return (method_access_flags_ & kAccConstructor) != 0;
-  }
-
-  // Is the method verified static?
-  bool IsStatic() const {
-    return (method_access_flags_ & kAccStatic) != 0;
-  }
-
-  bool IsInstanceConstructor() const {
-    return IsConstructor() && !IsStatic();
-  }
-
   SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
     return string_init_pc_reg_map_;
   }
@@ -284,7 +270,21 @@
     return encountered_failure_types_;
   }
 
+  bool IsInstanceConstructor() const {
+    return IsConstructor() && !IsStatic();
+  }
+
  private:
+  // Is the method being verified a constructor? See the comment on the field.
+  bool IsConstructor() const {
+    return is_constructor_;
+  }
+
+  // Is the method verified static?
+  bool IsStatic() const {
+    return (method_access_flags_ & kAccStatic) != 0;
+  }
+
   // Private constructor for dumping.
   MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
                  Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
@@ -780,6 +780,13 @@
   // FindLocksAtDexPC, resulting in deadlocks.
   const bool allow_thread_suspension_;
 
+  // Whether the method seems to be a constructor. Note that this field exists because we can't
+  // trust the constructor flag in the dex file: some older tools do not mark methods named
+  // "<init>" and "<clinit>" correctly.
+  //
+  // Note: this flag is only valid once Verify() has started.
+  bool is_constructor_;
+
   // Link, for the method verifier root linked list.
   MethodVerifier* link_;
 
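
The header change caches constructor-ness in is_constructor_ instead of re-deriving it from method_access_flags_, because, as the new field comment notes, the dex flags can be wrong for methods named "<init>" and "<clinit>". A minimal sketch of deriving it from the reserved names instead (hypothetical helper, not the ART implementation):

    // Sketch: derive constructor-ness from the method name rather than trusting
    // the constructor access flag, which older dex files may omit.
    public class ConstructorByName {
      static boolean isConstructor(String methodName) {
        return "<init>".equals(methodName) || "<clinit>".equals(methodName);
      }

      public static void main(String[] args) {
        System.out.println(isConstructor("<init>"));    // true: instance constructor
        System.out.println(isConstructor("<clinit>"));  // true: static initializer
        System.out.println(isConstructor("foo"));       // false
      }
    }
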
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 08ccf0e..5913c40 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -78,6 +78,14 @@
     test_Memory_pokeShort();
     test_Memory_pokeInt();
     test_Memory_pokeLong();
+    test_Integer_numberOfTrailingZeros();
+    test_Long_numberOfTrailingZeros();
+    test_Integer_rotateRight();
+    test_Long_rotateRight();
+    test_Integer_rotateLeft();
+    test_Long_rotateLeft();
+    test_Integer_rotateRightLeft();
+    test_Long_rotateRightLeft();
   }
 
   /**
@@ -1360,4 +1368,136 @@
     poke_long.invoke(null, address + 1, (long)0x2122232425262728L, false);
     Assert.assertTrue(Arrays.equals(ru, b));
   }
+
+  public static void test_Integer_numberOfTrailingZeros() {
+    Assert.assertEquals(Integer.numberOfTrailingZeros(0), Integer.SIZE);
+    for (int i = 0; i < Integer.SIZE; i++) {
+      Assert.assertEquals(
+        Integer.numberOfTrailingZeros(0x80000000 >> i),
+        Integer.SIZE - 1 - i);
+      Assert.assertEquals(
+        Integer.numberOfTrailingZeros((0x80000000 >> i) | 0x80000000),
+        Integer.SIZE - 1 - i);
+      Assert.assertEquals(Integer.numberOfTrailingZeros(1 << i), i);
+    }
+  }
+
+  public static void test_Long_numberOfTrailingZeros() {
+    Assert.assertEquals(Long.numberOfTrailingZeros(0), Long.SIZE);
+    for (int i = 0; i < Long.SIZE; i++) {
+      Assert.assertEquals(
+        Long.numberOfTrailingZeros(0x8000000000000000L >> i),
+        Long.SIZE - 1 - i);
+      Assert.assertEquals(
+        Long.numberOfTrailingZeros((0x8000000000000000L >> i) | 0x8000000000000000L),
+        Long.SIZE - 1 - i);
+      Assert.assertEquals(Long.numberOfTrailingZeros(1L << i), i);
+    }
+  }
+
+  public static void test_Integer_rotateRight() throws Exception {
+    Assert.assertEquals(Integer.rotateRight(0x11, 0), 0x11);
+
+    Assert.assertEquals(Integer.rotateRight(0x11, 1), 0x80000008);
+    Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE - 1), 0x22);
+    Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE), 0x11);
+    Assert.assertEquals(Integer.rotateRight(0x11, Integer.SIZE + 1), 0x80000008);
+
+    Assert.assertEquals(Integer.rotateRight(0x11, -1), 0x22);
+    Assert.assertEquals(Integer.rotateRight(0x11, -(Integer.SIZE - 1)), 0x80000008);
+    Assert.assertEquals(Integer.rotateRight(0x11, -Integer.SIZE), 0x11);
+    Assert.assertEquals(Integer.rotateRight(0x11, -(Integer.SIZE + 1)), 0x22);
+
+    Assert.assertEquals(Integer.rotateRight(0x80000000, 1), 0x40000000);
+
+    for (int i = 0; i < Integer.SIZE; i++) {
+      Assert.assertEquals(
+        Integer.rotateRight(0xBBAAAADD, i),
+        (0xBBAAAADD >>> i) | (0xBBAAAADD << (Integer.SIZE - i)));
+    }
+  }
+
+  public static void test_Long_rotateRight() throws Exception {
+    Assert.assertEquals(Long.rotateRight(0x11, 0), 0x11);
+
+    Assert.assertEquals(Long.rotateRight(0x11, 1), 0x8000000000000008L);
+    Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE - 1), 0x22);
+    Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE), 0x11);
+    Assert.assertEquals(Long.rotateRight(0x11, Long.SIZE + 1), 0x8000000000000008L);
+
+    Assert.assertEquals(Long.rotateRight(0x11, -1), 0x22);
+    Assert.assertEquals(Long.rotateRight(0x11, -(Long.SIZE - 1)), 0x8000000000000008L);
+    Assert.assertEquals(Long.rotateRight(0x11, -Long.SIZE), 0x11);
+    Assert.assertEquals(Long.rotateRight(0x11, -(Long.SIZE + 1)), 0x22);
+
+    Assert.assertEquals(Long.rotateRight(0x8000000000000000L, 1), 0x4000000000000000L);
+
+    for (int i = 0; i < Long.SIZE; i++) {
+      Assert.assertEquals(
+        Long.rotateRight(0xBBAAAADDFF0000DDL, i),
+        (0xBBAAAADDFF0000DDL >>> i) | (0xBBAAAADDFF0000DDL << (Long.SIZE - i)));
+    }
+  }
+
+  public static void test_Integer_rotateLeft() throws Exception {
+    Assert.assertEquals(Integer.rotateLeft(0x11, 0), 0x11);
+
+    Assert.assertEquals(Integer.rotateLeft(0x11, 1), 0x22);
+    Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE - 1), 0x80000008);
+    Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE), 0x11);
+    Assert.assertEquals(Integer.rotateLeft(0x11, Integer.SIZE + 1), 0x22);
+
+    Assert.assertEquals(Integer.rotateLeft(0x11, -1), 0x80000008);
+    Assert.assertEquals(Integer.rotateLeft(0x11, -(Integer.SIZE - 1)), 0x22);
+    Assert.assertEquals(Integer.rotateLeft(0x11, -Integer.SIZE), 0x11);
+    Assert.assertEquals(Integer.rotateLeft(0x11, -(Integer.SIZE + 1)), 0x80000008);
+
+    Assert.assertEquals(Integer.rotateLeft(0xC0000000, 1), 0x80000001);
+
+    for (int i = 0; i < Integer.SIZE; i++) {
+      Assert.assertEquals(
+        Integer.rotateLeft(0xBBAAAADD, i),
+        (0xBBAAAADD << i) | (0xBBAAAADD >>> (Integer.SIZE - i)));
+    }
+  }
+
+  public static void test_Long_rotateLeft() throws Exception {
+    Assert.assertEquals(Long.rotateLeft(0x11, 0), 0x11);
+
+    Assert.assertEquals(Long.rotateLeft(0x11, 1), 0x22);
+    Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE - 1), 0x8000000000000008L);
+    Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE), 0x11);
+    Assert.assertEquals(Long.rotateLeft(0x11, Long.SIZE + 1), 0x22);
+
+    Assert.assertEquals(Long.rotateLeft(0x11, -1), 0x8000000000000008L);
+    Assert.assertEquals(Long.rotateLeft(0x11, -(Long.SIZE - 1)), 0x22);
+    Assert.assertEquals(Long.rotateLeft(0x11, -Long.SIZE), 0x11);
+    Assert.assertEquals(Long.rotateLeft(0x11, -(Long.SIZE + 1)), 0x8000000000000008L);
+
+    Assert.assertEquals(Long.rotateLeft(0xC000000000000000L, 1), 0x8000000000000001L);
+
+    for (int i = 0; i < Long.SIZE; i++) {
+      Assert.assertEquals(
+        Long.rotateLeft(0xBBAAAADDFF0000DDL, i),
+        (0xBBAAAADDFF0000DDL << i) | (0xBBAAAADDFF0000DDL >>> (Long.SIZE - i)));
+    }
+  }
+
+  public static void test_Integer_rotateRightLeft() throws Exception {
+    for (int i = 0; i < Integer.SIZE * 2; i++) {
+      Assert.assertEquals(Integer.rotateLeft(0xBBAAAADD, i),
+                          Integer.rotateRight(0xBBAAAADD, -i));
+      Assert.assertEquals(Integer.rotateLeft(0xBBAAAADD, -i),
+                          Integer.rotateRight(0xBBAAAADD, i));
+    }
+  }
+
+  public static void test_Long_rotateRightLeft() throws Exception {
+    for (int i = 0; i < Long.SIZE * 2; i++) {
+      Assert.assertEquals(Long.rotateLeft(0xBBAAAADDFF0000DDL, i),
+                          Long.rotateRight(0xBBAAAADDFF0000DDL, -i));
+      Assert.assertEquals(Long.rotateLeft(0xBBAAAADDFF0000DDL, -i),
+                          Long.rotateRight(0xBBAAAADDFF0000DDL, i));
+    }
+  }
 }
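
The new tests lean on identities that java.lang defines for these methods: rotation distances are reduced mod the word size, rotateLeft(x, d) equals rotateRight(x, -d), and numberOfTrailingZeros(0) is the word size. A small reference implementation for cross-checking (a sketch; the class and method names here are invented for illustration):

    // Reference implementations of the intrinsics exercised above, for
    // cross-checking against the java.lang library methods.
    public class IntrinsicsRef {
      // Masking with 31 reduces the distance mod Integer.SIZE, so negative
      // distances rotate the other way, matching Integer.rotateRight.
      static int rotateRight32(int x, int d) {
        return (x >>> (d & 31)) | (x << (-d & 31));
      }

      // For x != 0, ~x & (x - 1) has a one bit exactly at each position below
      // the lowest set bit of x; for x == 0 it is all ones, giving 32.
      static int numberOfTrailingZeros32(int x) {
        return Integer.bitCount(~x & (x - 1));
      }

      public static void main(String[] args) {
        for (int d = -65; d <= 65; d++) {
          if (rotateRight32(0xBBAAAADD, d) != Integer.rotateRight(0xBBAAAADD, d)) {
            throw new AssertionError("rotate mismatch at distance " + d);
          }
        }
        for (int i = 0; i <= 32; i++) {
          int x = (i == 32) ? 0 : (1 << i);
          if (numberOfTrailingZeros32(x) != Integer.numberOfTrailingZeros(x)) {
            throw new AssertionError("ntz mismatch at input " + x);
          }
        }
        System.out.println("reference implementations agree with java.lang");
      }
    }
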
diff --git a/test/800-smali/smali/b_18380491AbstractBase.smali b/test/800-smali/smali/b_18380491AbstractBase.smali
index 7aa1b1a..cc05221 100644
--- a/test/800-smali/smali/b_18380491AbstractBase.smali
+++ b/test/800-smali/smali/b_18380491AbstractBase.smali
@@ -1,4 +1,4 @@
-.class public LB18380491ActractBase;
+.class public abstract LB18380491AbstractBase;
 
 .super Ljava/lang/Object;
 
diff --git a/test/800-smali/smali/b_18380491ConcreteClass.smali b/test/800-smali/smali/b_18380491ConcreteClass.smali
index db5ef3b..1ba684f 100644
--- a/test/800-smali/smali/b_18380491ConcreteClass.smali
+++ b/test/800-smali/smali/b_18380491ConcreteClass.smali
@@ -1,10 +1,10 @@
 .class public LB18380491ConcreteClass;
 
-.super LB18380491ActractBase;
+.super LB18380491AbstractBase;
 
 .method public constructor <init>()V
     .locals 0
-    invoke-direct {p0}, LB18380491ActractBase;-><init>()V
+    invoke-direct {p0}, LB18380491AbstractBase;-><init>()V
     return-void
 .end method
 
@@ -13,7 +13,7 @@
   if-eqz p1, :invoke_super_abstract
   return p1
   :invoke_super_abstract
-  invoke-super {p0, p1}, LB18380491ActractBase;->foo(I)I
+  invoke-super {p0, p1}, LB18380491AbstractBase;->foo(I)I
   move-result v0
   return v0
 .end method
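
The smali change above fixes the misspelled class name and, more importantly, adds the missing abstract modifier, so the test really does exercise invoke-super targeting an abstract method. For orientation, a rough Java analog of the two test classes (illustration only; the failing branch itself has no Java spelling):

    // Java analog of the smali test classes.
    abstract class B18380491AbstractBase {
      public abstract int foo(int i);
    }

    public class B18380491ConcreteClass extends B18380491AbstractBase {
      public int foo(int i) {
        // The smali version returns i when i != 0; when i == 0 it instead does
        // invoke-super to the abstract foo, which is expected to throw
        // AbstractMethodError at runtime. That branch cannot be written in
        // Java, since javac rejects super.foo() when foo is abstract, which is
        // why the original test is written in smali.
        return i;
      }

      public static void main(String[] args) {
        System.out.println(new B18380491ConcreteClass().foo(42));  // prints 42
      }
    }
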
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ad3fb41..efc0bfb 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -39,7 +39,11 @@
 SECONDARY_DEX=""
 TIME_OUT="gdb"  # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
 # Value in seconds
-TIME_OUT_VALUE=600  # 10 minutes.
+if [ "$ART_USE_READ_BARRIER" = "true" ]; then
+  TIME_OUT_VALUE=900  # 15 minutes.
+else
+  TIME_OUT_VALUE=600  # 10 minutes.
+fi
 USE_GDB="n"
 USE_JVM="n"
 VERIFY="y" # y=yes,n=no,s=softfail