Merge "Remove default argument values in GenerateGcRootFieldLoad."
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 56b4ebd..1b87725 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -24,6 +24,8 @@
 #include <malloc.h>  // For mallinfo
 #endif
 
+#include "android-base/strings.h"
+
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/array_ref.h"
@@ -2011,12 +2013,18 @@
       CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
           << klass->PrettyDescriptor() << ": state=" << klass->GetStatus();
 
-      // It is *very* problematic if there are verification errors in the boot classpath.
-      // For example, we rely on things working OK without verification when the
-      // decryption dialog is brought up. So abort in a debug build if we find this violated.
-      DCHECK(!manager_->GetCompiler()->GetCompilerOptions().IsBootImage() || klass->IsVerified())
-          << "Boot classpath class " << klass->PrettyClass()
-          << " failed to fully verify.";
+      // It is *very* problematic if there are verification errors in the boot classpath. For example,
+      // we rely on things working OK without verification when the decryption dialog is brought up.
+      // So abort in a debug build if we find this violated.
+      if (kIsDebugBuild) {
+        // TODO(narayan): Remove this special case for signature polymorphic
+        // invokes once verifier support is fully implemented.
+        if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage() &&
+            !android::base::StartsWith(descriptor, "Ljava/lang/invoke/")) {
+          DCHECK(klass->IsVerified()) << "Boot classpath class " << klass->PrettyClass()
+              << " failed to fully verify: state= " << klass->GetStatus();
+        }
+      }
     } else {
       // Make the skip a soft failure, essentially being considered as verify at runtime.
       failure_kind = verifier::MethodVerifier::kSoftFailure;
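
The new guard applies only to boot-image compiles and temporarily exempts descriptors under `Ljava/lang/invoke/`. A minimal standalone sketch of that predicate, assuming C++20 `std::string_view::starts_with` in place of `android::base::StartsWith` (the function name below is illustrative, not ART's API):

#include <string_view>

// True when a boot classpath class must be fully verified by now:
// boot-image compilation, minus the temporary java.lang.invoke carve-out
// for signature-polymorphic invokes noted in the TODO above.
bool MustBeFullyVerified(bool is_boot_image, std::string_view descriptor) {
  return is_boot_image && !descriptor.starts_with("Ljava/lang/invoke/");
}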
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 660afec..be65f89 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -601,11 +601,21 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathARM below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathARM : public SlowPathCodeARM {
  public:
-  ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location obj)
-      : SlowPathCodeARM(instruction), obj_(obj) {
+  ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location ref)
+      : SlowPathCodeARM(instruction), ref_(ref) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -613,9 +623,9 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Register reg = obj_.AsRegister<Register>();
+    Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -634,40 +644,213 @@
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
-    DCHECK_NE(reg, SP);
-    DCHECK_NE(reg, LR);
-    DCHECK_NE(reg, PC);
+    DCHECK_NE(ref_reg, SP);
+    DCHECK_NE(ref_reg, LR);
+    DCHECK_NE(ref_reg, PC);
     // IP is used internally by the ReadBarrierMarkRegX entry point
     // as a temporary, it cannot be the entry point's input/output.
-    DCHECK_NE(reg, IP);
-    DCHECK(0 <= reg && reg < kNumberOfCoreRegisters) << reg;
+    DCHECK_NE(ref_reg, IP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCoreRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in R0):
     //
-    //   R0 <- obj
+    //   R0 <- ref
     //   R0 <- ReadBarrierMark(R0)
-    //   obj <- R0
+    //   ref <- R0
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ b(GetExitLabel());
   }
 
  private:
-  const Location obj_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathARM above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathARM : public SlowPathCodeARM {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathARM(HInstruction* instruction,
+                                           Location ref,
+                                           Register obj,
+                                           Location field_offset,
+                                           Register temp1,
+                                           Register temp2)
+      : SlowPathCodeARM(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_offset_(field_offset),
+        temp1_(temp1),
+        temp2_(temp2) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathARM"; }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = ref_.AsRegister<Register>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;
+
+    __ Bind(GetEntryLabel());
+
+    // Save the old reference.
+    // Note that we cannot use IP to save the old reference, as IP is
+    // used internally by the ReadBarrierMarkRegX entry point, and we
+    // need the old reference after the call to that entry point.
+    DCHECK_NE(temp1_, IP);
+    __ Mov(temp1_, ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+    DCHECK_NE(ref_reg, SP);
+    DCHECK_NE(ref_reg, LR);
+    DCHECK_NE(ref_reg, PC);
+    // IP is used internally by the ReadBarrierMarkRegX entry point
+    // as a temporary, it cannot be the entry point's input/output.
+    DCHECK_NE(ref_reg, IP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCoreRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in R0):
+    //
+    //   R0 <- ref
+    //   R0 <- ReadBarrierMark(R0)
+    //   ref <- R0
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ref_reg);
+    // This runtime call does not require a stack map.
+    arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*(obj_ + field_offset_)`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LDREX/SUBS/ITNE sequence of instructions in the compare-and-set
+    // (CAS) operation below would abort the CAS, leaving the field
+    // as-is.
+    Label done;
+    __ cmp(temp1_, ShifterOperand(ref_reg));
+    __ b(&done, EQ);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates the field before us, but it's OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+
+    // Convenience aliases.
+    Register base = obj_;
+    // The UnsafeCASObject intrinsic uses a register pair as field
+    // offset ("long offset"), of which only the low part contains
+    // data.
+    Register offset = field_offset_.AsRegisterPairLow<Register>();
+    Register expected = temp1_;
+    Register value = ref_reg;
+    Register tmp_ptr = IP;       // Pointer to actual memory.
+    Register tmp = temp2_;       // Value in memory.
+
+    __ add(tmp_ptr, base, ShifterOperand(offset));
+
+    if (kPoisonHeapReferences) {
+      __ PoisonHeapReference(expected);
+      if (value == expected) {
+        // Do not poison `value`, as it is the same register as
+        // `expected`, which has just been poisoned.
+      } else {
+        __ PoisonHeapReference(value);
+      }
+    }
+
+    // do {
+    //   tmp = [tmp_ptr] - expected;
+    // } while (tmp == 0 && failure([tmp_ptr] <- value));
+
+    Label loop_head, exit_loop;
+    __ Bind(&loop_head);
+
+    __ ldrex(tmp, tmp_ptr);
+
+    __ subs(tmp, tmp, ShifterOperand(expected));
+
+    __ it(NE);
+    __ clrex(NE);
+
+    __ b(&exit_loop, NE);
+
+    __ strex(tmp, value, tmp_ptr);
+    __ cmp(tmp, ShifterOperand(1));
+    __ b(&loop_head, EQ);
+
+    __ Bind(&exit_loop);
+
+    if (kPoisonHeapReferences) {
+      __ UnpoisonHeapReference(expected);
+      if (value == expected) {
+        // Do not unpoison `value`, as it is the same register as
+        // `expected`, which has just been unpoisoned.
+      } else {
+        __ UnpoisonHeapReference(value);
+      }
+    }
+
+    __ Bind(&done);
+    __ b(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The location of the offset of the marked reference field within `obj_`.
+  Location field_offset_;
+
+  const Register temp1_;
+  const Register temp2_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathARM);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCodeARM {
  public:
@@ -6644,7 +6827,9 @@
                                                                  Location index,
                                                                  ScaleFactor scale_factor,
                                                                  Location temp,
-                                                                 bool needs_null_check) {
+                                                                 bool needs_null_check,
+                                                                 bool always_update_field,
+                                                                 Register* temp2) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6689,8 +6874,9 @@
 
   // The actual reference load.
   if (index.IsValid()) {
-    // Load types involving an "index": ArrayGet and
-    // UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
+    // Load types involving an "index": ArrayGet,
+    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+    // intrinsics.
     // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
     if (index.IsConstant()) {
       size_t computed_offset =
@@ -6698,9 +6884,9 @@
       __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
     } else {
       // Handle the special case of the
-      // UnsafeGetObject/UnsafeGetObjectVolatile intrinsics, which use
-      // a register pair as index ("long offset"), of which only the low
-      // part contains data.
+      // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+      // intrinsics, which use a register pair as index ("long
+      // offset"), of which only the low part contains data.
       Register index_reg = index.IsRegisterPair()
           ? index.AsRegisterPairLow<Register>()
           : index.AsRegister<Register>();
@@ -6716,8 +6902,21 @@
   __ MaybeUnpoisonHeapReference(ref_reg);
 
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeARM* slow_path =
-      new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref);
+  SlowPathCodeARM* slow_path;
+  if (always_update_field) {
+    DCHECK(temp2 != nullptr);
+    // ReadBarrierMarkAndUpdateFieldSlowPathARM only supports address
+    // of the form `obj + field_offset`, where `obj` is a register and
+    // `field_offset` is a register pair (of which only the lower half
+    // is used). Thus `offset` and `scale_factor` above are expected
+    // to be null in this code path.
+    DCHECK_EQ(offset, 0u);
+    DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathARM(
+        instruction, ref, obj, /* field_offset */ index, temp_reg, *temp2);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref);
+  }
   AddSlowPath(slow_path);
 
   // if (rb_state == ReadBarrier::gray_ptr_)
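
Semantically, the new ARM slow path's post-marking update is a strong compare-and-set with relaxed ordering: install the to-space reference only if the field still holds the from-space one. A portable sketch of that contract, assuming `std::atomic` in place of the emitted LDREX/STREX loop and a placeholder object type:

#include <atomic>

struct Object;  // placeholder for the runtime's reference type

// `old_ref` is the reference before marking, `new_ref` the marked one.
// If another thread has already installed a different reference, the CAS
// fails and the field is left as-is, exactly as described above.
void UpdateHolderField(std::atomic<Object*>* field, Object* old_ref, Object* new_ref) {
  if (old_ref == new_ref) {
    return;  // Marking did not relocate the reference; nothing to do.
  }
  Object* expected = old_ref;
  field->compare_exchange_strong(expected, new_ref, std::memory_order_relaxed);
}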
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 729cbe1..3d46aab 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -508,6 +508,18 @@
                                              bool needs_null_check);
-  // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
-  // and GenerateArrayLoadWithBakerReadBarrier.
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at the address
+  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+  // `ref`, and mark it if needed.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires an extra temporary register, which must be provided as a
+  // non-null pointer (`temp2`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  Register obj,
@@ -515,7 +527,9 @@
                                                  Location index,
                                                  ScaleFactor scale_factor,
                                                  Location temp,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 Register* temp2 = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
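
Because the two new parameters are defaulted, every existing caller of GenerateReferenceLoadWithBakerReadBarrier keeps compiling unchanged; only the UnsafeCASObject intrinsic opts in. A toy model of that calling contract (names and types are illustrative):

#include <cassert>

using Register = int;

void GenerateReferenceLoad(bool always_update_field = false,
                           Register* temp2 = nullptr) {
  // Mirrors the documented contract: the extra temporary register is
  // required exactly when the caller asks for the field update.
  if (always_update_field) {
    assert(temp2 != nullptr);
  }
}

int main() {
  GenerateReferenceLoad();  // pre-existing call sites are unaffected
  Register temp2 = 0;
  GenerateReferenceLoad(/* always_update_field */ true, &temp2);
}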
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 78c164b..b537509 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -589,11 +589,21 @@
   }
 }
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathARM64 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location obj)
-      : SlowPathCodeARM64(instruction), obj_(obj) {
+  ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location ref)
+      : SlowPathCodeARM64(instruction), ref_(ref) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -602,7 +612,8 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(obj_.reg()));
+    DCHECK(ref_.IsRegister()) << ref_;
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -621,40 +632,207 @@
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
-    DCHECK_NE(obj_.reg(), LR);
-    DCHECK_NE(obj_.reg(), WSP);
-    DCHECK_NE(obj_.reg(), WZR);
+    DCHECK_NE(ref_.reg(), LR);
+    DCHECK_NE(ref_.reg(), WSP);
+    DCHECK_NE(ref_.reg(), WZR);
     // IP0 is used internally by the ReadBarrierMarkRegX entry point
     // as a temporary, it cannot be the entry point's input/output.
-    DCHECK_NE(obj_.reg(), IP0);
-    DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
+    DCHECK_NE(ref_.reg(), IP0);
+    DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in W0):
     //
-    //   W0 <- obj
+    //   W0 <- ref
     //   W0 <- ReadBarrierMark(W0)
-    //   obj <- W0
+    //   ref <- W0
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(obj_.reg());
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
     // This runtime call does not require a stack map.
     arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ B(GetExitLabel());
   }
 
  private:
-  const Location obj_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathARM64 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathARM64(HInstruction* instruction,
+                                             Location ref,
+                                             Register obj,
+                                             Location field_offset,
+                                             Register temp)
+      : SlowPathCodeARM64(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_offset_(field_offset),
+        temp_(temp) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierMarkAndUpdateFieldSlowPathARM64";
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = WRegisterFrom(ref_);
+    DCHECK(locations->CanCall());
+    DCHECK(ref_.IsRegister()) << ref_;
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+    DCHECK(field_offset_.IsRegister()) << field_offset_;
+
+    __ Bind(GetEntryLabel());
+
+    // Save the old reference.
+    // Note that we cannot use IP to save the old reference, as IP is
+    // used internally by the ReadBarrierMarkRegX entry point, and we
+    // need the old reference after the call to that entry point.
+    DCHECK_NE(LocationFrom(temp_).reg(), IP0);
+    __ Mov(temp_.W(), ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    DCHECK_NE(ref_.reg(), LR);
+    DCHECK_NE(ref_.reg(), WSP);
+    DCHECK_NE(ref_.reg(), WZR);
+    // IP0 is used internally by the ReadBarrierMarkRegX entry point
+    // as a temporary, it cannot be the entry point's input/output.
+    DCHECK_NE(ref_.reg(), IP0);
+    DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in W0):
+    //
+    //   W0 <- ref
+    //   W0 <- ReadBarrierMark(W0)
+    //   ref <- W0
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
+    // This runtime call does not require a stack map.
+    arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*(obj_ + field_offset_)`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LDXR/CMP/BNE sequence of instructions in the compare-and-set
+    // (CAS) operation below would abort the CAS, leaving the field
+    // as-is.
+    vixl::aarch64::Label done;
+    __ Cmp(temp_.W(), ref_reg);
+    __ B(eq, &done);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates the field before us, but it's OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+
+    MacroAssembler* masm = arm64_codegen->GetVIXLAssembler();
+    UseScratchRegisterScope temps(masm);
+
+    // Convenience aliases.
+    Register base = obj_.W();
+    Register offset = XRegisterFrom(field_offset_);
+    Register expected = temp_.W();
+    Register value = ref_reg;
+    Register tmp_ptr = temps.AcquireX();    // Pointer to actual memory.
+    Register tmp_value = temps.AcquireW();  // Value in memory.
+
+    __ Add(tmp_ptr, base.X(), Operand(offset));
+
+    if (kPoisonHeapReferences) {
+      arm64_codegen->GetAssembler()->PoisonHeapReference(expected);
+      if (value.Is(expected)) {
+        // Do not poison `value`, as it is the same register as
+        // `expected`, which has just been poisoned.
+      } else {
+        arm64_codegen->GetAssembler()->PoisonHeapReference(value);
+      }
+    }
+
+    // do {
+    //   tmp_value = [tmp_ptr] - expected;
+    // } while (tmp_value == 0 && failure([tmp_ptr] <- value));
+
+    vixl::aarch64::Label loop_head, comparison_failed, exit_loop;
+    __ Bind(&loop_head);
+    __ Ldxr(tmp_value, MemOperand(tmp_ptr));
+    __ Cmp(tmp_value, expected);
+    __ B(&comparison_failed, ne);
+    __ Stxr(tmp_value, value, MemOperand(tmp_ptr));
+    __ Cbnz(tmp_value, &loop_head);
+    __ B(&exit_loop);
+    __ Bind(&comparison_failed);
+    __ Clrex();
+    __ Bind(&exit_loop);
+
+    if (kPoisonHeapReferences) {
+      arm64_codegen->GetAssembler()->UnpoisonHeapReference(expected);
+      if (value.Is(expected)) {
+        // Do not unpoison `value`, as it is the same register as
+        // `expected`, which has just been unpoisoned.
+      } else {
+        arm64_codegen->GetAssembler()->UnpoisonHeapReference(value);
+      }
+    }
+
+    __ Bind(&done);
+    __ B(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The location of the offset of the marked reference field within `obj_`.
+  Location field_offset_;
+
+  const Register temp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathARM64);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
  public:
@@ -768,7 +946,7 @@
         DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
                (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
             << instruction_->AsInvoke()->GetIntrinsic();
-        DCHECK_EQ(offset_, 0U);
+        DCHECK_EQ(offset_, 0u);
         DCHECK(index_.IsRegister());
       }
     }
@@ -5175,7 +5353,7 @@
 
   // /* HeapReference<Object> */ ref = *(obj + offset)
   Location no_index = Location::NoLocation();
-  size_t no_scale_factor = 0U;
+  size_t no_scale_factor = 0u;
   GenerateReferenceLoadWithBakerReadBarrier(instruction,
                                             ref,
                                             obj,
@@ -5226,7 +5404,8 @@
                                                                    size_t scale_factor,
                                                                    Register temp,
                                                                    bool needs_null_check,
-                                                                   bool use_load_acquire) {
+                                                                   bool use_load_acquire,
+                                                                   bool always_update_field) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
   // If we are emitting an array load, we should not be using a
@@ -5279,7 +5458,9 @@
 
   // The actual reference load.
   if (index.IsValid()) {
-    // Load types involving an "index".
+    // Load types involving an "index": ArrayGet,
+    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+    // intrinsics.
     if (use_load_acquire) {
       // UnsafeGetObjectVolatile intrinsic case.
       // Register `index` is not an index in an object array, but an
@@ -5288,9 +5469,9 @@
       DCHECK(instruction->GetLocations()->Intrinsified());
       DCHECK(instruction->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)
           << instruction->AsInvoke()->GetIntrinsic();
-      DCHECK_EQ(offset, 0U);
-      DCHECK_EQ(scale_factor, 0U);
-      DCHECK_EQ(needs_null_check, 0U);
+      DCHECK_EQ(offset, 0u);
+      DCHECK_EQ(scale_factor, 0u);
+      DCHECK_EQ(needs_null_check, 0u);
       // /* HeapReference<Object> */ ref = *(obj + index)
       MemOperand field = HeapOperand(obj, XRegisterFrom(index));
       LoadAcquire(instruction, ref_reg, field, /* needs_null_check */ false);
@@ -5301,10 +5482,10 @@
         uint32_t computed_offset = offset + (Int64ConstantFrom(index) << scale_factor);
         Load(type, ref_reg, HeapOperand(obj, computed_offset));
       } else {
-        Register temp2 = temps.AcquireW();
-        __ Add(temp2, obj, offset);
-        Load(type, ref_reg, HeapOperand(temp2, XRegisterFrom(index), LSL, scale_factor));
-        temps.Release(temp2);
+        Register temp3 = temps.AcquireW();
+        __ Add(temp3, obj, offset);
+        Load(type, ref_reg, HeapOperand(temp3, XRegisterFrom(index), LSL, scale_factor));
+        temps.Release(temp3);
       }
     }
   } else {
@@ -5321,8 +5502,19 @@
   GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
 
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, ref);
+  SlowPathCodeARM64* slow_path;
+  if (always_update_field) {
+    // ReadBarrierMarkAndUpdateFieldSlowPathARM64 only supports
+    // address of the form `obj + field_offset`, where `obj` is a
+    // register and `field_offset` is a register. Thus `offset` and
+    // `scale_factor` above are expected to be zero in this code path.
+    DCHECK_EQ(offset, 0u);
+    DCHECK_EQ(scale_factor, 0u);  /* "times 1" */
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathARM64(
+        instruction, ref, obj, /* field_offset */ index, temp);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, ref);
+  }
   AddSlowPath(slow_path);
 
   // if (rb_state == ReadBarrier::gray_ptr_)
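
The LDXR/STXR sequence emitted above builds a strong CAS out of a weak primitive: a failed store-exclusive (nonzero status register, tested with CBNZ) retries, while a failed comparison exits through CLREX. A C-level sketch of the same shape, assuming the GCC/Clang `__atomic` builtins as a stand-in for the exclusive-monitor instructions:

#include <cstdint>

// Strong CAS with relaxed ordering. A spurious failure of the weak CAS
// (the analogue of a failed STXR) loops back; a genuine value mismatch
// (the analogue of the CMP/B.NE path) gives up, leaving memory as-is.
bool CasRelaxed(uint32_t* addr, uint32_t expected, uint32_t desired) {
  uint32_t observed = expected;
  while (!__atomic_compare_exchange_n(addr, &observed, desired, /* weak */ true,
                                      __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
    if (observed != expected) {
      return false;  // Another thread installed a different reference.
    }
    observed = expected;  // Spurious failure: retry, like CBNZ to loop_head.
  }
  return true;
}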
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7de84be..7f54b4b 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -594,6 +594,13 @@
                                              bool needs_null_check);
   // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
+  //
+  // Load the object reference located at the address
+  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+  // `ref`, and mark it if needed.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  vixl::aarch64::Register obj,
@@ -602,7 +609,8 @@
                                                  size_t scale_factor,
                                                  vixl::aarch64::Register temp,
                                                  bool needs_null_check,
-                                                 bool use_load_acquire);
+                                                 bool use_load_acquire,
+                                                 bool always_update_field = false);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
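
Unlike the ARM and x86 variants, the ARM64 path needs no extra temporary parameter: the additional registers are acquired from VIXL's scratch pool inside the slow path (the UseScratchRegisterScope in EmitNativeCode above). A toy RAII model of that pattern, entirely illustrative rather than VIXL's implementation:

#include <cassert>
#include <cstdint>

struct ScratchPool {
  uint32_t free_mask;  // bit i set => register i is available
};

// Scoped acquisition: registers taken here are returned automatically
// when the scope ends, so callers need not thread temporaries through.
class ScratchScope {
 public:
  explicit ScratchScope(ScratchPool* pool) : pool_(pool), acquired_(0) {}
  ~ScratchScope() { pool_->free_mask |= acquired_; }
  int Acquire() {
    assert(pool_->free_mask != 0);
    int reg = __builtin_ctz(pool_->free_mask);  // lowest available register
    pool_->free_mask &= ~(1u << reg);
    acquired_ |= 1u << reg;
    return reg;
  }
 private:
  ScratchPool* pool_;
  uint32_t acquired_;
};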
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d930016..efd33c7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -426,11 +426,25 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathX86 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
  public:
-  ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location obj, bool unpoison)
-      : SlowPathCode(instruction), obj_(obj), unpoison_(unpoison) {
+  ReadBarrierMarkSlowPathX86(HInstruction* instruction,
+                             Location ref,
+                             bool unpoison_ref_before_marking)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -438,9 +452,9 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Register reg = obj_.AsRegister<Register>();
+    Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -455,44 +469,211 @@
         << instruction_->DebugName();
 
     __ Bind(GetEntryLabel());
-    if (unpoison_) {
+    if (unpoison_ref_before_marking_) {
       // Object* ref = ref_addr->AsMirrorPtr()
-      __ MaybeUnpoisonHeapReference(reg);
+      __ MaybeUnpoisonHeapReference(ref_reg);
     }
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
-    DCHECK_NE(reg, ESP);
-    DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
+    DCHECK_NE(ref_reg, ESP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in EAX):
     //
-    //   EAX <- obj
+    //   EAX <- ref
     //   EAX <- ReadBarrierMark(EAX)
-    //   obj <- EAX
+    //   ref <- EAX
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ jmp(GetExitLabel());
   }
 
  private:
-  const Location obj_;
-  const bool unpoison_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathX86 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathX86(HInstruction* instruction,
+                                           Location ref,
+                                           Register obj,
+                                           const Address& field_addr,
+                                           bool unpoison_ref_before_marking,
+                                           Register temp)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_addr_(field_addr),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking),
+        temp_(temp) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = ref_.AsRegister<Register>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+
+    __ Bind(GetEntryLabel());
+    if (unpoison_ref_before_marking_) {
+      // Object* ref = ref_addr->AsMirrorPtr()
+      __ MaybeUnpoisonHeapReference(ref_reg);
+    }
+
+    // Save the old (unpoisoned) reference.
+    __ movl(temp_, ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+    DCHECK_NE(ref_reg, ESP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in EAX):
+    //
+    //   EAX <- ref
+    //   EAX <- ReadBarrierMark(EAX)
+    //   ref <- EAX
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(ref_reg);
+    // This runtime call does not require a stack map.
+    x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*field_addr`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LOCK CMPXCHGL instruction in the compare-and-set (CAS)
+    // operation below would abort the CAS, leaving the field as-is.
+    NearLabel done;
+    __ cmpl(temp_, ref_reg);
+    __ j(kEqual, &done);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates the field before us, but it's OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+    // This operation is implemented with a 32-bit LOCK CMPXCHGL
+    // instruction, which requires the expected value (the old
+    // reference) to be in EAX.  Save EAX beforehand, and move the
+    // expected value (stored in `temp_`) into EAX.
+    __ pushl(EAX);
+    __ movl(EAX, temp_);
+
+    // Convenience aliases.
+    Register base = obj_;
+    Register expected = EAX;
+    Register value = ref_reg;
+
+    bool base_equals_value = (base == value);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value` to a temporary register.  This way, poisoning
+        // `value` won't invalidate `base`.
+        value = temp_;
+        __ movl(value, base);
+      }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (EAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value, expected);
+      DCHECK_NE(base, expected);
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(value);
+    }
+
+    __ LockCmpxchgl(field_addr_, value);
+
+    // If heap poisoning is enabled, we need to unpoison the values
+    // that were poisoned earlier.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        __ UnpoisonHeapReference(value);
+      }
+      // No need to unpoison `expected` (EAX), as it will be overwritten below.
+    }
+
+    // Restore EAX.
+    __ popl(EAX);
+
+    __ Bind(&done);
+    __ jmp(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The address of the marked reference field.  The base of this address must be `obj_`.
+  const Address field_addr_;
+
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
+
+  const Register temp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
  public:
@@ -6831,7 +7012,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
-          instruction, root, /* unpoison */ false);
+          instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
       __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
@@ -6896,7 +7077,9 @@
                                                                  Location ref,
                                                                  Register obj,
                                                                  const Address& src,
-                                                                 bool needs_null_check) {
+                                                                 bool needs_null_check,
+                                                                 bool always_update_field,
+                                                                 Register* temp) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6953,8 +7136,15 @@
 
   // Note: Reference unpoisoning modifies the flags, so we need to delay it after the branch.
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
-      instruction, ref, /* unpoison */ true);
+  SlowPathCode* slow_path;
+  if (always_update_field) {
+    DCHECK(temp != nullptr);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+        instruction, ref, /* unpoison_ref_before_marking */ true);
+  }
   AddSlowPath(slow_path);
 
   // We have done the "if" of the gray bit check above, now branch based on the flags.
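
The DCHECK_NE checks above rest on a property worth making explicit: heap poisoning is address negation, which is its own inverse, so poisoning the same register twice silently yields the unpoisoned value. A small sketch of that involution, assuming a 32-bit reference and negation-based poisoning as the comments state:

#include <cassert>
#include <cstdint>

uint32_t Poison(uint32_t ref) { return 0u - ref; }  // negation: also unpoisons

int main() {
  uint32_t ref = 0x12340000u;
  assert(Poison(Poison(ref)) == ref);  // a double poison is a no-op, so if
                                       // `value` aliased `expected`, neither
                                       // would actually be poisoned before
                                       // the LOCK CMPXCHGL
}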
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 9e5bc83..1b51999 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -499,13 +499,24 @@
                                              uint32_t data_offset,
                                              Location index,
                                              bool needs_null_check);
-  // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
-  // and GenerateArrayLoadWithBakerReadBarrier.
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at address `src`, held by
+  // object `obj`, into `ref`, and mark it if needed.  The base of
+  // address `src` must be `obj`.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires a temporary register, which must be provided as a
+  // non-null pointer (`temp`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  Register obj,
                                                  const Address& src,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 Register* temp = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
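
The x86 slow path's register shuffling follows from the semantics of 32-bit LOCK CMPXCHG: the accumulator both supplies the expected value and receives the observed one. A sequential pseudocode model of the instruction (the hardware performs this atomically):

#include <cstdint>

// Models LOCK CMPXCHGL mem, value: EAX holds the expected value on entry
// and the observed memory value after a failed exchange. This is why the
// slow path stashes the old reference in a temporary, moves it into EAX,
// and saves/restores EAX (or RAX on x86-64) around the operation.
bool LockCmpxchgl(uint32_t* mem, uint32_t* eax, uint32_t value) {
  if (*mem == *eax) {
    *mem = value;
    return true;   // ZF = 1: exchange happened
  }
  *eax = *mem;
  return false;    // ZF = 0: field held something else; left as-is
}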
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1f0d648..fcabeea 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -445,11 +445,25 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathX86_64 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
  public:
-  ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location obj, bool unpoison)
-      : SlowPathCode(instruction), obj_(obj), unpoison_(unpoison) {
+  ReadBarrierMarkSlowPathX86_64(HInstruction* instruction,
+                                Location ref,
+                                bool unpoison_ref_before_marking)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -457,10 +471,10 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    CpuRegister cpu_reg = obj_.AsRegister<CpuRegister>();
-    Register reg = cpu_reg.AsRegister();
+    CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
+    Register ref_reg = ref_cpu_reg.AsRegister();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -475,44 +489,218 @@
         << instruction_->DebugName();
 
     __ Bind(GetEntryLabel());
-    if (unpoison_) {
+    if (unpoison_ref_before_marking_) {
       // Object* ref = ref_addr->AsMirrorPtr()
-      __ MaybeUnpoisonHeapReference(cpu_reg);
+      __ MaybeUnpoisonHeapReference(ref_cpu_reg);
     }
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
-    DCHECK_NE(reg, RSP);
-    DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
+    DCHECK_NE(ref_reg, RSP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in R0):
     //
-    //   RDI <- obj
+    //   RDI <- ref
     //   RAX <- ReadBarrierMark(RDI)
-    //   obj <- RAX
+    //   ref <- RAX
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ jmp(GetExitLabel());
   }
 
  private:
-  const Location obj_;
-  const bool unpoison_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86_64);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathX86_64 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathX86_64(HInstruction* instruction,
+                                              Location ref,
+                                              CpuRegister obj,
+                                              const Address& field_addr,
+                                              bool unpoison_ref_before_marking,
+                                              CpuRegister temp1,
+                                              CpuRegister temp2)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_addr_(field_addr),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking),
+        temp1_(temp1),
+        temp2_(temp2) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
+    Register ref_reg = ref_cpu_reg.AsRegister();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+
+    __ Bind(GetEntryLabel());
+    if (unpoison_ref_before_marking_) {
+      // Object* ref = ref_addr->AsMirrorPtr()
+      __ MaybeUnpoisonHeapReference(ref_cpu_reg);
+    }
+
+    // Save the old (unpoisoned) reference.
+    __ movl(temp1_, ref_cpu_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+    DCHECK_NE(ref_reg, RSP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in R0):
+    //
+    //   RDI <- ref
+    //   RAX <- ReadBarrierMark(RDI)
+    //   ref <- RAX
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(ref_reg);
+    // This runtime call does not require a stack map.
+    x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*field_addr`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LOCK CMPXCHGL instruction in the compare-and-set (CAS)
+    // operation below would abort the CAS, leaving the field as-is.
+    NearLabel done;
+    __ cmpl(temp1_, ref_cpu_reg);
+    __ j(kEqual, &done);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates the field before us, but that's OK.  This is
+    // achieved using a strong compare-and-set (CAS) operation with
+    // relaxed memory synchronization ordering, where the expected
+    // value is the old reference and the desired value is the new
+    // reference.  This operation is implemented with the 32-bit LOCK
+    // CMPXCHGL instruction, which requires the expected value (the
+    // old reference) to be in EAX.  Save RAX beforehand, and move the
+    // expected value (stored in `temp1_`) into EAX.
+    __ movq(temp2_, CpuRegister(RAX));
+    __ movl(CpuRegister(RAX), temp1_);
+
+    // Convenience aliases.
+    CpuRegister base = obj_;
+    CpuRegister expected = CpuRegister(RAX);
+    CpuRegister value = ref_cpu_reg;
+
+    bool base_equals_value = (base.AsRegister() == value.AsRegister());
+    Register value_reg = ref_reg;
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value_reg` to a temporary register.  This way, poisoning
+        // `value_reg` won't invalidate `base`.
+        value_reg = temp1_.AsRegister();
+        __ movl(CpuRegister(value_reg), base);
+      }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (RAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value_reg, expected.AsRegister());
+      DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(CpuRegister(value_reg));
+    }
+
+    __ LockCmpxchgl(field_addr_, CpuRegister(value_reg));
+
+    // If heap poisoning is enabled, we need to unpoison the values
+    // that were poisoned earlier.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value_reg` has been moved to a temporary register; no need
+        // to unpoison it.
+      } else {
+        __ UnpoisonHeapReference(CpuRegister(value_reg));
+      }
+      // No need to unpoison `expected` (RAX), as it will be overwritten below.
+    }
+
+    // Restore RAX.
+    __ movq(CpuRegister(RAX), temp2_);
+
+    __ Bind(&done);
+    __ jmp(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const CpuRegister obj_;
+  // The address of the marked reference field.  The base of this address must be `obj_`.
+  const Address field_addr_;
+
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
+
+  // Temporary register holding the (unpoisoned) old reference; also
+  // used as a scratch register when poisoning the new value.
+  const CpuRegister temp1_;
+  // Temporary register used to save and restore RAX around the CAS.
+  const CpuRegister temp2_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86_64);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
  public:
@@ -4122,7 +4310,7 @@
       // /* HeapReference<Object> */ out = *(base + offset)
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // Note that a potential implicit null check is handled in this
-        // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
+        // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             instruction, out, base, offset, /* needs_null_check */ true);
         if (is_volatile) {
@@ -4569,7 +4757,7 @@
       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // Note that a potential implicit null check is handled in this
-        // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
+        // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
             instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
       } else {
@@ -6264,7 +6452,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
-          instruction, root, /* unpoison */ false);
+          instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
       __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
@@ -6330,7 +6518,10 @@
                                                                     Location ref,
                                                                     CpuRegister obj,
                                                                     const Address& src,
-                                                                    bool needs_null_check) {
+                                                                    bool needs_null_check,
+                                                                    bool always_update_field,
+                                                                    CpuRegister* temp1,
+                                                                    CpuRegister* temp2) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6387,8 +6578,16 @@
 
   // Note: Reference unpoisoning modifies the flags, so we need to delay it after the branch.
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
-      instruction, ref, /* unpoison */ true);
+  SlowPathCode* slow_path;
+  if (always_update_field) {
+    DCHECK(temp1 != nullptr);
+    DCHECK(temp2 != nullptr);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+        instruction, ref, /* unpoison_ref_before_marking */ true);
+  }
   AddSlowPath(slow_path);
 
   // We have done the "if" of the gray bit check above, now branch based on the flags.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index eb082a0..8b19dad 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -434,13 +434,25 @@
                                              uint32_t data_offset,
                                              Location index,
                                              bool needs_null_check);
-  // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
-  // and GenerateArrayLoadWithBakerReadBarrier.
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at address `src`, held by
+  // object `obj`, into `ref`, and mark it if needed.  The base of
+  // address `src` must be `obj`.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires two temporary registers, which must be provided as
+  // non-null pointers (`temp1` and `temp2`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  CpuRegister obj,
                                                  const Address& src,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 CpuRegister* temp1 = nullptr,
+                                                 CpuRegister* temp2 = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
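
For illustration, the new default arguments give the routine two call shapes; the second matches the UnsafeCASObject call sites added below (hypothetical caller, variable names for illustration only):

    // Common case: plain Baker read barrier; the holder's field is
    // not updated.
    codegen->GenerateReferenceLoadWithBakerReadBarrier(
        instruction, ref, obj, src, /* needs_null_check */ true);

    // UnsafeCASObject case: also CAS the holder's field after marking,
    // which requires the two extra temporaries.
    codegen->GenerateReferenceLoadWithBakerReadBarrier(
        instruction, ref, obj, src,
        /* needs_null_check */ false,
        /* always_update_field */ true,
        &temp1, &temp2);
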
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 96a6ecb..8790c1e 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -652,9 +652,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -663,7 +663,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
   if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
     // We need a temporary register for the read barrier marking slow
     // path in InstructionCodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier.
@@ -891,8 +891,13 @@
 static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
                                                 HInvoke* invoke,
                                                 Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -901,36 +906,65 @@
   locations->SetInAt(4, Location::RequiresRegister());
 
   // If heap poisoning is enabled, we don't want the unpoisoning
-  // operations to potentially clobber the output.
-  Location::OutputOverlap overlaps = (kPoisonHeapReferences && type == Primitive::kPrimNot)
+  // operations to potentially clobber the output. Likewise when
+  // emitting a (Baker) read barrier, which may call.
+  Location::OutputOverlap overlaps =
+      ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call)
       ? Location::kOutputOverlap
       : Location::kNoOutputOverlap;
   locations->SetOut(Location::RequiresRegister(), overlaps);
 
+  // Temporary registers used in the CAS. In the object case
+  // (UnsafeCASObject intrinsic), these are also used for
+  // card-marking, and possibly for the (Baker) read barrier.
   locations->AddTemp(Location::RequiresRegister());  // Pointer.
   locations->AddTemp(Location::RequiresRegister());  // Temp 1.
 }
 
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM* codegen) {
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM* codegen) {
   DCHECK_NE(type, Primitive::kPrimLong);
 
   ArmAssembler* assembler = codegen->GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-  Register out = locations->Out().AsRegister<Register>();              // Boolean result.
+  Location out_loc = locations->Out();
+  Register out = out_loc.AsRegister<Register>();                  // Boolean result.
 
-  Register base = locations->InAt(1).AsRegister<Register>();           // Object pointer.
-  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();  // Offset (discard high 4B).
-  Register expected_lo = locations->InAt(3).AsRegister<Register>();    // Expected.
-  Register value_lo = locations->InAt(4).AsRegister<Register>();       // Value.
+  Register base = locations->InAt(1).AsRegister<Register>();      // Object pointer.
+  Location offset_loc = locations->InAt(2);
+  Register offset = offset_loc.AsRegisterPairLow<Register>();     // Offset (discard high 4B).
+  Register expected = locations->InAt(3).AsRegister<Register>();  // Expected.
+  Register value = locations->InAt(4).AsRegister<Register>();     // Value.
 
-  Register tmp_ptr = locations->GetTemp(0).AsRegister<Register>();     // Pointer to actual memory.
-  Register tmp_lo = locations->GetTemp(1).AsRegister<Register>();      // Value in memory.
+  Location tmp_ptr_loc = locations->GetTemp(0);
+  Register tmp_ptr = tmp_ptr_loc.AsRegister<Register>();          // Pointer to actual memory.
+  Register tmp = locations->GetTemp(1).AsRegister<Register>();    // Value in memory.
 
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
     // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
     // object and scan the receiver at the next GC for nothing.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(tmp_ptr, tmp_lo, base, value_lo, value_can_be_null);
+    codegen->MarkGCCard(tmp_ptr, tmp, base, value, value_can_be_null);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused; serves only as a "temporary" within the read barrier.
+          base,
+          /* offset */ 0u,
+          /* index */ offset_loc,
+          ScaleFactor::TIMES_1,
+          tmp_ptr_loc,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &tmp);
+    }
   }
 
   // Prevent reordering with prior memory operations.
@@ -942,12 +976,12 @@
   __ add(tmp_ptr, base, ShifterOperand(offset));
 
   if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    codegen->GetAssembler()->PoisonHeapReference(expected_lo);
-    if (value_lo == expected_lo) {
-      // Do not poison `value_lo`, as it is the same register as
-      // `expected_lo`, which has just been poisoned.
+    __ PoisonHeapReference(expected);
+    if (value == expected) {
+      // Do not poison `value`, as it is the same register as
+      // `expected`, which has just been poisoned.
     } else {
-      codegen->GetAssembler()->PoisonHeapReference(value_lo);
+      __ PoisonHeapReference(value);
     }
   }
 
@@ -959,37 +993,29 @@
   Label loop_head;
   __ Bind(&loop_head);
 
-  // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
-  // the reference stored in the object before attempting the CAS,
-  // similar to the one in the art::Unsafe_compareAndSwapObject JNI
-  // implementation.
-  //
-  // Note that this code is not (yet) used when read barriers are
-  // enabled (see IntrinsicLocationsBuilderARM::VisitUnsafeCASObject).
-  DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
-  __ ldrex(tmp_lo, tmp_ptr);
+  __ ldrex(tmp, tmp_ptr);
 
-  __ subs(tmp_lo, tmp_lo, ShifterOperand(expected_lo));
+  __ subs(tmp, tmp, ShifterOperand(expected));
 
   __ it(EQ, ItState::kItT);
-  __ strex(tmp_lo, value_lo, tmp_ptr, EQ);
-  __ cmp(tmp_lo, ShifterOperand(1), EQ);
+  __ strex(tmp, value, tmp_ptr, EQ);
+  __ cmp(tmp, ShifterOperand(1), EQ);
 
   __ b(&loop_head, EQ);
 
   __ dmb(ISH);
 
-  __ rsbs(out, tmp_lo, ShifterOperand(1));
+  __ rsbs(out, tmp, ShifterOperand(1));
   __ it(CC);
   __ mov(out, ShifterOperand(0), CC);
 
   if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    codegen->GetAssembler()->UnpoisonHeapReference(expected_lo);
-    if (value_lo == expected_lo) {
-      // Do not unpoison `value_lo`, as it is the same register as
-      // `expected_lo`, which has just been unpoisoned.
+    __ UnpoisonHeapReference(expected);
+    if (value == expected) {
+      // Do not unpoison `value`, as it is the same register as
+      // `expected`, which has just been unpoisoned.
     } else {
-      codegen->GetAssembler()->UnpoisonHeapReference(value_lo);
+      __ UnpoisonHeapReference(value);
     }
   }
 }
@@ -998,33 +1024,23 @@
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimInt);
 }
 void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimNot);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+  GenCas(invoke, Primitive::kPrimInt, codegen_);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
-  GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+  GenCas(invoke, Primitive::kPrimNot, codegen_);
 }
 
 void IntrinsicLocationsBuilderARM::VisitStringCompareTo(HInvoke* invoke) {
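
For reference, the LDREX/STREX loop in GenCas above is the load-linked/store-conditional expansion of a CAS; in portable C++ the same retry structure is what `compare_exchange_weak` expresses (a sketch of the semantics, not the generated code):

    #include <atomic>
    #include <cstdint>

    // STREX, like compare_exchange_weak, may fail spuriously (e.g. a
    // lost reservation), so the loop retries until the store succeeds
    // or the comparison genuinely fails.
    bool CasWord(std::atomic<int32_t>& word, int32_t expected, int32_t value) {
      int32_t observed = expected;
      while (!word.compare_exchange_weak(observed, value)) {
        if (observed != expected) {
          return false;  // Value in memory differs: the CAS fails.
        }
        // Spurious failure: retry with `observed` still == `expected`.
      }
      return true;
    }
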
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index e2c1802..db1c022 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -863,9 +863,9 @@
     codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                        trg_loc,
                                                        base,
-                                                       /* offset */ 0U,
+                                                       /* offset */ 0u,
                                                        /* index */ offset_loc,
-                                                       /* scale_factor */ 0U,
+                                                       /* scale_factor */ 0u,
                                                        temp,
                                                        /* needs_null_check */ false,
                                                        is_volatile);
@@ -880,7 +880,7 @@
 
     if (type == Primitive::kPrimNot) {
       DCHECK(trg.IsW());
-      codegen->MaybeGenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
+      codegen->MaybeGenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0u, offset_loc);
     }
   }
 }
@@ -890,9 +890,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -901,7 +901,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
 }
 
 void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1086,8 +1086,13 @@
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
                                        HInvoke* invoke,
                                        Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -1096,20 +1101,29 @@
   locations->SetInAt(4, Location::RequiresRegister());
 
   // If heap poisoning is enabled, we don't want the unpoisoning
-  // operations to potentially clobber the output.
-  Location::OutputOverlap overlaps = (kPoisonHeapReferences && type == Primitive::kPrimNot)
+  // operations to potentially clobber the output. Likewise when
+  // emitting a (Baker) read barrier, which may call.
+  Location::OutputOverlap overlaps =
+      ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call)
       ? Location::kOutputOverlap
       : Location::kNoOutputOverlap;
   locations->SetOut(Location::RequiresRegister(), overlaps);
+  if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    // Temporary register for the (Baker) read barrier.
+    locations->AddTemp(Location::RequiresRegister());
+  }
 }
 
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* codegen) {
   MacroAssembler* masm = codegen->GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-  Register out = WRegisterFrom(locations->Out());                  // Boolean result.
+  Location out_loc = locations->Out();
+  Register out = WRegisterFrom(out_loc);                           // Boolean result.
 
   Register base = WRegisterFrom(locations->InAt(1));               // Object pointer.
-  Register offset = XRegisterFrom(locations->InAt(2));             // Long offset.
+  Location offset_loc = locations->InAt(2);
+  Register offset = XRegisterFrom(offset_loc);                     // Long offset.
   Register expected = RegisterFrom(locations->InAt(3), type);      // Expected.
   Register value = RegisterFrom(locations->InAt(4), type);         // Value.
 
@@ -1118,6 +1132,27 @@
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
     codegen->MarkGCCard(base, value, value_can_be_null);
+
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      Register temp = WRegisterFrom(locations->GetTemp(0));
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused; serves only as a "temporary" within the read barrier.
+          base,
+          /* offset */ 0u,
+          /* index */ offset_loc,
+          /* scale_factor */ 0u,
+          temp,
+          /* needs_null_check */ false,
+          /* use_load_acquire */ false,
+          /* always_update_field */ true);
+    }
   }
 
   UseScratchRegisterScope temps(masm);
@@ -1145,14 +1180,6 @@
 
   vixl::aarch64::Label loop_head, exit_loop;
   __ Bind(&loop_head);
-  // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
-  // the reference stored in the object before attempting the CAS,
-  // similar to the one in the art::Unsafe_compareAndSwapObject JNI
-  // implementation.
-  //
-  // Note that this code is not (yet) used when read barriers are
-  // enabled (see IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject).
-  DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
   __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
   __ Cmp(tmp_value, expected);
   __ B(&exit_loop, ne);
@@ -1179,14 +1206,9 @@
   CreateIntIntIntIntIntToInt(arena_, invoke, Primitive::kPrimLong);
 }
 void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -1194,22 +1216,17 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+  GenCas(invoke, Primitive::kPrimInt, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASLong(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimLong, codegen_);
+  GenCas(invoke, Primitive::kPrimLong, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
-  GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+  GenCas(invoke, Primitive::kPrimNot, codegen_);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
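
The reason the read barrier must run before the CAS, on all of these architectures, is that with a concurrent copying collector the field may still hold a from-space reference while the caller's expected value is already a to-space one; a raw CAS would then compare unequal pointers that denote the same object and fail incorrectly. A hypothetical demonstration (illustrative values only):

    #include <atomic>
    #include <cstdint>

    // The field holds a stale (from-space) pointer to object O while
    // `expected` is O's moved (to-space) pointer.
    bool RawCasFailsIncorrectly() {
      const intptr_t from_space_ref = 0x1000;  // old copy of O
      const intptr_t to_space_ref = 0x2000;    // moved copy of O
      std::atomic<intptr_t> field{from_space_ref};
      intptr_t expected = to_space_ref;
      // Both values denote O, yet the raw compare sees unequal bits.
      return !field.compare_exchange_strong(expected, to_space_ref);  // true
    }
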
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f41e4d9..aae3899 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2056,9 +2056,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -2076,7 +2076,7 @@
     }
   } else {
     locations->SetOut(Location::RequiresRegister(),
-                      can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                      (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
   }
 }
 
@@ -2255,10 +2255,16 @@
   GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_);
 }
 
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+                                       Primitive::Type type,
                                        HInvoke* invoke) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -2278,7 +2284,8 @@
   // Force a byte register for the output.
   locations->SetOut(Location::RegisterLocation(EAX));
   if (type == Primitive::kPrimNot) {
-    // Need temp registers for card-marking.
+    // Need temporary registers for card-marking, and possibly for
+    // the (Baker) read barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     // Need a byte register for marking.
     locations->AddTemp(Location::RegisterLocation(ECX));
@@ -2294,14 +2301,9 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -2317,7 +2319,18 @@
   Location out = locations->Out();
   DCHECK_EQ(out.AsRegister<Register>(), EAX);
 
+  // The address of the field within the holding object.
+  Address field_addr(base, offset, ScaleFactor::TIMES_1, 0);
+
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    Location temp1_loc = locations->GetTemp(0);
+    Register temp1 = temp1_loc.AsRegister<Register>();
+    Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+
     Register expected = locations->InAt(3).AsRegister<Register>();
     // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
     DCHECK_EQ(expected, EAX);
@@ -2325,11 +2338,20 @@
 
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
-                        locations->GetTemp(1).AsRegister<Register>(),
-                        base,
-                        value,
-                        value_can_be_null);
+    codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          temp1_loc,  // Unused; serves only as a "temporary" within the read barrier.
+          base,
+          field_addr,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &temp2);
+    }
 
     bool base_equals_value = (base == value);
     if (kPoisonHeapReferences) {
@@ -2337,7 +2359,7 @@
         // If `base` and `value` are the same register location, move
         // `value` to a temporary register.  This way, poisoning
         // `value` won't invalidate `base`.
-        value = locations->GetTemp(0).AsRegister<Register>();
+        value = temp1;
         __ movl(value, base);
       }
 
@@ -2356,19 +2378,12 @@
       __ PoisonHeapReference(value);
     }
 
-    // TODO: Add a read barrier for the reference stored in the object
-    // before attempting the CAS, similar to the one in the
-    // art::Unsafe_compareAndSwapObject JNI implementation.
-    //
-    // Note that this code is not (yet) used when read barriers are
-    // enabled (see IntrinsicLocationsBuilderX86::VisitUnsafeCASObject).
-    DCHECK(!kEmitCompilerReadBarrier);
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+    __ LockCmpxchgl(field_addr, value);
 
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setb(kZero, out.AsRegister<Register>());
     __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
 
@@ -2392,8 +2407,7 @@
       // Ensure the expected value is in EAX (required by the CMPXCHG
       // instruction).
       DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
-      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
-                      locations->InAt(4).AsRegister<Register>());
+      __ LockCmpxchgl(field_addr, locations->InAt(4).AsRegister<Register>());
     } else if (type == Primitive::kPrimLong) {
       // Ensure the expected value is in EAX:EDX and that the new
       // value is in EBX:ECX (required by the CMPXCHG8B instruction).
@@ -2401,7 +2415,7 @@
       DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
       DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
       DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
-      __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+      __ LockCmpxchg8b(field_addr);
     } else {
       LOG(FATAL) << "Unexpected CAS type " << type;
     }
@@ -2409,7 +2423,7 @@
     // LOCK CMPXCHG/LOCK CMPXCHG8B have full barrier semantics, and we
     // don't need scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setb(kZero, out.AsRegister<Register>());
     __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
   }
@@ -2424,14 +2438,9 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
   GenCAS(Primitive::kPrimNot, invoke, codegen_);
 }
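
The register constraints asserted in the poisoning blocks above follow from poisoning being its own inverse: as the DCHECK comments note, heap reference poisoning is address negation, so poisoning the same register twice restores the original value. A minimal sketch:

    #include <cstdint>

    // Heap reference poisoning is address negation...
    constexpr uint32_t Poison(uint32_t ref) { return 0u - ref; }

    // ...so applying it twice is the identity, which is why `value`
    // aliasing `expected` (EAX) would leave both effectively unpoisoned.
    static_assert(Poison(Poison(0x12345678u)) == 0x12345678u,
                  "double poisoning cancels out");
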
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4b0afca..cdef22f 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2172,9 +2172,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -2183,7 +2183,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
@@ -2333,10 +2333,16 @@
   GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_);
 }
 
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+                                       Primitive::Type type,
                                        HInvoke* invoke) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -2347,7 +2353,8 @@
 
   locations->SetOut(Location::RequiresRegister());
   if (type == Primitive::kPrimNot) {
-    // Need temp registers for card-marking.
+    // Need temporary registers for card-marking, and possibly for
+    // the (Baker) read barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -2362,14 +2369,9 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -2386,16 +2388,37 @@
   // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
   DCHECK_EQ(expected.AsRegister(), RAX);
   CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+  Location out_loc = locations->Out();
+  CpuRegister out = out_loc.AsRegister<CpuRegister>();
 
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    CpuRegister temp1 = locations->GetTemp(0).AsRegister<CpuRegister>();
+    CpuRegister temp2 = locations->GetTemp(1).AsRegister<CpuRegister>();
+
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
-                        locations->GetTemp(1).AsRegister<CpuRegister>(),
-                        base,
-                        value,
-                        value_can_be_null);
+    codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
+
+    // The address of the field within the holding object.
+    Address field_addr(base, offset, ScaleFactor::TIMES_1, 0);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused; serves only as a "temporary" within the read barrier.
+          base,
+          field_addr,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &temp1,
+          &temp2);
+    }
 
     bool base_equals_value = (base.AsRegister() == value.AsRegister());
     Register value_reg = value.AsRegister();
@@ -2404,7 +2427,7 @@
         // If `base` and `value` are the same register location, move
         // `value_reg` to a temporary register.  This way, poisoning
         // `value_reg` won't invalidate `base`.
-        value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+        value_reg = temp1.AsRegister();
         __ movl(CpuRegister(value_reg), base);
       }
 
@@ -2423,19 +2446,12 @@
       __ PoisonHeapReference(CpuRegister(value_reg));
     }
 
-    // TODO: Add a read barrier for the reference stored in the object
-    // before attempting the CAS, similar to the one in the
-    // art::Unsafe_compareAndSwapObject JNI implementation.
-    //
-    // Note that this code is not (yet) used when read barriers are
-    // enabled (see IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject).
-    DCHECK(!kEmitCompilerReadBarrier);
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
+    __ LockCmpxchgl(field_addr, CpuRegister(value_reg));
 
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setcc(kZero, out);
     __ movzxb(out, out);
 
@@ -2468,7 +2484,7 @@
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setcc(kZero, out);
     __ movzxb(out, out);
   }
@@ -2483,14 +2499,9 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
   GenCAS(Primitive::kPrimNot, invoke, codegen_);
 }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c23b1b1..7f15a16 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1309,12 +1309,9 @@
         const size_t num_types = dex_file->NumTypeIds();
         const size_t num_methods = dex_file->NumMethodIds();
         const size_t num_fields = dex_file->NumFieldIds();
-        size_t num_method_types = 0;
-        if (Runtime::Current()->IsMethodHandlesEnabled()) {
-          num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
-          if (dex_file->NumProtoIds() < num_method_types) {
-            num_method_types = dex_file->NumProtoIds();
-          }
+        size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+        if (dex_file->NumProtoIds() < num_method_types) {
+          num_method_types = dex_file->NumProtoIds();
         }
 
         CHECK_EQ(num_strings, dex_cache->NumStrings());
@@ -2110,21 +2107,18 @@
   //
   // If this needs to be mitigated in a production system running this code,
   // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
-  const bool is_method_handles_enabled = Runtime::Current()->IsMethodHandlesEnabled();
   mirror::MethodTypeDexCacheType* method_types = nullptr;
   size_t num_method_types = 0;
 
-  if (is_method_handles_enabled) {
-    if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
-      num_method_types = dex_file.NumProtoIds();
-    } else {
-      num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
-    }
+  if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
+    num_method_types = dex_file.NumProtoIds();
+  } else {
+    num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+  }
 
-    if (num_method_types > 0) {
-      method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
-          raw_arrays + layout.MethodTypesOffset());
-    }
+  if (num_method_types > 0) {
+    method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+        raw_arrays + layout.MethodTypesOffset());
   }
 
   DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
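
The simplified sizing logic above (now unconditional, since method handles no longer gate the allocation) is just a clamp; an equivalent sketch:

    #include <algorithm>
    #include <cstddef>

    // Cache at most `cache_size` method types, fewer if the dex file
    // declares fewer proto ids (mirrors the if/else above).
    size_t NumMethodTypes(size_t num_proto_ids, size_t cache_size) {
      return std::min(num_proto_ids, cache_size);
    }
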
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 1d377a4..7ff845d 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -38,10 +38,10 @@
     case kGcCauseDebugger: return "Debugger";
     case kGcCauseClassLinker: return "ClassLinker";
     case kGcCauseJitCodeCache: return "JitCodeCache";
-    default:
-      LOG(FATAL) << "Unreachable";
-      UNREACHABLE();
+    case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
   }
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
 }
 
 std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause) {
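
Moving the LOG(FATAL) out of the switch and enumerating every GcCause is a deliberate pattern: without a `default:` clause, the compiler can warn when a new enumerator is added but left unhandled, while the trailing fatal log still catches corrupted values at runtime. Schematically (hypothetical enum):

    #include <cstdlib>

    enum class Color { kRed, kGreen };

    const char* ToString(Color c) {
      switch (c) {  // No default: -Wswitch flags unhandled enumerators.
        case Color::kRed: return "Red";
        case Color::kGreen: return "Green";
      }
      // Unreachable for valid enumerators; traps corrupted values.
      abort();
    }
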
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 43bc9bd..243ed57 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -1558,6 +1558,7 @@
       }
       case Instruction::INVOKE_POLYMORPHIC: {
         PREAMBLE();
+        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
         bool success = DoInvokePolymorphic<false, do_access_check>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
@@ -1565,6 +1566,7 @@
       }
       case Instruction::INVOKE_POLYMORPHIC_RANGE: {
         PREAMBLE();
+        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
         bool success = DoInvokePolymorphic<true, do_access_check>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 75b91b1..6bf7e15 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1272,7 +1272,7 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
         obj,
         MemberOffset(offset),
         field_addr);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e95ca21..916f1cf 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -55,9 +55,8 @@
   EXPECT_EQ(java_lang_dex_file_->NumTypeIds(),   dex_cache->NumResolvedTypes());
   EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
   EXPECT_EQ(java_lang_dex_file_->NumFieldIds(),  dex_cache->NumResolvedFields());
-  // This should always be zero because the -Xexperimental:method-handles isn't
-  // set.
-  EXPECT_EQ(0u, dex_cache->NumResolvedMethodTypes());
+  EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
+      || java_lang_dex_file_->NumProtoIds() == dex_cache->NumResolvedMethodTypes());
 }
 
 TEST_F(DexCacheMethodHandlesTest, Open) {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index cdf4b14..644df07 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -65,7 +65,7 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
         obj.Ptr(),
         MemberOffset(offset),
         field_addr);
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 7292946..b91acb6 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -323,7 +323,18 @@
                                        jint* count_ptr,
                                        jobject** object_result_ptr,
                                        jlong** tag_result_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    JNIEnv* jni_env = GetJniEnv(env);
+    if (jni_env == nullptr) {
+      return ERR(INTERNAL);
+    }
+
+    art::ScopedObjectAccess soa(jni_env);
+    return gObjectTagTable.GetTaggedObjects(env,
+                                            tag_count,
+                                            tags,
+                                            count_ptr,
+                                            object_result_ptr,
+                                            tag_result_ptr);
   }
 
   static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
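
An agent exercises the newly implemented entry point through the standard JVMTI function; a sketch (error handling elided, helper name hypothetical):

    #include <jvmti.h>

    // Look up all objects carrying `tag`; the returned buffers must be
    // released through the environment's Deallocate.
    void VisitTagged(jvmtiEnv* jvmti, jlong tag) {
      jint count = 0;
      jobject* objects = nullptr;
      jlong* tags = nullptr;
      if (jvmti->GetObjectsWithTags(1, &tag, &count, &objects, &tags) ==
              JVMTI_ERROR_NONE) {
        // ... use objects[0 .. count) ...
        jvmti->Deallocate(reinterpret_cast<unsigned char*>(objects));
        jvmti->Deallocate(reinterpret_cast<unsigned char*>(tags));
      }
    }
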
diff --git a/runtime/openjdkjvmti/jvmti_allocator.h b/runtime/openjdkjvmti/jvmti_allocator.h
new file mode 100644
index 0000000..1225c14
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_allocator.h
@@ -0,0 +1,170 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+template <typename T> class JvmtiAllocator;
+
+template <>
+class JvmtiAllocator<void> {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef JvmtiAllocator<U> other;
+  };
+
+  explicit JvmtiAllocator(jvmtiEnv* env) : env_(env) {}
+
+  template <typename U>
+  JvmtiAllocator(const JvmtiAllocator<U>& other)  // NOLINT, implicit
+      : env_(other.env_) {}
+
+  JvmtiAllocator(const JvmtiAllocator& other) = default;
+  JvmtiAllocator& operator=(const JvmtiAllocator& other) = default;
+  ~JvmtiAllocator() = default;
+
+ private:
+  jvmtiEnv* env_;
+
+  template <typename U>
+  friend class JvmtiAllocator;
+
+  template <typename U>
+  friend bool operator==(const JvmtiAllocator<U>& lhs, const JvmtiAllocator<U>& rhs);
+};
+
+template <typename T>
+class JvmtiAllocator {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef JvmtiAllocator<U> other;
+  };
+
+  explicit JvmtiAllocator(jvmtiEnv* env) : env_(env) {}
+
+  template <typename U>
+  JvmtiAllocator(const JvmtiAllocator<U>& other)  // NOLINT, implicit
+      : env_(other.env_) {}
+
+  JvmtiAllocator(const JvmtiAllocator& other) = default;
+  JvmtiAllocator& operator=(const JvmtiAllocator& other) = default;
+  ~JvmtiAllocator() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, JvmtiAllocator<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
+    DCHECK_LE(n, max_size());
+    if (env_ == nullptr) {
+      T* result = reinterpret_cast<T*>(malloc(n * sizeof(T)));
+      CHECK(result != nullptr || n == 0u);  // Abort if malloc() fails.
+      return result;
+    } else {
+      unsigned char* result;
+      jvmtiError alloc_error = env_->Allocate(n * sizeof(T), &result);
+      CHECK(alloc_error == JVMTI_ERROR_NONE);
+      return reinterpret_cast<T*>(result);
+    }
+  }
+  void deallocate(pointer p, size_type n ATTRIBUTE_UNUSED) {
+    if (env_ == nullptr) {
+      free(p);
+    } else {
+      jvmtiError dealloc_error = env_->Deallocate(reinterpret_cast<unsigned char*>(p));
+      CHECK(dealloc_error == JVMTI_ERROR_NONE);
+    }
+  }
+
+  void construct(pointer p, const_reference val) {
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  template <class U, class... Args>
+  void construct(U* p, Args&&... args) {
+    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
+  }
+  void destroy(pointer p) {
+    p->~value_type();
+  }
+
+  inline bool operator==(const JvmtiAllocator& other) const {
+    return env_ == other.env_;
+  }
+  inline bool operator!=(const JvmtiAllocator& other) const {
+    return !operator==(other);
+  }
+
+ private:
+  jvmtiEnv* env_;
+
+  template <typename U>
+  friend class JvmtiAllocator;
+
+  template <typename U>
+  friend bool operator==(const JvmtiAllocator<U>& lhs, const JvmtiAllocator<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const JvmtiAllocator<T>& lhs, const JvmtiAllocator<T>& rhs) {
+  return lhs.env_ == rhs.env_;
+}
+
+template <typename T>
+inline bool operator!=(const JvmtiAllocator<T>& lhs, const JvmtiAllocator<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
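
Because JvmtiAllocator provides the standard C++ allocator interface (value_type, rebind, allocate, deallocate, construct, destroy), ordinary standard-library containers can draw their storage from jvmtiEnv::Allocate. A minimal sketch of such a use, assuming a valid jvmtiEnv* env; the FillTags helper is illustrative only, not part of this change:

#include <vector>

#include "jvmti.h"
#include "jvmti_allocator.h"

namespace openjdkjvmti {

// A vector whose backing store comes from jvmtiEnv::Allocate (or malloc if env is null).
using JvmtiLongVector = std::vector<jlong, JvmtiAllocator<jlong>>;

void FillTags(jvmtiEnv* env) {
  JvmtiAllocator<jlong> allocator(env);
  JvmtiLongVector tags(allocator);
  tags.push_back(1);  // Growth and cleanup both go through the JVMTI allocator.
}

}  // namespace openjdkjvmti
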
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index f16b023..b359f36 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -39,6 +39,7 @@
 #include "gc/allocation_listener.h"
 #include "instrumentation.h"
 #include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
 #include "mirror/class.h"
 #include "mirror/object.h"
 #include "runtime.h"
@@ -211,4 +212,146 @@
   // TODO: consider rehash here.
 }
 
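+// A minimal growable buffer whose backing storage can be handed off to the caller via
+// Release(). The storage comes from the supplied allocator, so a buffer built with a
+// JvmtiAllocator can be returned to a JVMTI client, which frees it with jvmtiEnv::Deallocate.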
+template <typename T, class Allocator = std::allocator<T>>
+struct ReleasableContainer {
+  using allocator_type = Allocator;
+
+  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
+      : allocator(alloc),
+        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
+        size(0),
+        capacity(reserve) {
+  }
+
+  ~ReleasableContainer() {
+    if (data != nullptr) {
+      allocator.deallocate(data, capacity);
+      capacity = 0;
+      size = 0;
+    }
+  }
+
+  T* Release() {
+    T* tmp = data;
+
+    data = nullptr;
+    size = 0;
+    capacity = 0;
+
+    return tmp;
+  }
+
+  void Resize(size_t new_capacity) {
+    CHECK_GT(new_capacity, capacity);
+
+    T* tmp = allocator.allocate(new_capacity);
+    DCHECK(tmp != nullptr);
+    if (data != nullptr) {
+      memcpy(tmp, data, sizeof(T) * size);
+    }
+    T* old = data;
+    data = tmp;
+    allocator.deallocate(old, capacity);
+    capacity = new_capacity;
+  }
+
+  void Pushback(const T& elem) {
+    if (size == capacity) {
+      size_t new_capacity = 2 * capacity + 1;
+      Resize(new_capacity);
+    }
+    data[size++] = elem;
+  }
+
+  Allocator allocator;
+  T* data;
+  size_t size;
+  size_t capacity;
+};
+
+jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
+                                            jint tag_count,
+                                            const jlong* tags,
+                                            jint* count_ptr,
+                                            jobject** object_result_ptr,
+                                            jlong** tag_result_ptr) {
+  if (tag_count < 0) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  if (tags == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (count_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (tag_count > 0) {
+    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+      if (tags[i] == 0) {
+        return ERR(ILLEGAL_ARGUMENT);
+      }
+    }
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+  Wait(self);
+
+  art::JNIEnvExt* jni_env = self->GetJniEnv();
+
+  constexpr size_t kDefaultSize = 10;
+  size_t initial_object_size;
+  size_t initial_tag_size;
+  if (tag_count == 0) {
+    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+  } else {
+    initial_object_size = initial_tag_size = kDefaultSize;
+  }
+  JvmtiAllocator<void> allocator(jvmti_env);
+  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(
+      allocator, initial_object_size);
+  ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);
+
+  size_t count = 0;
+  for (auto& pair : tagged_objects_) {
+    bool select;
+    if (tag_count > 0) {
+      select = false;
+      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+        if (tags[i] == pair.second) {
+          select = true;
+          break;
+        }
+      }
+    } else {
+      select = true;
+    }
+
+    if (select) {
+      art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
+      if (obj != nullptr) {
+        count++;
+        if (object_result_ptr != nullptr) {
+          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
+        }
+        if (tag_result_ptr != nullptr) {
+          selected_tags.Pushback(pair.second);
+        }
+      }
+    }
+  }
+
+  if (object_result_ptr != nullptr) {
+    *object_result_ptr = selected_objects.Release();
+  }
+  if (tag_result_ptr != nullptr) {
+    *tag_result_ptr = selected_tags.Release();
+  }
+  *count_ptr = static_cast<jint>(count);
+  return ERR(NONE);
+}
+
 }  // namespace openjdkjvmti
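
The arrays handed back through object_result_ptr and tag_result_ptr are built from jvmtiEnv::Allocate storage (via JvmtiAllocator and ReleasableContainer::Release above), so ownership transfers to the JVMTI client, which must release both with jvmtiEnv::Deallocate. A minimal sketch of the client side of this contract, assuming a valid jvmtiEnv* jvmti; DumpObjectsWithTag is a hypothetical helper and error handling is elided:

#include "jvmti.h"

void DumpObjectsWithTag(jvmtiEnv* jvmti, jlong tag) {
  jint count = 0;
  jobject* objects = nullptr;
  jlong* tags = nullptr;
  // Select every heap object currently carrying `tag`.
  if (jvmti->GetObjectsWithTags(1, &tag, &count, &objects, &tags) != JVMTI_ERROR_NONE) {
    return;
  }
  // ... use objects[0..count) and tags[0..count) ...
  // Both result arrays came from jvmtiEnv::Allocate; hand them back.
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(objects));
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(tags));
}
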
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 579dc22..071d139 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -23,6 +23,7 @@
 #include "gc/system_weak.h"
 #include "gc_root-inl.h"
 #include "globals.h"
+#include "jvmti.h"
 #include "mirror/object.h"
 #include "thread-inl.h"
 
@@ -64,6 +65,15 @@
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
 
+  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
+                              jint tag_count,
+                              const jlong* tags,
+                              jint* count_ptr,
+                              jobject** object_result_ptr,
+                              jlong** tag_result_ptr)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
  private:
   bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
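
The REQUIRES_SHARED(art::Locks::mutator_lock_) annotation means a caller must hold the mutator lock in shared mode, which ART code typically obtains by entering the Runnable state via ScopedObjectAccess; REQUIRES(!allow_disallow_lock_) means the caller must not already hold the tag table's internal lock, since GetTaggedObjects acquires it itself. A minimal sketch of a conforming call site, assuming `table` points at the ObjectTagTable and `env` is a valid jvmtiEnv*; QueryAllTags is a hypothetical helper, not part of this change:

#include "jvmti.h"
#include "object_tagging.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"

jvmtiError QueryAllTags(jvmtiEnv* env,
                        openjdkjvmti::ObjectTagTable* table,
                        jint* count,
                        jobject** objects,
                        jlong** tags) {
  art::Thread* self = art::Thread::Current();
  // Entering Runnable holds art::Locks::mutator_lock_ in shared mode, satisfying the
  // REQUIRES_SHARED annotation; allow_disallow_lock_ is taken inside the call.
  art::ScopedObjectAccess soa(self);
  jlong unused = 0;  // Never read when tag_count == 0, but must be non-null.
  return table->GetTaggedObjects(env, /* tag_count */ 0, &unused, count, objects, tags);
}
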
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 67e5a81..be95600 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -54,7 +54,7 @@
         // Slow-path.
         ref = reinterpret_cast<MirrorType*>(Mark(ref));
         // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
-        // updates before us, but it's ok.
+        // updates before us, but it's OK.
         if (kAlwaysUpdateField && ref != old_ref) {
           obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
               offset, old_ref, ref);
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 837ee2d..52be2df 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -23,6 +23,7 @@
 #include "common_runtime_test.h"
 #include "dex_file.h"
 #include "scoped_thread_state_change-inl.h"
+#include "utils.h"
 #include "verifier_log_mode.h"
 
 namespace art {
@@ -40,7 +41,13 @@
     std::string error_msg;
     MethodVerifier::FailureKind failure = MethodVerifier::VerifyClass(
         self, klass, nullptr, true, HardFailLogMode::kLogWarning, &error_msg);
-    ASSERT_TRUE(failure == MethodVerifier::kNoFailure) << error_msg;
+
+    if (StartsWith(descriptor, "Ljava/lang/invoke/")) {
+      ASSERT_TRUE(failure == MethodVerifier::kSoftFailure ||
+                  failure == MethodVerifier::kNoFailure) << error_msg;
+    } else {
+      ASSERT_TRUE(failure == MethodVerifier::kNoFailure) << error_msg;
+    }
   }
 
   void VerifyDexFile(const DexFile& dex)
diff --git a/test/903-hello-tagging/expected.txt b/test/903-hello-tagging/expected.txt
index e69de29..872b79b 100644
--- a/test/903-hello-tagging/expected.txt
+++ b/test/903-hello-tagging/expected.txt
@@ -0,0 +1,10 @@
+18
+<nothing>
+18
+[<1;1>, <11;1>, <2;2>, <12;2>, <3;3>, <13;3>, <4;4>, <14;4>, <5;5>, <15;5>, <6;6>, <16;6>, <7;7>, <17;7>, <8;8>, <18;8>, <9;9>, <19;9>]
+4
+[<2;2>, <12;2>, <5;5>, <15;5>]
+18
+[<null;1>, <null;1>, <null;2>, <null;2>, <null;3>, <null;3>, <null;4>, <null;4>, <null;5>, <null;5>, <null;6>, <null;6>, <null;7>, <null;7>, <null;8>, <null;8>, <null;9>, <null;9>]
+18
+[<1;0>, <2;0>, <3;0>, <4;0>, <5;0>, <6;0>, <7;0>, <8;0>, <9;0>, <11;0>, <12;0>, <13;0>, <14;0>, <15;0>, <16;0>, <17;0>, <18;0>, <19;0>]
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index 2856a39..a8aedb4 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -15,11 +15,14 @@
  */
 
 import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
 
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[1]);
     doTest();
+    testGetTaggedObjects();
   }
 
   public static void doTest() {
@@ -68,6 +71,100 @@
     }
   }
 
+  private static void testGetTaggedObjects() {
+    // Use an array list to ensure that the objects stay live for a bit. Also gives us a source
+    // to compare to. We use index % 10 as the tag.
+    ArrayList<Object> l = new ArrayList<>();
+
+    for (int i = 0; i < 20; i++) {
+      Integer o = new Integer(i);
+      l.add(o);
+      if (i % 10 != 0) {
+        setTag(o, i % 10);
+      }
+    }
+
+    testGetTaggedObjectsRun(l, null, false, false);
+    testGetTaggedObjectsRun(l, null, true, true);
+    testGetTaggedObjectsRun(l, new long[] { 2, 5 }, true, true);
+    testGetTaggedObjectsRun(l, null, false, true);
+    testGetTaggedObjectsRun(l, null, true, false);
+  }
+
+  private static void testGetTaggedObjectsRun(ArrayList<Object> l, long[] searchTags,
+      boolean returnObjects, boolean returnTags) {
+    Object[] result = getTaggedObjects(searchTags, returnObjects, returnTags);
+
+    Object[] objects = (Object[])result[0];
+    long[] tags = (long[])result[1];
+    int count = (int)result[2];
+
+    System.out.println(count);
+    printArraysSorted(objects, tags);
+  }
+
+  private static void printArraysSorted(Object[] objects, long[] tags) {
+    if (objects == null && tags == null) {
+      System.out.println("<nothing>");
+      return;
+    }
+
+    int l1 = objects == null ? 0 : objects.length;
+    int l2 = tags == null ? 0 : tags.length;
+    int l = Math.max(l1, l2);
+    Pair[] tmp = new Pair[l];
+    for (int i = 0; i < l; i++) {
+      tmp[i] = new Pair(objects == null ? null : objects[i], tags == null ? 0 : tags[i]);
+    }
+
+    Arrays.sort(tmp);
+
+    System.out.println(Arrays.toString(tmp));
+  }
+
+  private static class Pair implements Comparable<Pair> {
+    Object obj;
+    long tag;
+    public Pair(Object o, long t) {
+      obj = o;
+      tag = t;
+    }
+
+    public int compareTo(Pair p) {
+      if (tag != p.tag) {
+        return Long.compare(tag, p.tag);
+      }
+
+      if ((obj instanceof Comparable) && (p.obj instanceof Comparable)) {
+        // Not strictly correct (unchecked cast), but good enough for test ordering.
+        int result = ((Comparable<Object>)obj).compareTo(p.obj);
+        if (result != 0) {
+          return result;
+        }
+      }
+
+      if (obj != null && p.obj != null) {
+        return obj.hashCode() - p.obj.hashCode();
+      }
+
+      if (obj != null) {
+        return 1;
+      }
+
+      if (p.obj != null) {
+        return -1;
+      }
+
+      return hashCode() - p.hashCode();
+    }
+
+    public String toString() {
+      return "<" + obj + ";" + tag + ">";
+    }
+  }
+
   private static native void setTag(Object o, long tag);
   private static native long getTag(Object o);
+  private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
+      boolean returnTags);
 }
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index 7d692fb..bed4e5d 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -21,9 +21,12 @@
 #include <stdio.h>
 #include <vector>
 
+#include "jni.h"
+#include "ScopedLocalRef.h"
+#include "ScopedPrimitiveArray.h"
+
 #include "art_method-inl.h"
 #include "base/logging.h"
-#include "jni.h"
 #include "openjdkjvmti/jvmti.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
@@ -56,6 +59,94 @@
   return tag;
 }
 
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env,
+                                                                     jclass,
+                                                                     jlongArray searchTags,
+                                                                     jboolean returnObjects,
+                                                                     jboolean returnTags) {
+  ScopedLongArrayRO scoped_array(env);
+  if (searchTags != nullptr) {
+    scoped_array.reset(searchTags);
+  }
+  const jlong* tag_ptr = scoped_array.get();
+  if (tag_ptr == nullptr) {
+    // Pass a non-null dummy; GetObjectsWithTags rejects null tags even when tag_count is 0.
+    tag_ptr = reinterpret_cast<const jlong*>(1);
+  }
+
+  jint result_count;
+  jobject* result_object_array;
+  jobject** result_object_array_ptr = returnObjects == JNI_TRUE ? &result_object_array : nullptr;
+  jlong* result_tag_array;
+  jlong** result_tag_array_ptr = returnTags == JNI_TRUE ? &result_tag_array : nullptr;
+
+  jvmtiError ret = jvmti_env->GetObjectsWithTags(scoped_array.size(),
+                                                 tag_ptr,
+                                                 &result_count,
+                                                 result_object_array_ptr,
+                                                 result_tag_array_ptr);
+  if (ret != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(ret, &err);
+    printf("Failure running GetObjectsWithTags: %s\n", err);
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+    return nullptr;
+  }
+
+  CHECK_GE(result_count, 0);
+
+  ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/Object"));
+  if (obj_class.get() == nullptr) {
+    return nullptr;
+  }
+
+  jobjectArray resultObjectArray = nullptr;
+  if (returnObjects == JNI_TRUE) {
+    resultObjectArray = env->NewObjectArray(result_count, obj_class.get(), nullptr);
+    if (resultObjectArray == nullptr) {
+      return nullptr;
+    }
+    for (jint i = 0; i < result_count; ++i) {
+      env->SetObjectArrayElement(resultObjectArray, i, result_object_array[i]);
+    }
+  }
+
+  jlongArray resultTagArray = nullptr;
+  if (returnTags == JNI_TRUE) {
+    resultTagArray = env->NewLongArray(result_count);
+    env->SetLongArrayRegion(resultTagArray, 0, result_count, result_tag_array);
+  }
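+
+  // The JVMTI-allocated result buffers have been copied into Java arrays above and are no
+  // longer needed; hand them back to the environment.
+  if (result_object_array_ptr != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(result_object_array));
+  }
+  if (result_tag_array_ptr != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(result_tag_array));
+  }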
+
+  jobject count_integer;
+  {
+    ScopedLocalRef<jclass> integer_class(env, env->FindClass("java/lang/Integer"));
+    jmethodID methodID = env->GetMethodID(integer_class.get(), "<init>", "(I)V");
+    count_integer = env->NewObject(integer_class.get(), methodID, result_count);
+    if (count_integer == nullptr) {
+      return nullptr;
+    }
+  }
+
+  jobjectArray resultArray = env->NewObjectArray(3, obj_class.get(), nullptr);
+  if (resultArray == nullptr) {
+    return nullptr;
+  }
+  env->SetObjectArrayElement(resultArray, 0, resultObjectArray);
+  env->SetObjectArrayElement(resultArray, 1, resultTagArray);
+  env->SetObjectArrayElement(resultArray, 2, count_integer);
+
+  return resultArray;
+}
+
 // Don't do anything
 jint OnLoad(JavaVM* vm,
             char* options ATTRIBUTE_UNUSED,
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index a3c16e6..b140645 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -370,13 +370,16 @@
 # Tests that are broken with GC stress.
 # * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
 #   hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 908-gc-start-finish expects GCs to be run only at well-defined points. The reduced heap
+#   size makes this non-deterministic.
 # * 961-default-iface-resolution-gen and 964-default-iface-init-gen are very long tests that often
 #   will take more than the timeout to run when gcstress is enabled. This is because gcstress
 #   slows down allocations significantly which these tests do a lot.
 TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
   137-cfi \
+  908-gc-start-finish \
   961-default-iface-resolution-gen \
   964-default-iface-init-gen
 
 ifneq (,$(filter gcstress,$(GC_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \