Merge "Fix the documentation of all-run-test-names."
diff --git a/runtime/base/memory_tool.h b/runtime/base/memory_tool.h
index 31162a3..e0bdcfe 100644
--- a/runtime/base/memory_tool.h
+++ b/runtime/base/memory_tool.h
@@ -27,9 +27,17 @@
 
 #include <sanitizer/asan_interface.h>
 #define ADDRESS_SANITIZER
+
+#ifdef ART_ENABLE_ADDRESS_SANITIZER
 #define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
 #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
 #define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+#else
+#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+#endif
+
 #define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
 #define RUNNING_ON_MEMORY_TOOL 1U
 constexpr bool kMemoryToolIsValgrind = false;
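
For illustration, a minimal standalone sketch of why the non-ASan fallbacks above expand to do { (void)(p); (void)(s); } while (0): the macro stays a single statement (safe in an unbraced if/else) and still consumes its arguments, so callers do not pick up unused-variable warnings. SKETCH_MAKE_NOACCESS and ReleaseBuffer are hypothetical, mirroring the fallback definitions, and are not part of the patch.

#include <cstddef>

// Hypothetical mirror of the non-ASan fallback above.
#define SKETCH_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)

void ReleaseBuffer(void* buffer, size_t size, bool poison) {
  if (poison)
    SKETCH_MAKE_NOACCESS(buffer, size);  // Expands to one statement; no dangling-else surprises.
  // Without the do/while wrapper, an empty "#define ... /* nothing */" would leave the
  // arguments unused here and could change how an if/else around the call parses.
}
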
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b5d5c34..eed5cf2 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -73,10 +73,12 @@
   }
 }
 
-void ConcurrentCopying::MarkHeapReference(
-    mirror::HeapReference<mirror::Object>* from_ref ATTRIBUTE_UNUSED) {
-  // Unused, usually called from mod union tables.
-  UNIMPLEMENTED(FATAL);
+void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
+  // Used for preserving soft references. It should be OK to not have a CAS here since there
+  // should be no other threads which can trigger read barriers on the same referent during
+  // reference processing.
+  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
+  DCHECK(!from_ref->IsNull());
 }
 
 ConcurrentCopying::~ConcurrentCopying() {
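
For illustration, a standalone sketch of the non-CAS update described in the new comment. Object, Mark() and HeapReferenceSketch are hypothetical stand-ins for the ART types (Mark() is stubbed as identity so the sketch compiles); they are not the real implementation.

struct Object {};

// Hypothetical stand-in: the real Mark() returns the to-space copy of from_ref.
Object* Mark(Object* from_ref) { return from_ref; }

struct HeapReferenceSketch {
  Object* ptr_;
  Object* AsMirrorPtr() const { return ptr_; }
  void Assign(Object* obj) { ptr_ = obj; }  // Plain store, not a CAS.
};

// Only the GC thread touches this slot during reference processing, so a racing read
// barrier on the same referent is not expected and the plain store is sufficient.
void MarkHeapReferenceSketch(HeapReferenceSketch* from_ref) {
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
}
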
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 4f92ea0..f1317b8 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -121,7 +121,7 @@
   void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   accounting::ObjectStack* GetAllocationStack();
   accounting::ObjectStack* GetLiveStack();
-  void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ProcessMarkStack() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool ProcessMarkStackOnce() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
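
Spelling the declaration as virtual ... OVERRIDE turns any signature drift against the base-class declaration into a compile error. A minimal sketch, assuming OVERRIDE maps to the C++11 override specifier (as in ART's base/macros.h); the Sketch types are hypothetical.

struct GarbageCollectorSketch {
  virtual ~GarbageCollectorSketch() {}
  virtual void ProcessMarkStack() = 0;
};

struct ConcurrentCopyingSketch : GarbageCollectorSketch {
  void ProcessMarkStack() override {}       // OK: matches the base declaration.
  // void ProcessMarkStack(bool) override;  // Would not compile: marked override, overrides nothing.
};
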
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index e10bef4..cfc4f96 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -181,8 +181,8 @@
   void RecordFreeLOS(const ObjectBytePair& freed);
   void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
 
-  // Helper functions for querying if objects are marked at compile time. These are used for
-  // reading system weaks, processing references.
+  // Helper functions for querying if objects are marked. These are used for processing references,
+  // and will be used for reading system weaks while the GC is running.
   virtual mirror::Object* IsMarked(mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
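
For illustration, a hedged sketch of a caller of these hooks: sweeping a table of system weaks with the collector's IsMarked() while the GC is running. The table and types are hypothetical stand-ins, not ART classes.

#include <vector>

struct Object {};

struct CollectorSketch {
  // Returns the marked (possibly forwarded) object, or null if it is not marked.
  virtual Object* IsMarked(Object* obj) = 0;
  virtual ~CollectorSketch() {}
};

void SweepWeakTableSketch(std::vector<Object*>* table, CollectorSketch* collector) {
  for (Object*& entry : *table) {
    if (entry != nullptr) {
      entry = collector->IsMarked(entry);  // Keep marked entries, clear dead ones to null.
    }
  }
}
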
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 65e6b40..0623fd4 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -142,7 +142,7 @@
 
 inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
   if (obj == nullptr) {
-    return obj;
+    return nullptr;
   }
   if (kUseBakerOrBrooksReadBarrier) {
     // Verify all the objects have the correct forward pointer installed.
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e0d6d6b..d59a21d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -337,6 +337,11 @@
   }
 }
 
+mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
+  MarkObject(obj, nullptr, MemberOffset(0));
+  return obj;
+}
+
 inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
   DCHECK(obj != nullptr);
   if (MarkObjectParallel(obj)) {
@@ -498,11 +503,6 @@
   return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
 }
 
-mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
-  MarkObject(obj, nullptr, MemberOffset(0));
-  return obj;
-}
-
 void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
   MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
 }
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index c13755c..7692b06 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -205,7 +205,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
  protected:
-  // Returns true if the object has its bit set in the mark bitmap.
+  // Returns the object if it is marked in the heap bitmap, otherwise returns null.
   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
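
A standalone sketch of the contract in the updated comment, with a hypothetical bitmap type: return the object itself when its mark bit is set, null otherwise, so the same signature also works for collectors that return a forwarded address. Not ART code.

struct Object {};

struct MarkBitmapSketch {
  // Hypothetical placeholder for the heap bitmap's membership test.
  bool Test(const Object* obj) const { return obj != nullptr; }
};

Object* IsMarkedSketch(const MarkBitmapSketch& mark_bitmap, Object* object) {
  return mark_bitmap.Test(object) ? object : nullptr;
}
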
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 256cdd2..39ba743 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -138,7 +138,8 @@
     if (concurrent) {
       StartPreservingReferences(self);
     }
-    // TODO: Add smarter logic for preserving soft references.
+    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
+    // mark if the SoftReference is supposed to be preserved.
     soft_reference_queue_.ForwardSoftReferences(collector);
     collector->ProcessMarkStack();
     if (concurrent) {
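
Purely illustrative sketch of the "conditional mark" idea in the extended TODO; the policy hook and types are hypothetical, not ART code. Each soft referent would be forwarded only when the current policy says to preserve it, and dropped otherwise.

struct Object {};

struct CollectorSketch {
  virtual Object* MarkObject(Object* obj) = 0;  // Marks/forwards the referent.
  virtual ~CollectorSketch() {}
};

bool ShouldPreserveSoftReferencesSketch() { return true; }  // Hypothetical policy hook.

Object* ForwardSoftReferentSketch(CollectorSketch* collector, Object* referent) {
  if (referent != nullptr && ShouldPreserveSoftReferencesSketch()) {
    return collector->MarkObject(referent);  // Preserve: conditionally mark the referent.
  }
  return nullptr;  // Do not preserve: leave the referent for clearing/enqueueing.
}
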
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index e8769f9..1acc442 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -33,6 +33,11 @@
 // Size of Dex virtual registers.
 static constexpr size_t kVRegSize = 4;
 
+// We encode the number of bytes needed for writing a value on 3 bits
+// (i.e. up to 8 distinct values), for values that we know fit in at
+// most 32 bits.
+static constexpr size_t kNumberOfBitForNumberOfBytesForEncoding = 3;
+
 class CodeInfo;
 class StackMapEncoding;
 
@@ -991,17 +996,11 @@
   }
 
   void SetEncodingAt(size_t bit_offset, size_t number_of_bytes) {
-    // We encode the number of bytes needed for writing a value on 3 bits,
-    // for values that we know are maximum 32bits.
-    region_.StoreBit(bit_offset, (number_of_bytes & 1));
-    region_.StoreBit(bit_offset + 1, (number_of_bytes & 2));
-    region_.StoreBit(bit_offset + 2, (number_of_bytes & 4));
+    region_.StoreBits(bit_offset, number_of_bytes, kNumberOfBitForNumberOfBytesForEncoding);
   }
 
   size_t GetNumberOfBytesForEncoding(size_t bit_offset) const {
-    return region_.LoadBit(bit_offset)
-        + (region_.LoadBit(bit_offset + 1) << 1)
-        + (region_.LoadBit(bit_offset + 2) << 2);
+    return region_.LoadBits(bit_offset, kNumberOfBitForNumberOfBytesForEncoding);
   }
 
   bool HasInlineInfo() const {
@@ -1143,10 +1142,14 @@
 
   static constexpr int kHasInlineInfoBitOffset = (kEncodingInfoOffset * kBitsPerByte);
   static constexpr int kInlineInfoBitOffset = kHasInlineInfoBitOffset + 1;
-  static constexpr int kDexRegisterMapBitOffset = kInlineInfoBitOffset + 3;
-  static constexpr int kDexPcBitOffset = kDexRegisterMapBitOffset + 3;
-  static constexpr int kNativePcBitOffset = kDexPcBitOffset + 3;
-  static constexpr int kRegisterMaskBitOffset = kNativePcBitOffset + 3;
+  static constexpr int kDexRegisterMapBitOffset =
+      kInlineInfoBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
+  static constexpr int kDexPcBitOffset =
+      kDexRegisterMapBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
+  static constexpr int kNativePcBitOffset =
+      kDexPcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
+  static constexpr int kRegisterMaskBitOffset =
+      kNativePcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
 
   MemoryRegion GetStackMaps(const StackMapEncoding& encoding) const {
     return region_.size() == 0
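
A standalone sketch of the 3-bit field packed above, using a plain uint32_t instead of MemoryRegion: a shift-and-mask store/load over 3 bits is what the removed per-bit StoreBit/LoadBit calls spelled out, and what StoreBits/LoadBits now do in one call. kBitsForNumberOfBytesSketch is a local stand-in for the constant added above.

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kBitsForNumberOfBytesSketch = 3;  // Stand-in for the constant added above.

void SetEncodingAtSketch(uint32_t* word, size_t bit_offset, uint32_t number_of_bytes) {
  assert(number_of_bytes < (1u << kBitsForNumberOfBytesSketch));
  const uint32_t mask = ((1u << kBitsForNumberOfBytesSketch) - 1u) << bit_offset;
  *word = (*word & ~mask) | (number_of_bytes << bit_offset);
}

uint32_t GetNumberOfBytesForEncodingSketch(uint32_t word, size_t bit_offset) {
  return (word >> bit_offset) & ((1u << kBitsForNumberOfBytesSketch) - 1u);
}
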
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 7c40eb7..60c9b5e 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -381,23 +381,7 @@
   Locks::thread_suspend_count_lock_->AssertNotHeld(self);
   CHECK_NE(self->GetState(), kRunnable);
 
-  std::vector<Thread*> runnable_threads;
-  std::vector<Thread*> other_threads;
-
-  // Suspend all threads once.
-  {
-    MutexLock mu(self, *Locks::thread_list_lock_);
-    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
-    // Update global suspend all state for attaching threads.
-    ++suspend_all_count_;
-    // Increment everybody's suspend count (except our own).
-    for (const auto& thread : list_) {
-      if (thread == self) {
-        continue;
-      }
-      thread->ModifySuspendCount(self, +1, nullptr, false);
-    }
-  }
+  SuspendAllInternal(self, self, nullptr);
 
   // Run the flip callback for the collector.
   Locks::mutator_lock_->ExclusiveLock(self);
@@ -406,6 +390,8 @@
   collector->RegisterPause(NanoTime() - start_time);
 
   // Resume runnable threads.
+  std::vector<Thread*> runnable_threads;
+  std::vector<Thread*> other_threads;
   {
     MutexLock mu(self, *Locks::thread_list_lock_);
     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 1435607..9c52819 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -583,24 +583,20 @@
 
 const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
-  // Perform pointer equality tests for conflict to avoid virtual method dispatch.
+  // Perform pointer equality tests for undefined and conflict to avoid virtual method dispatch.
+  const UndefinedType& undefined = reg_types->Undefined();
   const ConflictType& conflict = reg_types->Conflict();
-  if (IsUndefined() || incoming_type.IsUndefined()) {
+  DCHECK_EQ(this == &undefined, IsUndefined());
+  DCHECK_EQ(&incoming_type == &undefined, incoming_type.IsUndefined());
+  DCHECK_EQ(this == &conflict, IsConflict());
+  DCHECK_EQ(&incoming_type == &conflict, incoming_type.IsConflict());
+  if (this == &undefined || &incoming_type == &undefined) {
     // There is a difference between undefined and conflict. Conflicts may be copied around, but
     // not used. Undefined registers must not be copied. So any merge with undefined should return
     // undefined.
-    if (IsUndefined()) {
-      return *this;
-    }
-    return incoming_type;
-  } else if (this == &conflict) {
-    DCHECK(IsConflict());
-    return *this;  // Conflict MERGE * => Conflict
-  } else if (&incoming_type == &conflict) {
-    DCHECK(incoming_type.IsConflict());
-    return incoming_type;  // * MERGE Conflict => Conflict
-  } else if (IsUndefined() || incoming_type.IsUndefined()) {
-    return conflict;  // Unknown MERGE * => Conflict
+    return undefined;
+  } else if (this == &conflict || &incoming_type == &conflict) {
+    return conflict;  // (Conflict MERGE *) or (* MERGE Conflict) => Conflict
   } else if (IsConstant() && incoming_type.IsConstant()) {
     const ConstantType& type1 = *down_cast<const ConstantType*>(this);
     const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
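
A sketch of the merge precedence the rewritten fast path encodes, using a hypothetical enum in place of the RegType singletons: Undefined wins over everything (it must not be copied), then Conflict, and only after that do the detailed per-kind rules apply. Not ART code.

enum class RegKindSketch { kUndefined, kConflict, kOther };

RegKindSketch MergeSketch(RegKindSketch a, RegKindSketch b) {
  if (a == RegKindSketch::kUndefined || b == RegKindSketch::kUndefined) {
    return RegKindSketch::kUndefined;  // Undefined MERGE * => Undefined.
  }
  if (a == RegKindSketch::kConflict || b == RegKindSketch::kConflict) {
    return RegKindSketch::kConflict;   // (Conflict MERGE *) or (* MERGE Conflict) => Conflict.
  }
  return RegKindSketch::kOther;        // Fall through to the detailed per-kind merge rules.
}
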
diff --git a/test/800-smali/smali/b_22331663.smali b/test/800-smali/smali/b_22331663.smali
index 057fc7f..bae75c2 100644
--- a/test/800-smali/smali/b_22331663.smali
+++ b/test/800-smali/smali/b_22331663.smali
@@ -4,29 +4,29 @@
 
 .method public static run(Z)V
 .registers 6
-       # Make v4 defined, just use null.
-       const v4, 0
+       if-eqz v5, :if_eqz_target
 
-       if-eqz v5, :Label2
-
-:Label1
        # Construct a java.lang.Object completely, and throw a new exception.
        new-instance v4, Ljava/lang/Object;
        invoke-direct {v4}, Ljava/lang/Object;-><init>()V
 
        new-instance v3, Ljava/lang/RuntimeException;
        invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V
+:throw1_begin
        throw v3
+:throw1_end
 
-:Label2
+:if_eqz_target
        # Allocate a java.lang.Object (do not initialize), and throw a new exception.
        new-instance v4, Ljava/lang/Object;
 
        new-instance v3, Ljava/lang/RuntimeException;
        invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V
+:throw2_begin
        throw v3
+:throw2_end
 
-:Label3
+:catch_entry
        # Catch handler. Here we had to merge the uninitialized with the initialized reference,
        # which creates a conflict. Copy the conflict, and then return. This should not make the
        # verifier fail the method.
@@ -34,5 +34,6 @@
 
        return-void
 
-.catchall {:Label1 .. :Label3} :Label3
+.catchall {:throw1_begin .. :throw1_end} :catch_entry
+.catchall {:throw2_begin .. :throw2_end} :catch_entry
 .end method