Merge "Use test's `-d' option when checking for a directory."
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index f48f4c3..e5e9964 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -550,7 +550,7 @@
 }
 
 void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
-                                           const uint64_t* data,
+                                           const int64_t* data,
                                            uint32_t element_count,
                                            uint32_t dex_offset) {
   for (uint32_t i = 0; i < element_count; ++i) {
@@ -783,6 +783,16 @@
       break;
     }
 
+    case Instruction::SUB_FLOAT: {
+      Binop_23x<HSub>(instruction, Primitive::kPrimFloat);
+      break;
+    }
+
+    case Instruction::SUB_DOUBLE: {
+      Binop_23x<HSub>(instruction, Primitive::kPrimDouble);
+      break;
+    }
+
     case Instruction::ADD_INT_2ADDR: {
       Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
       break;
@@ -833,6 +843,16 @@
       break;
     }
 
+    case Instruction::SUB_FLOAT_2ADDR: {
+      Binop_12x<HSub>(instruction, Primitive::kPrimFloat);
+      break;
+    }
+
+    case Instruction::SUB_DOUBLE_2ADDR: {
+      Binop_12x<HSub>(instruction, Primitive::kPrimDouble);
+      break;
+    }
+
     case Instruction::MUL_INT_2ADDR: {
       Binop_12x<HMul>(instruction, Primitive::kPrimInt);
       break;
@@ -939,25 +959,29 @@
 
       switch (payload->element_width) {
         case 1:
-          BuildFillArrayData(null_check, data, element_count, Primitive::kPrimByte, dex_offset);
+          BuildFillArrayData(null_check,
+                             reinterpret_cast<const int8_t*>(data),
+                             element_count,
+                             Primitive::kPrimByte,
+                             dex_offset);
           break;
         case 2:
           BuildFillArrayData(null_check,
-                             reinterpret_cast<const uint16_t*>(data),
+                             reinterpret_cast<const int16_t*>(data),
                              element_count,
                              Primitive::kPrimShort,
                              dex_offset);
           break;
         case 4:
           BuildFillArrayData(null_check,
-                             reinterpret_cast<const uint32_t*>(data),
+                             reinterpret_cast<const int32_t*>(data),
                              element_count,
                              Primitive::kPrimInt,
                              dex_offset);
           break;
         case 8:
           BuildFillWideArrayData(null_check,
-                                 reinterpret_cast<const uint64_t*>(data),
+                                 reinterpret_cast<const int64_t*>(data),
                                  element_count,
                                  dex_offset);
           break;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c5e02db..b55ef07 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -154,7 +154,7 @@
   // Fills the given object with data as specified in the fill-array-data
   // instruction. The data must be for long and double arrays.
   void BuildFillWideArrayData(HInstruction* object,
-                              const uint64_t* data,
+                              const int64_t* data,
                               uint32_t element_count,
                               uint32_t dex_offset);
 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 34d8bfe..03951e2 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,6 +16,7 @@
 
 #include <functional>
 
+#include "base/macros.h"
 #include "builder.h"
 #include "code_generator_arm.h"
 #include "code_generator_arm64.h"
@@ -433,5 +434,121 @@
   TestCode(data, true, 12);
 }
 
+TEST(CodegenTest, MaterializedCondition1) {
+  // Check that conditions are materialized correctly. A materialized condition
+  // should yield `1` if it evaluated to true, and `0` otherwise.
+  // We force the materialization of comparisons for different combinations of
+  // inputs and check the results.
+
+  int lhs[] = {1, 2, -1, 2, 0xabc};
+  int rhs[] = {2, 1, 2, -1, 0xabc};
+
+  for (size_t i = 0; i < arraysize(lhs); i++) {
+    ArenaPool pool;
+    ArenaAllocator allocator(&pool);
+    HGraph* graph = new (&allocator) HGraph(&allocator);
+
+    HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(entry_block);
+    graph->SetEntryBlock(entry_block);
+    entry_block->AddInstruction(new (&allocator) HGoto());
+    HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(code_block);
+    HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(exit_block);
+    exit_block->AddInstruction(new (&allocator) HExit());
+
+    entry_block->AddSuccessor(code_block);
+    code_block->AddSuccessor(exit_block);
+    graph->SetExitBlock(exit_block);
+
+    HIntConstant cst_lhs(lhs[i]);
+    code_block->AddInstruction(&cst_lhs);
+    HIntConstant cst_rhs(rhs[i]);
+    code_block->AddInstruction(&cst_rhs);
+    HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+    code_block->AddInstruction(&cmp_lt);
+    HReturn ret(&cmp_lt);
+    code_block->AddInstruction(&ret);
+
+    auto hook_before_codegen = [](HGraph* graph) {
+      HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+      HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+      block->InsertInstructionBefore(move, block->GetLastInstruction());
+    };
+
+    RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+  }
+}
+
+TEST(CodegenTest, MaterializedCondition2) {
+  // Check that HIf correctly interprets a materialized condition.
+  // We force the materialization of comparisons for different combinations of
+  // inputs. An HIf takes the materialized condition as input and returns a
+  // value that we verify.
+
+  int lhs[] = {1, 2, -1, 2, 0xabc};
+  int rhs[] = {2, 1, 2, -1, 0xabc};
+
+
+  for (size_t i = 0; i < arraysize(lhs); i++) {
+    ArenaPool pool;
+    ArenaAllocator allocator(&pool);
+    HGraph* graph = new (&allocator) HGraph(&allocator);
+
+    HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(entry_block);
+    graph->SetEntryBlock(entry_block);
+    entry_block->AddInstruction(new (&allocator) HGoto());
+
+    HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(if_block);
+    HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(if_true_block);
+    HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(if_false_block);
+    HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+    graph->AddBlock(exit_block);
+    exit_block->AddInstruction(new (&allocator) HExit());
+
+    graph->SetEntryBlock(entry_block);
+    entry_block->AddSuccessor(if_block);
+    if_block->AddSuccessor(if_true_block);
+    if_block->AddSuccessor(if_false_block);
+    if_true_block->AddSuccessor(exit_block);
+    if_false_block->AddSuccessor(exit_block);
+    graph->SetExitBlock(exit_block);
+
+    HIntConstant cst_lhs(lhs[i]);
+    if_block->AddInstruction(&cst_lhs);
+    HIntConstant cst_rhs(rhs[i]);
+    if_block->AddInstruction(&cst_rhs);
+    HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+    if_block->AddInstruction(&cmp_lt);
+    // We insert a temporary to separate the HIf from the HLessThan and force
+    // the materialization of the condition.
+    HTemporary force_materialization(0);
+    if_block->AddInstruction(&force_materialization);
+    HIf if_lt(&cmp_lt);
+    if_block->AddInstruction(&if_lt);
+
+    HIntConstant cst_lt(1);
+    if_true_block->AddInstruction(&cst_lt);
+    HReturn ret_lt(&cst_lt);
+    if_true_block->AddInstruction(&ret_lt);
+    HIntConstant cst_ge(0);
+    if_false_block->AddInstruction(&cst_ge);
+    HReturn ret_ge(&cst_ge);
+    if_false_block->AddInstruction(&ret_ge);
+
+    auto hook_before_codegen = [](HGraph* graph) {
+      HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+      HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+      block->InsertInstructionBefore(move, block->GetLastInstruction());
+    };
+
+    RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+  }
+}
 
 }  // namespace art
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 7b6aac9..d9a433b 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -34,7 +34,7 @@
 
   void Reset() OVERRIDE;
 
-  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE;
+  void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetSP(uintptr_t new_sp) OVERRIDE {
     bool success = SetGPR(SP, new_sp);
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index be94c2e..138c2fd 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -19,6 +19,8 @@
 
 #include <assert.h>
 #include <string.h>
+#include <type_traits>
+
 #include "base/macros.h"
 
 namespace art {
@@ -65,16 +67,9 @@
 
 template<typename To, typename From>     // use like this: down_cast<T*>(foo);
 inline To down_cast(From* f) {                   // so we only accept pointers
-  // Ensures that To is a sub-type of From *.  This test is here only
-  // for compile-time type checking, and has no overhead in an
-  // optimized build at run-time, as it will be optimized away
-  // completely.
-  if (false) {
-    implicit_cast<From*, To>(0);
-  }
+  static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
+                "down_cast unsafe as To is not a subtype of From");
 
-  //
-  // assert(f == NULL || dynamic_cast<To>(f) != NULL);  // RTTI: debug mode only!
   return static_cast<To>(f);
 }
 
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6362a98..423ea77 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -646,6 +646,7 @@
 }
 #endif
 
+#if ART_USE_FUTEXES
 void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
   // Owner holds it exclusively, hang up.
   ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -657,6 +658,7 @@
   }
   --num_pending_readers_;
 }
+#endif
 
 bool ReaderWriterMutex::SharedTryLock(Thread* self) {
   DCHECK(self == NULL || self == Thread::Current());
@@ -726,7 +728,7 @@
   CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
 #if !defined(__APPLE__)
   // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
-  CHECK_MUTEX_CALL(pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC));
+  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
 #endif
   CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
 #endif
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 25fdd59..628231a 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -360,10 +360,10 @@
   virtual void Dump(std::ostream& os) const;
 
  private:
+#if ART_USE_FUTEXES
   // Out-of-inline path for handling contention for a SharedLock.
   void HandleSharedLockContention(Thread* self, int32_t cur_state);
 
-#if ART_USE_FUTEXES
   // -1 implies held exclusive, +ve shared held by state_ many owners.
   AtomicInteger state_;
   // Exclusive owner. Modification guarded by this mutex.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 18bbc38..d9061c8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2017,7 +2017,7 @@
   } else if (error == JDWP::ERR_NONE) {
     mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
     CHECK(c != nullptr);
-    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
+    mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
     CHECK(f != nullptr);
     mirror::Object* group = f->GetObject(thread_object);
     CHECK(group != nullptr);
@@ -2058,8 +2058,7 @@
     return error;
   }
   ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
-  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
-  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
+  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
   CHECK(f != nullptr);
   mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
 
@@ -2078,9 +2077,7 @@
   mirror::Object* parent;
   {
     ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
-    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
-    CHECK(c != nullptr);
-    mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
+    mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
     CHECK(f != nullptr);
     parent = f->GetObject(thread_group);
   }
@@ -2095,12 +2092,20 @@
   CHECK(thread_group != nullptr);
 
   // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
-  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+  mirror::ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
   mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
+  {
+    // The "groups" field is declared as a java.util.List: check it really is
+    // an instance of java.util.ArrayList.
+    CHECK(groups_array_list != nullptr);
+    mirror::Class* java_util_ArrayList_class =
+        soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
+    CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
+  }
 
   // Get the array and size out of the ArrayList<ThreadGroup>...
-  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
-  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+  mirror::ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
+  mirror::ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
   mirror::ObjectArray<mirror::Object>* groups_array =
       array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
   const int32_t size = size_field->GetInt(groups_array_list);
@@ -2386,7 +2391,7 @@
 }
 
 void Dbg::ResumeVM() {
-  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
+  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
 }
 
 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index cb7adae..48e457f 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -246,7 +246,9 @@
    */
   static int64_t LastDebuggerActivity();
 
-  static void UndoDebuggerSuspensions();
+  static void UndoDebuggerSuspensions()
+    LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                   Locks::thread_suspend_count_lock_);
 
   /*
    * Class, Object, Array
@@ -459,7 +461,9 @@
   static void SuspendVM()
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
-  static void ResumeVM();
+  static void ResumeVM()
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
       LOCKS_EXCLUDED(Locks::mutator_lock_,
                      Locks::thread_list_lock_,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c2baa29..bceac44 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -437,7 +437,7 @@
 MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
                                            int prot_flags, std::string* out_error_str) {
   while (true) {
-    MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
+    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                        PROT_READ | PROT_WRITE, true, out_error_str);
     if (map != nullptr || request_begin == nullptr) {
       return map;
@@ -2138,6 +2138,13 @@
   } else {
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
+  if (IsGcConcurrent()) {
+    // Disable concurrent GC check so that we don't have spammy JNI requests.
+    // This gets recalculated in GrowForUtilization. It is important that it is disabled /
+    // calculated in the same thread so that there aren't any races that can cause it to become
+    // permanently disabled. b/17942071
+    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+  }
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
@@ -2956,9 +2963,6 @@
       self->IsHandlingStackOverflow()) {
     return;
   }
-  // We already have a request pending, no reason to start more until we update
-  // concurrent_start_bytes_.
-  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index f981522..fc3da36 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -633,7 +633,6 @@
     SetEntrypointsInstrumented(true);
   }
   ++quick_alloc_entry_points_instrumentation_counter_;
-  LOG(INFO) << "Counter: " << quick_alloc_entry_points_instrumentation_counter_;
 }
 
 void Instrumentation::UninstrumentQuickAllocEntryPointsLocked() {
@@ -643,7 +642,6 @@
   if (quick_alloc_entry_points_instrumentation_counter_ == 0) {
     SetEntrypointsInstrumented(false);
   }
-  LOG(INFO) << "Counter: " << quick_alloc_entry_points_instrumentation_counter_;
 }
 
 void Instrumentation::ResetQuickAllocEntryPoints() {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6e18904..f8c8fdb 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -666,6 +666,7 @@
     {
       MutexLock mu(self, *Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
+      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
       ++suspend_all_count_;
       ++debug_suspend_all_count_;
       // Increment everybody's suspend count (except our own).
@@ -757,6 +758,55 @@
   VLOG(threads) << *self << " self-reviving (debugger)";
 }
 
+void ThreadList::ResumeAllForDebugger() {
+  Thread* self = Thread::Current();
+  Thread* debug_thread = Dbg::GetDebugThread();
+  bool needs_resume = false;
+
+  VLOG(threads) << *self << " ResumeAllForDebugger starting...";
+
+  // Threads can't resume if we exclusively hold the mutator lock.
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    {
+      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      // Update global suspend all state for attaching threads.
+      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
+      needs_resume = (debug_suspend_all_count_ > 0);
+      if (needs_resume) {
+        --suspend_all_count_;
+        --debug_suspend_all_count_;
+        // Decrement everybody's suspend count (except our own).
+        for (const auto& thread : list_) {
+          if (thread == self || thread == debug_thread) {
+            continue;
+          }
+          if (thread->GetDebugSuspendCount() == 0) {
+            // This thread may have been individually resumed with ThreadReference.Resume.
+            continue;
+          }
+          VLOG(threads) << "requesting thread resume: " << *thread;
+          thread->ModifySuspendCount(self, -1, true);
+        }
+      } else {
+        // We've been asked to resume all threads without being asked to
+        // suspend them all before. Let's print a warning.
+        LOG(WARNING) << "Debugger attempted to resume all threads without "
+                     << "having suspended them all before.";
+      }
+    }
+  }
+
+  if (needs_resume) {
+    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+    Thread::resume_cond_->Broadcast(self);
+  }
+
+  VLOG(threads) << *self << " ResumeAllForDebugger complete";
+}
+
 void ThreadList::UndoDebuggerSuspensions() {
   Thread* self = Thread::Current();
 
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 9f47f9f..a7f2c53 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -105,6 +105,11 @@
   void SuspendSelfForDebugger()
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
 
+  // Resume all threads
+  void ResumeAllForDebugger()
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
+
   void UndoDebuggerSuspensions()
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index fc3c879..16338c4 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -54,6 +54,7 @@
 jclass WellKnownClasses::java_lang_ThreadGroup;
 jclass WellKnownClasses::java_lang_Throwable;
 jclass WellKnownClasses::java_nio_DirectByteBuffer;
+jclass WellKnownClasses::java_util_ArrayList;
 jclass WellKnownClasses::java_util_Collections;
 jclass WellKnownClasses::libcore_util_EmptyArray;
 jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
@@ -97,8 +98,10 @@
 jfieldID WellKnownClasses::java_lang_Thread_priority;
 jfieldID WellKnownClasses::java_lang_Thread_uncaughtHandler;
 jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_name;
+jfieldID WellKnownClasses::java_lang_ThreadGroup_parent;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup;
 jfieldID WellKnownClasses::java_lang_Throwable_cause;
 jfieldID WellKnownClasses::java_lang_Throwable_detailMessage;
@@ -110,6 +113,8 @@
 jfieldID WellKnownClasses::java_lang_reflect_Proxy_h;
 jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity;
 jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress;
+jfieldID WellKnownClasses::java_util_ArrayList_array;
+jfieldID WellKnownClasses::java_util_ArrayList_size;
 jfieldID WellKnownClasses::java_util_Collections_EMPTY_LIST;
 jfieldID WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
 jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data;
@@ -189,6 +194,7 @@
   java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
   java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
   java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
+  java_util_ArrayList = CacheClass(env, "java/util/ArrayList");
   java_util_Collections = CacheClass(env, "java/util/Collections");
   libcore_util_EmptyArray = CacheClass(env, "libcore/util/EmptyArray");
   org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
@@ -227,8 +233,10 @@
   java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
   java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
   java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+  java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "Ljava/util/List;");
   java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
   java_lang_ThreadGroup_name = CacheField(env, java_lang_ThreadGroup, false, "name", "Ljava/lang/String;");
+  java_lang_ThreadGroup_parent = CacheField(env, java_lang_ThreadGroup, false, "parent", "Ljava/lang/ThreadGroup;");
   java_lang_ThreadGroup_systemThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "systemThreadGroup", "Ljava/lang/ThreadGroup;");
   java_lang_Throwable_cause = CacheField(env, java_lang_Throwable, false, "cause", "Ljava/lang/Throwable;");
   java_lang_Throwable_detailMessage = CacheField(env, java_lang_Throwable, false, "detailMessage", "Ljava/lang/String;");
@@ -240,6 +248,8 @@
   java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
   java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
   java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
+  java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "array", "[Ljava/lang/Object;");
+  java_util_ArrayList_size = CacheField(env, java_util_ArrayList, false, "size", "I");
   java_util_Collections_EMPTY_LIST = CacheField(env, java_util_Collections, true, "EMPTY_LIST", "Ljava/util/List;");
   libcore_util_EmptyArray_STACK_TRACE_ELEMENT = CacheField(env, libcore_util_EmptyArray, true, "STACK_TRACE_ELEMENT", "[Ljava/lang/StackTraceElement;");
   org_apache_harmony_dalvik_ddmc_Chunk_data = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "data", "[B");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 790d7f7..d651b90 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -64,6 +64,7 @@
   static jclass java_lang_ThreadGroup;
   static jclass java_lang_Thread__UncaughtExceptionHandler;
   static jclass java_lang_Throwable;
+  static jclass java_util_ArrayList;
   static jclass java_util_Collections;
   static jclass java_nio_DirectByteBuffer;
   static jclass libcore_util_EmptyArray;
@@ -111,8 +112,10 @@
   static jfieldID java_lang_Thread_priority;
   static jfieldID java_lang_Thread_uncaughtHandler;
   static jfieldID java_lang_Thread_nativePeer;
+  static jfieldID java_lang_ThreadGroup_groups;
   static jfieldID java_lang_ThreadGroup_mainThreadGroup;
   static jfieldID java_lang_ThreadGroup_name;
+  static jfieldID java_lang_ThreadGroup_parent;
   static jfieldID java_lang_ThreadGroup_systemThreadGroup;
   static jfieldID java_lang_Throwable_cause;
   static jfieldID java_lang_Throwable_detailMessage;
@@ -121,6 +124,8 @@
   static jfieldID java_lang_Throwable_suppressedExceptions;
   static jfieldID java_nio_DirectByteBuffer_capacity;
   static jfieldID java_nio_DirectByteBuffer_effectiveDirectAddress;
+  static jfieldID java_util_ArrayList_array;
+  static jfieldID java_util_ArrayList_size;
   static jfieldID java_util_Collections_EMPTY_LIST;
   static jfieldID libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_data;
diff --git a/test/412-new-array/src/Main.java b/test/412-new-array/src/Main.java
index 3c74275..168420c 100644
--- a/test/412-new-array/src/Main.java
+++ b/test/412-new-array/src/Main.java
@@ -24,6 +24,8 @@
   public static void main(String[] args) throws Exception {
     $opt$TestAllocations();
     $opt$TestWithInitializations();
+    $opt$TestNegativeValueNewByteArray();
+    $opt$TestNegativeValueNewCharArray();
     testSmaliFilledNewArray();
     testSmaliFillArrayData();
     testSmaliVerifyError();
@@ -109,6 +111,24 @@
     assertEquals(obj2, i[1]);
   }
 
+  static void $opt$TestNegativeValueNewByteArray() {
+    // Use an array initializer to hint the use of filled-new-array.
+    byte[] a = { (byte)0xa0, (byte)0xa1, (byte)0xa2, (byte)0xa3,
+                 (byte)0xa4, (byte)0xa5, (byte)0xa6, (byte)0xa7 };
+    for (int i = 0; i < a.length; i++) {
+      assertEquals((byte)0xa0 + i, a[i]);
+    }
+  }
+
+  static void $opt$TestNegativeValueNewCharArray() {
+    // Use an array initializer to hint the use of filled-new-array.
+    char[] a = { (char)0xa000, (char)0xa001, (char)0xa002, (char)0xa003,
+                 (char)0xa004, (char)0xa005, (char)0xa006, (char)0xa007 };
+    for (int i = 0; i < a.length; i++) {
+      assertEquals((char)0xa000 + i, a[i]);
+    }
+  }
+
   public static void testSmaliFilledNewArray() throws Exception {
     Class<?> c = Class.forName("FilledNewArray");
 
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9279ce3..6822825 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -316,6 +316,7 @@
   411-optimizing-arith \
   412-new-array \
   413-regalloc-regression \
+  414-optimizing-arith-sub \
   700-LoadArgRegs \
   800-smali