Change DisableGC to DisableMovingGC.

Also remove the WaitForConcurrentGC call in
IncrementDisableMovingGC since we do not currently support any
concurrent moving collectors.

This fixes the performance regression caused by waiting for the
concurrent GC, which showed up in framework perf benchmarks as a
result of background compaction.
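
For reference, a simplified sketch of the counter-based approach (not
the actual ART Heap implementation; SimpleHeap, gc_lock_,
disable_moving_gc_count_ and MovingGcAllowed are illustrative names):
IncrementDisableMovingGC just bumps a count under a lock instead of
waiting for a concurrent GC, and a moving collector would consult the
count before compacting.

    #include <cstdint>
    #include <mutex>

    // Illustrative only: a moving-GC disable counter guarded by a mutex.
    class SimpleHeap {
     public:
      // Bump the counter; no WaitForConcurrentGC here since there is no
      // concurrent moving collector to wait for.
      void IncrementDisableMovingGC() {
        std::lock_guard<std::mutex> lock(gc_lock_);
        ++disable_moving_gc_count_;
      }

      // Every increment must be paired with a decrement, e.g. when the
      // critical array section is released.
      void DecrementDisableMovingGC() {
        std::lock_guard<std::mutex> lock(gc_lock_);
        --disable_moving_gc_count_;
      }

      // A moving collector would check this before compacting the heap.
      bool MovingGcAllowed() {
        std::lock_guard<std::mutex> lock(gc_lock_);
        return disable_moving_gc_count_ == 0;
      }

     private:
      std::mutex gc_lock_;
      uint64_t disable_moving_gc_count_ = 0;
    };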

Change-Id: I524f9ab52e1992419626a27649f232ca6967b03d
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5186399..80ca5fb 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2195,7 +2195,7 @@
     Array* array = soa.Decode<Array*>(java_array);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     if (heap->IsMovableObject(array)) {
-      heap->IncrementDisableGC(soa.Self());
+      heap->IncrementDisableMovingGC(soa.Self());
-      // Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
+      // Re-decode in case the object moved since IncrementDisableMovingGC waits for GC to complete.
       array = soa.Decode<Array*>(java_array);
     }
@@ -2646,7 +2646,8 @@
       if (is_copy) {
         delete[] reinterpret_cast<uint64_t*>(elements);
       } else if (heap->IsMovableObject(array)) {
-        heap->DecrementDisableGC(soa.Self());
+        // A non-copy release of a movable object means that we must have disabled the moving GC.
+        heap->DecrementDisableMovingGC(soa.Self());
       }
       UnpinPrimitiveArray(soa, array);
     }