Refactor allocation entrypoints.

Adds support for switching allocation entrypoints at runtime. Enables
adding new allocators without requiring significant copy-paste. Slight
speedup on ritzperf, probably due to more inlining.

TODO: Ensure that the entire allocation path is inlined so
that the switch statement in the allocation code is optimized
out.
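
A minimal sketch of the runtime-side switch, condensed from the new
runtime/arch/quick_alloc_entrypoints.cc in this change (the instrumented
and bump-pointer setters are stamped out by the GENERATE_ENTRYPOINTS
macro; this is illustrative, not additional code):

    static gc::AllocatorType entry_points_allocator = kMovingCollector ?
        gc::kAllocatorTypeBumpPointer : gc::kAllocatorTypeFreeList;
    static bool entry_points_instrumented = false;

    void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
      // Dispatch to the entrypoint set generated for the active allocator.
      switch (entry_points_allocator) {
        case gc::kAllocatorTypeFreeList:
          SetQuickAllocEntryPoints(qpoints, entry_points_instrumented);
          break;
        case gc::kAllocatorTypeBumpPointer:
          SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
          break;
        default:
          LOG(FATAL) << "Unimplemented";
      }
    }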

Rosalloc measurements (before change):
4583
4453
4439
4434
4751

After change:
4184
4287
4131
4335
4097

Change-Id: I1352a3cbcdf6dae93921582726324d91312df5c9
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 2aa59d5..16f11c6 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -147,6 +147,7 @@
 	arch/arm/registers_arm.cc \
 	arch/x86/registers_x86.cc \
 	arch/mips/registers_mips.cc \
+	arch/quick_alloc_entrypoints.cc \
 	entrypoints/entrypoint_utils.cc \
 	entrypoints/interpreter/interpreter_entrypoints.cc \
 	entrypoints/jni/jni_entrypoints.cc \
diff --git a/runtime/arch/alloc_entrypoints.S b/runtime/arch/alloc_entrypoints.S
deleted file mode 100644
index 840f3c6..0000000
--- a/runtime/arch/alloc_entrypoints.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Called by managed code to allocate an object */
-TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_instrumented, artAllocObjectFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
-/* Called by managed code to allocate an object when the caller doesn't know whether it has access
- * to the created type. */
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check_instrumented, artAllocObjectFromCodeWithAccessCheckInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
-/* Called by managed code to allocate an array. */
-THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_instrumented, artAllocArrayFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
-/* Called by managed code to allocate an array when the caller doesn't know whether it has access
- * to the created type. */
-THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_RESULT_IS_NON_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check_instrumented, artAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
-/* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. */
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_instrumented, artCheckAndAllocArrayFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
-/* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. */
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check_instrumented, artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 3dac636..5166d29 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -34,21 +34,6 @@
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
 
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
-
-extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
@@ -142,29 +127,7 @@
 extern "C" void art_quick_throw_null_pointer_exception();
 extern "C" void art_quick_throw_stack_overflow(void*);
 
-static bool quick_alloc_entry_points_instrumented = false;
-
-void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
-  quick_alloc_entry_points_instrumented = instrumented;
-}
-
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
-  if (quick_alloc_entry_points_instrumented) {
-    qpoints->pAllocArray = art_quick_alloc_array_instrumented;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
-    qpoints->pAllocObject = art_quick_alloc_object_instrumented;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
-  } else {
-    qpoints->pAllocArray = art_quick_alloc_array;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
-    qpoints->pAllocObject = art_quick_alloc_object;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
-  }
-}
+extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
 
 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dbfb93a..1976af5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -864,7 +864,7 @@
 END \name
 .endm
 
-#include "arch/alloc_entrypoints.S"
+#include "arch/quick_alloc_entrypoints.S"
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 331a461..e1b441a 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -33,21 +33,6 @@
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
 
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
-
-extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
@@ -143,29 +128,7 @@
 extern "C" void art_quick_throw_null_pointer_exception();
 extern "C" void art_quick_throw_stack_overflow(void*);
 
-static bool quick_alloc_entry_points_instrumented = false;
-
-void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
-  quick_alloc_entry_points_instrumented = instrumented;
-}
-
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
-  if (quick_alloc_entry_points_instrumented) {
-    qpoints->pAllocArray = art_quick_alloc_array_instrumented;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
-    qpoints->pAllocObject = art_quick_alloc_object_instrumented;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
-  } else {
-    qpoints->pAllocArray = art_quick_alloc_array;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
-    qpoints->pAllocObject = art_quick_alloc_object;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
-  }
-}
+extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
 
 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 8862711..6d6d796 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -231,7 +231,7 @@
     DELIVER_PENDING_EXCEPTION
 .endm
 
-.macro RETURN_IF_NONZERO
+.macro RETURN_IF_RESULT_IS_NON_ZERO
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
     beqz   $v0, 1f                       # success?
     nop
@@ -689,7 +689,7 @@
     # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
     jal     artInitializeStaticStorageFromCode
     move    $a3, $sp                            # pass $sp
-    RETURN_IF_NONZERO
+    RETURN_IF_RESULT_IS_NON_ZERO
 END art_quick_initialize_static_storage
 
     /*
@@ -703,7 +703,7 @@
     # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
     jal     artInitializeTypeFromCode
     move    $a3, $sp                           # pass $sp
-    RETURN_IF_NONZERO
+    RETURN_IF_RESULT_IS_NON_ZERO
 END art_quick_initialize_type
 
     /*
@@ -718,7 +718,7 @@
     # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
     jal     artInitializeTypeAndVerifyAccessFromCode
     move    $a3, $sp                           # pass $sp
-    RETURN_IF_NONZERO
+    RETURN_IF_RESULT_IS_NON_ZERO
 END art_quick_initialize_type_and_verify_access
 
     /*
@@ -902,156 +902,36 @@
     # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
     jal     artResolveStringFromCode
     move    $a3, $sp                  # pass $sp
-    RETURN_IF_NONZERO
+    RETURN_IF_RESULT_IS_NON_ZERO
 END art_quick_resolve_string
 
-    /*
-     * Called by managed code to allocate an object.
-     */
-    .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object
+
+// Macro to facilitate adding new allocation entrypoints.
+.macro TWO_ARG_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
-    jal     artAllocObjectFromCode    # (uint32_t type_idx, Method* method, Thread*, $sp)
+    jal     \entrypoint
     move    $a3, $sp                  # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_object
+    \return
+END \name
+.endm
 
-    .extern artAllocObjectFromCodeInstrumented
-ENTRY art_quick_alloc_object_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a2, rSELF                # pass Thread::Current
-    jal     artAllocObjectFromCodeInstrumented    # (uint32_t type_idx, Method* method, Thread*, $sp)
-    move    $a3, $sp                  # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_object_instrumented
-
-    /*
-     * Called by managed code to allocate an object when the caller doesn't know whether it has
-     * access to the created type.
-     */
-    .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_with_access_check
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a2, rSELF                # pass Thread::Current
-    jal     artAllocObjectFromCodeWithAccessCheck  # (uint32_t type_idx, Method* method, Thread*, $sp)
-    move    $a3, $sp                  # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_object_with_access_check
-
-    .extern artAllocObjectFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_alloc_object_with_access_check_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a2, rSELF                # pass Thread::Current
-    jal     artAllocObjectFromCodeWithAccessCheckInstrumented  # (uint32_t type_idx, Method* method, Thread*, $sp)
-    move    $a3, $sp                  # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_object_with_access_check_instrumented
-
-    /*
-     * Called by managed code to allocate an array.
-     */
-    .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array
+.macro THREE_ARG_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
-    # artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp)
-    jal     artAllocArrayFromCode
+    jal     \entrypoint
     sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_array
+    \return
+END \name
+.endm
 
-    .extern artAllocArrayFromCodeInstrumented
-ENTRY art_quick_alloc_array_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp)
-    jal     artAllocArrayFromCodeInstrumented
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_array_instrumented
-
-    /*
-     * Called by managed code to allocate an array when the caller doesn't know whether it has
-     * access to the created type.
-     */
-    .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_with_access_check
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, $sp)
-    jal     artAllocArrayFromCodeWithAccessCheck
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_array_with_access_check
-
-    .extern artAllocArrayFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_alloc_array_with_access_check_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, component_count, Thread*, $sp)
-    jal     artAllocArrayFromCodeWithAccessCheckInstrumented
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_alloc_array_with_access_check_instrumented
-
-    /*
-     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
-     */
-    .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , $sp)
-    jal     artCheckAndAllocArrayFromCode
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array
-
-    .extern artCheckAndAllocArrayFromCodeInstrumented
-ENTRY art_quick_check_and_alloc_array_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t count, Thread* , $sp)
-    jal     artCheckAndAllocArrayFromCodeInstrumented
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_instrumented
-
-    /*
-     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
-     */
-    .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_with_access_check
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , $sp)
-    jal     artCheckAndAllocArrayFromCodeWithAccessCheck
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_with_access_check
-
-    .extern artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_check_and_alloc_array_with_access_check_instrumented
-    GENERATE_GLOBAL_POINTER
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
-    move    $a3, rSELF                # pass Thread::Current
-    # artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, count, Thread* , $sp)
-    jal     artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
-    sw      $sp, 16($sp)              # pass $sp
-    RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_with_access_check_instrumented
+#include "arch/quick_alloc_entrypoints.S"
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
new file mode 100644
index 0000000..0109c13
--- /dev/null
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
+// Called by managed code to allocate an object.
+TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an object when the caller doesn't know whether it has access
+// to the created type.
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an array.
+THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an array when the caller doesn't know whether it has access
+// to the created type.
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check\c_suffix, artAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array\c_suffix, artCheckAndAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check\c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+.endm
+
+GENERATE_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS _instrumented, Instrumented
+GENERATE_ALLOC_ENTRYPOINTS _bump_pointer, BumpPointer
+GENERATE_ALLOC_ENTRYPOINTS _bump_pointer_instrumented, BumpPointerInstrumented
diff --git a/runtime/arch/quick_alloc_entrypoints.cc b/runtime/arch/quick_alloc_entrypoints.cc
new file mode 100644
index 0000000..192b124
--- /dev/null
+++ b/runtime/arch/quick_alloc_entrypoints.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/heap.h"
+
+#define GENERATE_ENTRYPOINTS(suffix) \
+extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
+void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
+  if (instrumented) { \
+    qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
+    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
+    qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
+    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
+    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
+    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
+  } else { \
+    qpoints->pAllocArray = art_quick_alloc_array##suffix; \
+    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
+    qpoints->pAllocObject = art_quick_alloc_object##suffix; \
+    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
+    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
+    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
+  } \
+}
+
+namespace art {
+
+// Generate the entrypoint functions.
+GENERATE_ENTRYPOINTS();
+GENERATE_ENTRYPOINTS(_bump_pointer);
+
+static bool entry_points_instrumented = false;
+static gc::AllocatorType entry_points_allocator = kMovingCollector ?
+    gc::kAllocatorTypeBumpPointer : gc::kAllocatorTypeFreeList;
+
+void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) {
+  entry_points_allocator = allocator;
+}
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
+  entry_points_instrumented = instrumented;
+}
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+  switch (entry_points_allocator) {
+    case gc::kAllocatorTypeFreeList: {
+      SetQuickAllocEntryPoints(qpoints, entry_points_instrumented);
+      break;
+    }
+    case gc::kAllocatorTypeBumpPointer: {
+      SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
+      break;
+    }
+    default: {
+      LOG(FATAL) << "Unimplemented";
+    }
+  }
+}
+
+}  // namespace art
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 99b0dd5..6a67079 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -32,21 +32,6 @@
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
 
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
-
-extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
-
 // Cast entrypoints.
 extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
                                                 const mirror::Class* ref_class);
@@ -125,29 +110,7 @@
 extern "C" void art_quick_throw_null_pointer_exception();
 extern "C" void art_quick_throw_stack_overflow(void*);
 
-static bool quick_alloc_entry_points_instrumented = false;
-
-void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
-  quick_alloc_entry_points_instrumented = instrumented;
-}
-
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
-  if (quick_alloc_entry_points_instrumented) {
-    qpoints->pAllocArray = art_quick_alloc_array_instrumented;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
-    qpoints->pAllocObject = art_quick_alloc_object_instrumented;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
-  } else {
-    qpoints->pAllocArray = art_quick_alloc_array;
-    qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
-    qpoints->pAllocObject = art_quick_alloc_object;
-    qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
-    qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
-    qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
-  }
-}
+extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
 
 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index decdb50..62a8b70 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -426,7 +426,7 @@
     DELIVER_PENDING_EXCEPTION
 END_MACRO
 
-#include "arch/alloc_entrypoints.S"
+#include "arch/quick_alloc_entrypoints.S"
 
 TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
 TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cfe3bf4..500cb59 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -199,9 +199,8 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // The GC can't handle an object with a null class since we can't get the size of this object.
   heap->IncrementDisableGC(self);
-  SirtRef<mirror::Class> java_lang_Class(
-      self, down_cast<mirror::Class*>(
-          heap->AllocNonMovableObject(self, NULL, sizeof(mirror::ClassClass))));
+  SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
+      heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass))));
   CHECK(java_lang_Class.get() != NULL);
   mirror::Class::SetClassClass(java_lang_Class.get());
   java_lang_Class->SetClass(java_lang_Class.get());
@@ -239,7 +238,8 @@
   java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
 
   // Create storage for root classes, save away our work so far (requires descriptors).
-  class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.get(), kClassRootsMax);
+  class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.get(),
+                                                           kClassRootsMax);
   CHECK(class_roots_ != NULL);
   SetClassRoot(kJavaLangClass, java_lang_Class.get());
   SetClassRoot(kJavaLangObject, java_lang_Object.get());
@@ -1204,7 +1204,7 @@
   SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
   SirtRef<mirror::DexCache> dex_cache(
       self, down_cast<mirror::DexCache*>(
-          heap->AllocObject(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
+          heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
   if (dex_cache.get() == NULL) {
     return NULL;
   }
@@ -1249,7 +1249,7 @@
                                        size_t class_size) {
   DCHECK_GE(class_size, sizeof(mirror::Class));
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  mirror::Object* k = heap->AllocNonMovableObject(self, java_lang_Class, class_size);
+  mirror::Object* k = heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size);
   if (UNLIKELY(k == NULL)) {
     CHECK(self->IsExceptionPending());  // OOME.
     return NULL;
@@ -1268,12 +1268,12 @@
 
 mirror::ArtField* ClassLinker::AllocArtField(Thread* self) {
   return down_cast<mirror::ArtField*>(
-      GetClassRoot(kJavaLangReflectArtField)->Alloc<false, true>(self));
+      GetClassRoot(kJavaLangReflectArtField)->AllocNonMovableObject(self));
 }
 
 mirror::ArtMethod* ClassLinker::AllocArtMethod(Thread* self) {
   return down_cast<mirror::ArtMethod*>(
-      GetClassRoot(kJavaLangReflectArtMethod)->Alloc<false, true>(self));
+      GetClassRoot(kJavaLangReflectArtMethod)->AllocNonMovableObject(self));
 }
 
 mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index cecb770..c5c1dfb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1119,8 +1119,7 @@
   if (c == NULL) {
     return status;
   }
-  new_array = gRegistry->Add(
-      mirror::Array::Alloc<kMovingCollector, true>(Thread::Current(), c, length));
+  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length));
   return JDWP::ERR_NONE;
 }
 
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d7bbe64..2806f94 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -33,20 +33,20 @@
 
 namespace art {
 
-static inline bool CheckFilledNewArrayAlloc(uint32_t type_idx, mirror::ArtMethod* referrer,
-                                            int32_t component_count, Thread* self,
-                                            bool access_check, mirror::Class** klass_ptr)
+static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, mirror::ArtMethod* referrer,
+                                                      int32_t component_count, Thread* self,
+                                                      bool access_check)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (UNLIKELY(component_count < 0)) {
     ThrowNegativeArraySizeException(component_count);
-    return false;  // Failure
+    return nullptr;  // Failure
   }
   mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
   if (UNLIKELY(klass == NULL)) {  // Not in dex cache so try to resolve
     klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
     if (klass == NULL) {  // Error
       DCHECK(self->IsExceptionPending());
-      return false;  // Failure
+      return nullptr;  // Failure
     }
   }
   if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) {
@@ -60,40 +60,43 @@
                                "Found type %s; filled-new-array not implemented for anything but \'int\'",
                                PrettyDescriptor(klass).c_str());
     }
-    return false;  // Failure
+    return nullptr;  // Failure
   }
   if (access_check) {
     mirror::Class* referrer_klass = referrer->GetDeclaringClass();
     if (UNLIKELY(!referrer_klass->CanAccess(klass))) {
       ThrowIllegalAccessErrorClass(referrer_klass, klass);
-      return false;  // Failure
+      return nullptr;  // Failure
     }
   }
   DCHECK(klass->IsArrayClass()) << PrettyClass(klass);
-  *klass_ptr = klass;
-  return true;
+  return klass;
 }
 
 // Helper function to allocate array for FILLED_NEW_ARRAY.
 mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* referrer,
                                           int32_t component_count, Thread* self,
-                                          bool access_check) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
-    return NULL;
+                                          bool access_check,
+                                          gc::AllocatorType allocator_type) {
+  mirror::Class* klass = CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self,
+                                                  access_check);
+  if (UNLIKELY(klass == nullptr)) {
+    return nullptr;
   }
-  return mirror::Array::Alloc<kMovingCollector, false>(self, klass, component_count);
+  return mirror::Array::Alloc<false>(self, klass, component_count, allocator_type);
 }
 
 // Helper function to allocate array for FILLED_NEW_ARRAY.
 mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* referrer,
                                                       int32_t component_count, Thread* self,
-                                                      bool access_check) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
-    return NULL;
+                                                      bool access_check,
+                                                      gc::AllocatorType allocator_type) {
+  mirror::Class* klass = CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self,
+                                                  access_check);
+  if (UNLIKELY(klass == nullptr)) {
+    return nullptr;
   }
-  return mirror::Array::Alloc<kMovingCollector, true>(self, klass, component_count);
+  return mirror::Array::Alloc<true>(self, klass, component_count, allocator_type);
 }
 
 void ThrowStackOverflowError(Thread* self) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 3b58a8d..747dd56 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -27,9 +27,11 @@
 #include "mirror/art_method.h"
 #include "mirror/array.h"
 #include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
 #include "mirror/throwable.h"
+#include "locks.h"
 #include "object_utils.h"
-
+#include "sirt_ref.h"
 #include "thread.h"
 
 namespace art {
@@ -40,130 +42,122 @@
   class Object;
 }  // namespace mirror
 
-static inline bool CheckObjectAlloc(uint32_t type_idx, mirror::ArtMethod* method,
-                                    Thread* self,
-                                    bool access_check,
-                                    mirror::Class** klass_ptr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+// TODO: Fix no thread safety analysis when GCC can handle template specialization.
+template <const bool kAccessCheck>
+ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+                                                            mirror::ArtMethod* method,
+                                                            Thread* self)
+    NO_THREAD_SAFETY_ANALYSIS {
   mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
-  Runtime* runtime = Runtime::Current();
   if (UNLIKELY(klass == NULL)) {
-    klass = runtime->GetClassLinker()->ResolveType(type_idx, method);
+    klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
     if (klass == NULL) {
       DCHECK(self->IsExceptionPending());
-      return false;  // Failure
+      return nullptr;  // Failure
     }
   }
-  if (access_check) {
+  if (kAccessCheck) {
     if (UNLIKELY(!klass->IsInstantiable())) {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
       self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;",
                               PrettyDescriptor(klass).c_str());
-      return false;  // Failure
+      return nullptr;  // Failure
     }
     mirror::Class* referrer = method->GetDeclaringClass();
     if (UNLIKELY(!referrer->CanAccess(klass))) {
       ThrowIllegalAccessErrorClass(referrer, klass);
-      return false;  // Failure
+      return nullptr;  // Failure
     }
   }
-  if (!klass->IsInitialized() &&
-      !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) {
-    DCHECK(self->IsExceptionPending());
-    return false;  // Failure
+  if (UNLIKELY(!klass->IsInitialized())) {
+    SirtRef<mirror::Class> sirt_klass(self, klass);
+    // The class initializer might cause a GC.
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(klass, true, true)) {
+      DCHECK(self->IsExceptionPending());
+      return nullptr;  // Failure
+    }
+    return sirt_klass.get();
   }
-  *klass_ptr = klass;
-  return true;
+  return klass;
 }
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
 // When verification/compiler hasn't been able to verify access, optionally perform an access
 // check.
-static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::ArtMethod* method,
-                                                  Thread* self,
-                                                  bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
-    return NULL;
+// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
+template <bool kAccessCheck, bool kInstrumented>
+ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+                                                                mirror::ArtMethod* method,
+                                                                Thread* self,
+                                                                gc::AllocatorType allocator_type)
+    NO_THREAD_SAFETY_ANALYSIS {
+  mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self);
+  if (UNLIKELY(klass == nullptr)) {
+    return nullptr;
   }
-  return klass->Alloc<kMovingCollector, false>(self);
+  return klass->Alloc<kInstrumented>(self, allocator_type);
 }
 
-static inline mirror::Object* AllocObjectFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
-                                                              Thread* self,
-                                                              bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
-    return NULL;
-  }
-  return klass->Alloc<kMovingCollector, true>(self);
-}
-
-static inline bool CheckArrayAlloc(uint32_t type_idx, mirror::ArtMethod* method,
-                                   int32_t component_count,
-                                   bool access_check, mirror::Class** klass_ptr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+// TODO: Fix no thread safety analysis when GCC can handle template specialization.
+template <bool kAccessCheck>
+ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+                                                           mirror::ArtMethod* method,
+                                                           int32_t component_count)
+    NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(component_count < 0)) {
     ThrowNegativeArraySizeException(component_count);
-    return false;  // Failure
+    return nullptr;  // Failure
   }
   mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
-  if (UNLIKELY(klass == NULL)) {  // Not in dex cache so try to resolve
+  if (UNLIKELY(klass == nullptr)) {  // Not in dex cache so try to resolve
     klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
     if (klass == NULL) {  // Error
       DCHECK(Thread::Current()->IsExceptionPending());
-      return false;  // Failure
+      return nullptr;  // Failure
     }
     CHECK(klass->IsArrayClass()) << PrettyClass(klass);
   }
-  if (access_check) {
+  if (kAccessCheck) {
     mirror::Class* referrer = method->GetDeclaringClass();
     if (UNLIKELY(!referrer->CanAccess(klass))) {
       ThrowIllegalAccessErrorClass(referrer, klass);
-      return false;  // Failure
+      return nullptr;  // Failure
     }
   }
-  *klass_ptr = klass;
-  return true;
+  return klass;
 }
 
 // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
 // it cannot be resolved, throw an error. If it can, use it to create an array.
 // When verification/compiler hasn't been able to verify access, optionally perform an access
 // check.
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
-                                                int32_t component_count,
-                                                Thread* self, bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
-    return NULL;
+// TODO: Fix no thread safety analysis when GCC can handle template specialization.
+template <bool kAccessCheck, bool kInstrumented>
+ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+                                                              mirror::ArtMethod* method,
+                                                              int32_t component_count,
+                                                              Thread* self,
+                                                              gc::AllocatorType allocator_type)
+    NO_THREAD_SAFETY_ANALYSIS {
+  mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count);
+  if (UNLIKELY(klass == nullptr)) {
+    return nullptr;
   }
-  return mirror::Array::Alloc<kMovingCollector, false>(self, klass, component_count);
-}
-
-static inline mirror::Array* AllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
-                                                            int32_t component_count,
-                                                            Thread* self, bool access_check)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Class* klass;
-  if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
-    return NULL;
-  }
-  return mirror::Array::Alloc<kMovingCollector, true>(self, klass, component_count);
+  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type);
 }
 
 extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
-                                                 int32_t component_count,
-                                                 Thread* self, bool access_check)
+                                                 int32_t component_count, Thread* self,
+                                                 bool access_check,
+                                                 gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
-                                                             int32_t component_count,
-                                                             Thread* self, bool access_check)
+extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
+                                                             mirror::ArtMethod* method,
+                                                             int32_t component_count, Thread* self,
+                                                             bool access_check,
+                                                             gc::AllocatorType allocator_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Type of find field operation for fast and slow case.
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
index 91b7353..6d23efe 100644
--- a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
@@ -24,14 +24,14 @@
                                                                mirror::ArtMethod* referrer,
                                                                Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return AllocObjectFromCode(type_idx, referrer, thread, false);
+  return AllocObjectFromCode<false, true>(type_idx, referrer, thread, gc::kAllocatorTypeFreeList);
 }
 
 extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
                                                                                  mirror::ArtMethod* referrer,
                                                                                  Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return AllocObjectFromCode(type_idx, referrer, thread, true);
+  return AllocObjectFromCode<true, true>(type_idx, referrer, thread, gc::kAllocatorTypeFreeList);
 }
 
 extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx,
@@ -39,7 +39,8 @@
                                                               uint32_t length,
                                                               Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return AllocArrayFromCode(type_idx, referrer, length, self, false);
+  return AllocArrayFromCode<false, true>(type_idx, referrer, length, self,
+                                         gc::kAllocatorTypeFreeList);
 }
 
 extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
@@ -47,7 +48,8 @@
                                                                                 uint32_t length,
                                                                                 Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return AllocArrayFromCode(type_idx, referrer, length, self, true);
+  return AllocArrayFromCode<true, true>(type_idx, referrer, length, self,
+                                        gc::kAllocatorTypeFreeList);
 }
 
 extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
@@ -55,7 +57,8 @@
                                                                         uint32_t length,
                                                                         Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
+  return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false,
+                                    gc::kAllocatorTypeFreeList);
 }
 
 extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
@@ -63,7 +66,8 @@
                                                                                           uint32_t length,
                                                                                           Thread* thread)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
+  return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true,
+                                    gc::kAllocatorTypeFreeList);
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 6f7b1ab..b71b880 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -23,110 +23,55 @@
 
 namespace art {
 
-extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx, mirror::ArtMethod* method,
-                                                  Thread* self, mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocObjectFromCode(type_idx, method, self, false);
+#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
+extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
+} \
+extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
+} \
+extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
+    mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \
+                                                      allocator_type); \
+} \
+extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
+    mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return AllocArrayFromCode<true, instrumented_bool>(type_idx, method, component_count, self, \
+                                                     allocator_type); \
+} \
+extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
+    mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false, allocator_type); \
+} \
+extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
+    uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
+    mirror::ArtMethod** sp) \
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true, allocator_type); \
 }
 
-extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx,
-                                                                 mirror::ArtMethod* method,
-                                                                 Thread* self,
-                                                                 mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocObjectFromCode(type_idx, method, self, true);
-}
+#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(suffix, allocator_type) \
+    GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, Instrumented, true, allocator_type) \
+    GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, , false, allocator_type)
 
-extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
-                                                int32_t component_count, Thread* self,
-                                                mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocArrayFromCode(type_idx, method, component_count, self, false);
-}
-
-extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx,
-                                                               mirror::ArtMethod* method,
-                                                               int32_t component_count,
-                                                               Thread* self,
-                                                               mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocArrayFromCode(type_idx, method, component_count, self, true);
-}
-
-extern "C" mirror::Array* artCheckAndAllocArrayFromCode(uint32_t type_idx,
-                                                        mirror::ArtMethod* method,
-                                                        int32_t component_count, Thread* self,
-                                                        mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false);
-}
-
-extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx,
-                                                                       mirror::ArtMethod* method,
-                                                                       int32_t component_count,
-                                                                       Thread* self,
-                                                                       mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true);
-}
-
-extern "C" mirror::Object* artAllocObjectFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
-                                                              Thread* self, mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocObjectFromCodeInstrumented(type_idx, method, self, false);
-}
-
-extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
-                                                                             mirror::ArtMethod* method,
-                                                                             Thread* self,
-                                                                             mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocObjectFromCodeInstrumented(type_idx, method, self, true);
-}
-
-extern "C" mirror::Array* artAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
-                                                            int32_t component_count, Thread* self,
-                                                              mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocArrayFromCodeInstrumented(type_idx, method, component_count, self, false);
-}
-
-extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
-                                                                           mirror::ArtMethod* method,
-                                                                           int32_t component_count,
-                                                                           Thread* self,
-                                                                           mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return AllocArrayFromCodeInstrumented(type_idx, method, component_count, self, true);
-}
-
-extern "C" mirror::Array* artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
-                                                                    mirror::ArtMethod* method,
-                                                                    int32_t component_count, Thread* self,
-                                                                    mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return CheckAndAllocArrayFromCodeInstrumented(type_idx, method, component_count, self, false);
-}
-
-extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
-                                                                                   mirror::ArtMethod* method,
-                                                                                   int32_t component_count,
-                                                                                   Thread* self,
-                                                                                   mirror::ArtMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
-  return CheckAndAllocArrayFromCodeInstrumented(type_idx, method, component_count, self, true);
-}
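+// Instantiate instrumented and uninstrumented entrypoints for each allocator. The empty suffix
+// keeps the original art* symbol names for the default free-list (RosAlloc / dlmalloc) allocator.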
+GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(, gc::kAllocatorTypeFreeList)
+GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(BumpPointer, gc::kAllocatorTypeBumpPointer)
 
 }  // namespace art
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e6829e2..fcc07a0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -32,152 +32,126 @@
 namespace art {
 namespace gc {
 
-inline mirror::Object* Heap::AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* c,
-                                                                 size_t byte_count) {
+template <const bool kInstrumented>
+inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* c,
+                                                      size_t byte_count, AllocatorType allocator) {
   DebugCheckPreconditionsForAllocObject(c, byte_count);
-  mirror::Object* obj;
-  size_t bytes_allocated;
-  AllocationTimer alloc_timer(this, &obj);
-  bool large_object_allocation = TryAllocLargeObjectUninstrumented(self, c, byte_count,
-                                                                   &obj, &bytes_allocated);
-  if (LIKELY(!large_object_allocation)) {
-    // Non-large object allocation.
-    if (!kUseRosAlloc) {
-      DCHECK(non_moving_space_->IsDlMallocSpace());
-      obj = AllocateUninstrumented(self, reinterpret_cast<space::DlMallocSpace*>(non_moving_space_),
-                                   byte_count, &bytes_allocated);
-    } else {
-      DCHECK(non_moving_space_->IsRosAllocSpace());
-      obj = AllocateUninstrumented(self, reinterpret_cast<space::RosAllocSpace*>(non_moving_space_),
-                                   byte_count, &bytes_allocated);
-    }
-    // Ensure that we did not allocate into a zygote space.
-    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
-  }
-  if (LIKELY(obj != NULL)) {
-    obj->SetClass(c);
-    // Record allocation after since we want to use the atomic add for the atomic fence to guard
-    // the SetClass since we do not want the class to appear NULL in another thread.
-    size_t new_num_bytes_allocated = RecordAllocationUninstrumented(bytes_allocated, obj);
-    DCHECK(!Dbg::IsAllocTrackingEnabled());
-    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
-    if (kDesiredHeapVerification > kNoHeapVerification) {
-      VerifyObject(obj);
-    }
-  } else {
-    ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
-  }
-  if (kIsDebugBuild) {
-    self->VerifyStack();
-  }
-  return obj;
-}
-
-inline mirror::Object* Heap::AllocMovableObjectUninstrumented(Thread* self, mirror::Class* c,
-                                                              size_t byte_count) {
-  DebugCheckPreconditionsForAllocObject(c, byte_count);
-  mirror::Object* obj;
-  AllocationTimer alloc_timer(this, &obj);
-  byte_count = (byte_count + 7) & ~7;
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
-    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
-    if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
-      CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
-    }
-  }
-  obj = bump_pointer_space_->AllocNonvirtual(byte_count);
-  if (LIKELY(obj != NULL)) {
-    obj->SetClass(c);
-    DCHECK(!obj->IsClass());
-    // Record allocation after since we want to use the atomic add for the atomic fence to guard
-    // the SetClass since we do not want the class to appear NULL in another thread.
-    num_bytes_allocated_.fetch_add(byte_count);
-    DCHECK(!Dbg::IsAllocTrackingEnabled());
-    if (kDesiredHeapVerification > kNoHeapVerification) {
-      VerifyObject(obj);
-    }
-  } else {
-    ThrowOutOfMemoryError(self, byte_count, false);
-  }
-  if (kIsDebugBuild) {
-    self->VerifyStack();
-  }
-  return obj;
-}
-
-inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
-  DCHECK(obj != NULL);
-  DCHECK_GT(size, 0u);
-  size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
-
-  DCHECK(!Runtime::Current()->HasStatsEnabled());
-
-  // This is safe to do since the GC will never free objects which are neither in the allocation
-  // stack or the live bitmap.
-  while (!allocation_stack_->AtomicPushBack(obj)) {
-    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-  }
-
-  return old_num_bytes_allocated + size;
-}
-
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                                         bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
-  }
-  DCHECK(!running_on_valgrind_);
-  return space->Alloc(self, alloc_size, bytes_allocated);
-}
-
-// DlMallocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
-                                                         bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
-  }
-  DCHECK(!running_on_valgrind_);
-  return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-}
-
-// RosAllocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
-                                                         bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
-  }
-  DCHECK(!running_on_valgrind_);
-  return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-}
-
-template <class T>
-inline mirror::Object* Heap::AllocateUninstrumented(Thread* self, T* space, size_t alloc_size,
-                                                    size_t* bytes_allocated) {
   // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
   // done in the runnable state where suspension is expected.
   DCHECK_EQ(self->GetState(), kRunnable);
   self->AssertThreadSuspensionIsAllowable();
-
-  mirror::Object* ptr = TryToAllocateUninstrumented(self, space, alloc_size, false, bytes_allocated);
-  if (LIKELY(ptr != NULL)) {
-    return ptr;
+  mirror::Object* obj;
+  size_t bytes_allocated;
+  AllocationTimer alloc_timer(this, &obj);
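+  // Allocations which qualify as large objects are routed to the large object space instead of
+  // the requested allocator.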
+  if (UNLIKELY(ShouldAllocLargeObject(c, byte_count))) {
+    obj = TryToAllocate<kInstrumented>(self, kAllocatorTypeLOS, byte_count, false,
+                                       &bytes_allocated);
+    allocator = kAllocatorTypeLOS;
+  } else {
+    obj = TryToAllocate<kInstrumented>(self, allocator, byte_count, false, &bytes_allocated);
   }
-  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
+
+  if (UNLIKELY(obj == nullptr)) {
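+    // AllocateInternalWithGc may run a GC; hold the class in a SirtRef so the pointer stays
+    // valid across any collection.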
+    SirtRef<mirror::Class> sirt_c(self, c);
+    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated);
+    if (obj == nullptr) {
+      return nullptr;
+    } else {
+      c = sirt_c.get();
+    }
+  }
+  obj->SetClass(c);
+  // TODO: Set array length here.
+  DCHECK_GT(bytes_allocated, 0u);
+  const size_t new_num_bytes_allocated =
+      static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
+  // TODO: Deprecate.
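+  // Runtime allocation statistics are only maintained on the instrumented path.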
+  if (kInstrumented) {
+    if (Runtime::Current()->HasStatsEnabled()) {
+      RuntimeStats* thread_stats = self->GetStats();
+      ++thread_stats->allocated_objects;
+      thread_stats->allocated_bytes += bytes_allocated;
+      RuntimeStats* global_stats = Runtime::Current()->GetStats();
+      ++global_stats->allocated_objects;
+      global_stats->allocated_bytes += bytes_allocated;
+    }
+  } else {
+    DCHECK(!Runtime::Current()->HasStatsEnabled());
+  }
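+  // Only free-list allocations are pushed on the allocation stack, which is what enables sticky
+  // mark bits for the mark sweep collectors.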
+  if (AllocatorHasAllocationStack(allocator)) {
+    // This is safe to do since the GC will never free objects which are in neither the
+    // allocation stack nor the live bitmap.
+    while (!allocation_stack_->AtomicPushBack(obj)) {
+      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+    }
+  }
+  if (kInstrumented) {
+    if (Dbg::IsAllocTrackingEnabled()) {
+      Dbg::RecordAllocation(c, bytes_allocated);
+    }
+  } else {
+    DCHECK(!Dbg::IsAllocTrackingEnabled());
+  }
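+  // For allocators paired with a concurrent GC, check whether this allocation crossed the
+  // threshold for requesting a background collection.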
+  if (AllocatorHasConcurrentGC(allocator)) {
+    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
+  }
+  if (kIsDebugBuild) {
+    if (kDesiredHeapVerification > kNoHeapVerification) {
+      VerifyObject(obj);
+    }
+    self->VerifyStack();
+  }
+  return obj;
 }
 
-inline bool Heap::TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
-                                                    mirror::Object** obj_ptr, size_t* bytes_allocated) {
-  bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
-  if (UNLIKELY(large_object_allocation)) {
-    mirror::Object* obj = AllocateUninstrumented(self, large_object_space_, byte_count, bytes_allocated);
-    // Make sure that our large object didn't get placed anywhere within the space interval or else
-    // it breaks the immune range.
-    DCHECK(obj == NULL ||
-           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
-           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
-    *obj_ptr = obj;
+template <const bool kInstrumented>
+inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
+                                           size_t alloc_size, bool grow,
+                                           size_t* bytes_allocated) {
+  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
+    return nullptr;
   }
-  return large_object_allocation;
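+  // Under Valgrind, fall back to the virtual Alloc of the non-moving space instead of the
+  // non-virtual fast paths below.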
+  if (kInstrumented) {
+    if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
+      return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
+    }
+  }
+  mirror::Object* ret;
+  switch (allocator_type) {
+    case kAllocatorTypeBumpPointer: {
+      DCHECK(bump_pointer_space_ != nullptr);
+      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
+      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
+      if (LIKELY(ret != nullptr)) {
+        *bytes_allocated = alloc_size;
+      }
+      break;
+    }
+    case kAllocatorTypeFreeList: {
+      if (kUseRosAlloc) {
+        ret = reinterpret_cast<space::RosAllocSpace*>(non_moving_space_)->AllocNonvirtual(
+            self, alloc_size, bytes_allocated);
+      } else {
+        ret = reinterpret_cast<space::DlMallocSpace*>(non_moving_space_)->AllocNonvirtual(
+            self, alloc_size, bytes_allocated);
+      }
+      break;
+    }
+    case kAllocatorTypeLOS: {
+      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
+      // Make sure that our large object didn't get placed anywhere within the space interval or
+      // else it breaks the immune range.
+      DCHECK(ret == nullptr ||
+             reinterpret_cast<byte*>(ret) < continuous_spaces_.front()->Begin() ||
+             reinterpret_cast<byte*>(ret) >= continuous_spaces_.back()->End());
+      break;
+    }
+    default: {
+      LOG(FATAL) << "Invalid allocator type";
+      ret = nullptr;
+    }
+  }
+  return ret;
 }
 
 inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
@@ -198,14 +172,14 @@
   if (kMeasureAllocationTime) {
     mirror::Object* allocated_obj = *allocated_obj_ptr_;
     // Only if the allocation succeeded, record the time.
-    if (allocated_obj != NULL) {
+    if (allocated_obj != nullptr) {
       uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
       heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
     }
   }
 };
 
-inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) {
+inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
   // We need to have a zygote space or else our newly allocated large object can end up in the
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
@@ -230,7 +204,8 @@
   return false;
 }
 
-inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj) {
+inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
+                                    mirror::Object* obj) {
   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
     // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
     SirtRef<mirror::Object> ref(self, obj);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 763bfe9..c31e3e9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -74,7 +74,7 @@
            bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
            bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
            bool ignore_max_footprint)
-    : non_moving_space_(NULL),
+    : non_moving_space_(nullptr),
       concurrent_gc_(!kMovingCollector && concurrent_gc),
       parallel_gc_threads_(parallel_gc_threads),
       conc_gc_threads_(conc_gc_threads),
@@ -128,6 +128,8 @@
        */
       max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
           : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+      current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer : kAllocatorTypeFreeList),
+      current_non_moving_allocator_(kAllocatorTypeFreeList),
       bump_pointer_space_(nullptr),
       temp_space_(nullptr),
       reference_referent_offset_(0),
@@ -256,9 +258,13 @@
       garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
       garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
     }
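+    // Run the cheapest GC type first; AllocateInternalWithGc walks this plan in order when an
+    // allocation fails.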
+    gc_plan_.push_back(collector::kGcTypeSticky);
+    gc_plan_.push_back(collector::kGcTypePartial);
+    gc_plan_.push_back(collector::kGcTypeFull);
   } else {
     semi_space_collector_ = new collector::SemiSpace(this);
     garbage_collectors_.push_back(semi_space_collector_);
+    gc_plan_.push_back(collector::kGcTypeFull);
   }
 
   if (running_on_valgrind_) {
@@ -779,106 +785,6 @@
   self->ThrowOutOfMemoryError(oss.str().c_str());
 }
 
-inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
-                                                  mirror::Object** obj_ptr, size_t* bytes_allocated) {
-  bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
-  if (UNLIKELY(large_object_allocation)) {
-    mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated);
-    // Make sure that our large object didn't get placed anywhere within the space interval or else
-    // it breaks the immune range.
-    DCHECK(obj == nullptr ||
-           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
-           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
-    *obj_ptr = obj;
-  }
-  return large_object_allocation;
-}
-
-mirror::Object* Heap::AllocMovableObjectInstrumented(Thread* self, mirror::Class* c,
-                                                     size_t byte_count) {
-  DebugCheckPreconditionsForAllocObject(c, byte_count);
-  mirror::Object* obj;
-  AllocationTimer alloc_timer(this, &obj);
-  byte_count = RoundUp(byte_count, 8);
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
-    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
-    if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
-      CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
-    }
-  }
-  obj = bump_pointer_space_->AllocNonvirtual(byte_count);
-  if (LIKELY(obj != NULL)) {
-    obj->SetClass(c);
-    DCHECK(!obj->IsClass());
-    // Record allocation after since we want to use the atomic add for the atomic fence to guard
-    // the SetClass since we do not want the class to appear NULL in another thread.
-    num_bytes_allocated_.fetch_add(byte_count);
-    if (Runtime::Current()->HasStatsEnabled()) {
-      RuntimeStats* thread_stats = Thread::Current()->GetStats();
-      ++thread_stats->allocated_objects;
-      thread_stats->allocated_bytes += byte_count;
-      RuntimeStats* global_stats = Runtime::Current()->GetStats();
-      ++global_stats->allocated_objects;
-      global_stats->allocated_bytes += byte_count;
-    }
-    if (Dbg::IsAllocTrackingEnabled()) {
-      Dbg::RecordAllocation(c, byte_count);
-    }
-    if (kDesiredHeapVerification > kNoHeapVerification) {
-      VerifyObject(obj);
-    }
-  } else {
-    ThrowOutOfMemoryError(self, byte_count, false);
-  }
-  if (kIsDebugBuild) {
-    self->VerifyStack();
-  }
-  return obj;
-}
-
-mirror::Object* Heap::AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* c,
-                                                        size_t byte_count) {
-  DebugCheckPreconditionsForAllocObject(c, byte_count);
-  mirror::Object* obj;
-  size_t bytes_allocated;
-  AllocationTimer alloc_timer(this, &obj);
-  bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count, &obj,
-                                                                 &bytes_allocated);
-  if (LIKELY(!large_object_allocation)) {
-    // Non-large object allocation.
-    if (!kUseRosAlloc) {
-      DCHECK(non_moving_space_->IsDlMallocSpace());
-      obj = AllocateInstrumented(self, reinterpret_cast<space::DlMallocSpace*>(non_moving_space_),
-                                 byte_count, &bytes_allocated);
-    } else {
-      DCHECK(non_moving_space_->IsRosAllocSpace());
-      obj = AllocateInstrumented(self, reinterpret_cast<space::RosAllocSpace*>(non_moving_space_),
-                                 byte_count, &bytes_allocated);
-    }
-    // Ensure that we did not allocate into a zygote space.
-    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
-  }
-  if (LIKELY(obj != NULL)) {
-    obj->SetClass(c);
-    // Record allocation after since we want to use the atomic add for the atomic fence to guard
-    // the SetClass since we do not want the class to appear NULL in another thread.
-    size_t new_num_bytes_allocated = RecordAllocationInstrumented(bytes_allocated, obj);
-    if (Dbg::IsAllocTrackingEnabled()) {
-      Dbg::RecordAllocation(c, byte_count);
-    }
-    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
-    if (kDesiredHeapVerification > kNoHeapVerification) {
-      VerifyObject(obj);
-    }
-  } else {
-    ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
-  }
-  if (kIsDebugBuild) {
-    self->VerifyStack();
-  }
-  return obj;
-}
-
 void Heap::Trim() {
   uint64_t start_ns = NanoTime();
   // Trim the managed spaces.
@@ -1059,31 +965,6 @@
   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
 }
 
-inline size_t Heap::RecordAllocationInstrumented(size_t size, mirror::Object* obj) {
-  DCHECK(obj != NULL);
-  DCHECK_GT(size, 0u);
-  size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
-
-  if (Runtime::Current()->HasStatsEnabled()) {
-    RuntimeStats* thread_stats = Thread::Current()->GetStats();
-    ++thread_stats->allocated_objects;
-    thread_stats->allocated_bytes += size;
-
-    // TODO: Update these atomically.
-    RuntimeStats* global_stats = Runtime::Current()->GetStats();
-    ++global_stats->allocated_objects;
-    global_stats->allocated_bytes += size;
-  }
-
-  // This is safe to do since the GC will never free objects which are neither in the allocation
-  // stack or the live bitmap.
-  while (!allocation_stack_->AtomicPushBack(obj)) {
-    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-  }
-
-  return old_num_bytes_allocated + size;
-}
-
 void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
   DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
   num_bytes_allocated_.fetch_sub(freed_bytes);
@@ -1100,125 +981,50 @@
   }
 }
 
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                                       bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
-  }
-  return space->Alloc(self, alloc_size, bytes_allocated);
-}
-
-// DlMallocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
-                                                       bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return nullptr;
-  }
-  if (LIKELY(!running_on_valgrind_)) {
-    return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-  } else {
-    return space->Alloc(self, alloc_size, bytes_allocated);
-  }
-}
-
-// RosAllocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
-                                                       bool grow, size_t* bytes_allocated) {
-  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
-  }
-  if (LIKELY(!running_on_valgrind_)) {
-    return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-  } else {
-    return space->Alloc(self, alloc_size, bytes_allocated);
-  }
-}
-
-template <class T>
-inline mirror::Object* Heap::AllocateInstrumented(Thread* self, T* space, size_t alloc_size,
-                                                  size_t* bytes_allocated) {
-  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
-  // done in the runnable state where suspension is expected.
-  DCHECK_EQ(self->GetState(), kRunnable);
-  self->AssertThreadSuspensionIsAllowable();
-
-  mirror::Object* ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
-  if (LIKELY(ptr != NULL)) {
-    return ptr;
-  }
-  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
-}
-
-mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space,
+mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                              size_t alloc_size, size_t* bytes_allocated) {
-  mirror::Object* ptr;
-
+  mirror::Object* ptr = nullptr;
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForGcToComplete(self);
   if (last_gc != collector::kGcTypeNone) {
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
-    ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
-    if (ptr != NULL) {
-      return ptr;
-    }
+    ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
   }
 
   // Loop through our different Gc types and try to Gc until we get enough free memory.
-  for (size_t i = static_cast<size_t>(last_gc) + 1;
-      i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
-    bool run_gc = false;
-    collector::GcType gc_type = static_cast<collector::GcType>(i);
-    switch (gc_type) {
-      case collector::kGcTypeSticky: {
-          const size_t alloc_space_size = non_moving_space_->Size();
-          run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
-              non_moving_space_->Capacity() - alloc_space_size >=
-              min_remaining_space_for_sticky_gc_;
-          break;
-        }
-      case collector::kGcTypePartial:
-        run_gc = have_zygote_space_;
-        break;
-      case collector::kGcTypeFull:
-        run_gc = true;
-        break;
-      default:
-        LOG(FATAL) << "Invalid GC type";
+  for (collector::GcType gc_type : gc_plan_) {
+    if (ptr != nullptr) {
+      break;
     }
-
-    if (run_gc) {
-      // If we actually ran a different type of Gc than requested, we can skip the index forwards.
-      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
-      DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
-      i = static_cast<size_t>(gc_type_ran);
-
+    // Attempt to run the collector; if it actually ran, retry the allocation.
+    if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
       // Did we free sufficient memory for the allocation to succeed?
-      ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
-      if (ptr != NULL) {
-        return ptr;
-      }
+      ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
     }
   }
-
   // Allocations have failed after GCs;  this is an exceptional state.
-  // Try harder, growing the heap if necessary.
-  ptr = TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
-  if (ptr != NULL) {
-    return ptr;
+  if (ptr == nullptr) {
+    // Try harder, growing the heap if necessary.
+    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
   }
-
-  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
-  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
-  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.
-
-  // TODO: Run finalization, but this can cause more allocations to occur.
-  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
-           << " allocation";
-
-  // We don't need a WaitForGcToComplete here either.
-  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
-  return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
+  if (ptr == nullptr) {
+    // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+    // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+    // VM spec requires that all SoftReferences have been collected and cleared before throwing
+    // OOME.
+    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+             << " allocation";
+    // TODO: Run finalization, but this may cause more allocations to occur.
+    // We don't need a WaitForGcToComplete here either.
+    DCHECK(!gc_plan_.empty());
+    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
+    if (ptr == nullptr) {
+      ThrowOutOfMemoryError(self, alloc_size, false);
+    }
+  }
+  return ptr;
 }
 
 void Heap::SetTargetHeapUtilization(float target) {
@@ -1493,6 +1299,27 @@
                                                bool clear_soft_references) {
   Thread* self = Thread::Current();
   Runtime* runtime = Runtime::Current();
+  // If the heap can't run the requested GC type, silently fail and return kGcTypeNone.
+  switch (gc_type) {
+    case collector::kGcTypeSticky: {
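+      // A sticky GC is only worthwhile once the alloc space is large enough and still has
+      // sufficient free headroom.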
+      const size_t alloc_space_size = non_moving_space_->Size();
+      if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
+          non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
+        return collector::kGcTypeNone;
+      }
+      break;
+    }
+    case collector::kGcTypePartial: {
+      if (!have_zygote_space_) {
+        return collector::kGcTypeNone;
+      }
+      break;
+    }
+    default: {
+      // Other GC types have no special preconditions that prevent them from running. The main
+      // case here is a full GC.
+    }
+  }
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
   if (self->IsHandlingStackOverflow()) {
@@ -1512,12 +1339,10 @@
     }
     is_gc_running_ = true;
   }
-
   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
     ++runtime->GetStats()->gc_for_alloc_count;
     ++self->GetStats()->gc_for_alloc_count;
   }
-
   uint64_t gc_start_time_ns = NanoTime();
   uint64_t gc_start_size = GetBytesAllocated();
   // Approximate allocation rate in bytes / second.
@@ -1528,11 +1353,6 @@
     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
   }
 
-  if (gc_type == collector::kGcTypeSticky &&
-      non_moving_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
-    gc_type = collector::kGcTypePartial;
-  }
-
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
 
@@ -2347,6 +2167,9 @@
   // Total number of native bytes allocated.
   native_bytes_allocated_.fetch_add(bytes);
   if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
+        collector::kGcTypeFull;
+
     // The second watermark is higher than the gc watermark. If you hit this it means you are
     // allocating native objects faster than the GC can keep up with.
     if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
@@ -2357,7 +2180,7 @@
       }
       // If we still are over the watermark, attempt a GC for alloc and run finalizers.
       if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
-        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
         RunFinalization(env);
         native_need_to_run_finalization_ = false;
         CHECK(!env->ExceptionCheck());
@@ -2369,7 +2192,7 @@
       if (concurrent_gc_) {
         RequestConcurrentGC(self);
       } else {
-        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
       }
     }
   }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3da3943..5a0372a 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -87,6 +87,13 @@
   }
 };
 
+// Different types of allocators.
+enum AllocatorType {
+  kAllocatorTypeBumpPointer,
+  kAllocatorTypeFreeList,  // RosAlloc / dlmalloc.
+  kAllocatorTypeLOS,  // Large object space.
+};
+
 // What caused the GC?
 enum GcCause {
   // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
@@ -143,41 +150,30 @@
   ~Heap();
 
   // Allocates and initializes storage for an object instance.
-  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+  template <const bool kInstrumented>
+  inline mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(!kMovingClasses);
-    return AllocObjectInstrumented(self, klass, num_bytes);
+    return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes, GetCurrentAllocator());
   }
-  // Allocates and initializes storage for an object instance.
-  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+  template <const bool kInstrumented>
+  inline mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass,
+                                               size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(!kMovingClasses);
-    return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+    return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes,
+                                                   GetCurrentNonMovingAllocator());
   }
-  mirror::Object* AllocObjectInstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(!kMovingClasses);
-    if (kMovingCollector) {
-      return AllocMovableObjectInstrumented(self, klass, num_bytes);
-    } else {
-      return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
-    }
-  }
-  mirror::Object* AllocObjectUninstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(!kMovingClasses);
-    if (kMovingCollector) {
-      return AllocMovableObjectUninstrumented(self, klass, num_bytes);
-    } else {
-      return AllocNonMovableObjectUninstrumented(self, klass, num_bytes);
-    }
-  }
-  mirror::Object* AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* klass,
-                                                    size_t num_bytes)
+  template <bool kInstrumented>
+  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
+                                                         size_t num_bytes, AllocatorType allocator)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
-                                                      size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  AllocatorType GetCurrentAllocator() const {
+    return current_allocator_;
+  }
+
+  AllocatorType GetCurrentNonMovingAllocator() const {
+    return current_non_moving_allocator_;
+  }
 
   // Visit all of the live objects in the heap.
   void VisitObjects(ObjectVisitorCallback callback, void* arg)
@@ -488,13 +484,6 @@
   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
 
-  mirror::Object* AllocMovableObjectInstrumented(Thread* self, mirror::Class* klass,
-                                                 size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* AllocMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
-                                                   size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   bool IsCompilingBoot() const;
   bool HasImageSpace() const;
 
@@ -502,30 +491,19 @@
   void Compact(space::ContinuousMemMapAllocSpace* target_space,
                space::ContinuousMemMapAllocSpace* source_space);
 
-  bool TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
-                                       mirror::Object** obj_ptr, size_t* bytes_allocated)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
-                                         mirror::Object** obj_ptr, size_t* bytes_allocated)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count);
-  void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj);
-
-  // Allocates uninitialized storage. Passing in a null space tries to place the object in the
-  // large object space.
-  template <class T> mirror::Object* AllocateInstrumented(Thread* self, T* space, size_t num_bytes,
-                                                          size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  template <class T> mirror::Object* AllocateUninstrumented(Thread* self, T* space, size_t num_bytes,
-                                                            size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
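+  // Whether or not objects allocated by the given allocator need to be pushed on the allocation
+  // stack.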
+  static bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
+    return allocator_type == kAllocatorTypeFreeList;
+  }
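+  // Whether or not allocations from the given allocator may need to trigger a concurrent GC.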
+  static bool AllocatorHasConcurrentGC(AllocatorType allocator_type) {
+    return allocator_type == kAllocatorTypeFreeList;
+  }
+  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const;
+  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
+                                       mirror::Object* obj);
 
   // Handles Allocate()'s slow allocation path with GC involved after
   // an initial allocation attempt failed.
-  mirror::Object* AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t num_bytes,
+  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
                                          size_t* bytes_allocated)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -535,37 +513,12 @@
                                size_t bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Try to allocate a number of bytes, this function never does any GCs.
-  mirror::Object* TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                            bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Try to allocate a number of bytes, this function never does any GCs. DlMallocSpace-specialized version.
-  mirror::Object* TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
-                                            bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Try to allocate a number of bytes, this function never does any GCs. RosAllocSpace-specialized version.
-  mirror::Object* TryToAllocateInstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
-                                            bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::Object* TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                              bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::Object* TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
-                                              bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::Object* TryToAllocateUninstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
-                                              bool grow, size_t* bytes_allocated)
-      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+  // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
+  // that the allocator switch statement can be optimized out in the entrypoints.
+  template <const bool kInstrumented>
+  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
+                                              size_t alloc_size, bool grow,
+                                              size_t* bytes_allocated)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
@@ -816,12 +769,18 @@
   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
   // to use the live bitmap as the old mark bitmap.
   const size_t max_allocation_stack_size_;
-  bool is_allocation_stack_sorted_;
   UniquePtr<accounting::ObjectStack> allocation_stack_;
 
   // Second allocation stack so that we can process allocation with the heap unlocked.
   UniquePtr<accounting::ObjectStack> live_stack_;
 
+  // Allocator types used for new object allocations.
+  const AllocatorType current_allocator_;
+  const AllocatorType current_non_moving_allocator_;
+
+  // Which GC types we run, in order, when an allocation fails.
+  std::vector<collector::GcType> gc_plan_;
+
   // Bump pointer spaces.
   space::BumpPointerSpace* bump_pointer_space_;
   // Temp space is the space which the semispace collector copies to.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 0faac0c..9b0b6aa 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -120,6 +120,9 @@
   static mirror::Object* GetNextObject(mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Alignment.
+  static constexpr size_t kAlignment = 8;
+
  protected:
   BumpPointerSpace(const std::string& name, MemMap* mem_map);
 
@@ -132,9 +135,6 @@
   AtomicInteger total_bytes_allocated_;
   AtomicInteger total_objects_allocated_;
 
-  // Alignment.
-  static constexpr size_t kAlignment = 8;
-
   byte* growth_end_;
 
  private:
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index b3b4731..4ad9c63 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -407,45 +407,39 @@
 void Instrumentation::InstrumentQuickAllocEntryPoints() {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
-  DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_, 0U);
-  bool enable_instrumentation = (quick_alloc_entry_points_instrumentation_counter_ == 0);
-  quick_alloc_entry_points_instrumentation_counter_++;
+  DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.load(), 0);
+  const bool enable_instrumentation =
+      quick_alloc_entry_points_instrumentation_counter_.fetch_add(1) == 0;
   if (enable_instrumentation) {
     // Instrumentation wasn't enabled so enable it.
     SetQuickAllocEntryPointsInstrumented(true);
-    Runtime* runtime = Runtime::Current();
-    if (runtime->IsStarted()) {
-      ThreadList* tl = runtime->GetThreadList();
-      Thread* self = Thread::Current();
-      tl->SuspendAll();
-      {
-        MutexLock mu(self, *Locks::thread_list_lock_);
-        tl->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
-      }
-      tl->ResumeAll();
-    }
+    ResetQuickAllocEntryPoints();
   }
 }
 
 void Instrumentation::UninstrumentQuickAllocEntryPoints() {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
-  DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_, 0U);
-  quick_alloc_entry_points_instrumentation_counter_--;
-  bool disable_instrumentation = (quick_alloc_entry_points_instrumentation_counter_ == 0);
+  DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.load(), 0);
+  const bool disable_instrumentation =
+      quick_alloc_entry_points_instrumentation_counter_.fetch_sub(1) == 1;
   if (disable_instrumentation) {
     SetQuickAllocEntryPointsInstrumented(false);
-    Runtime* runtime = Runtime::Current();
-    if (runtime->IsStarted()) {
-      ThreadList* tl = Runtime::Current()->GetThreadList();
-      Thread* self = Thread::Current();
-      tl->SuspendAll();
-      {
-        MutexLock mu(self, *Locks::thread_list_lock_);
-        tl->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
-      }
-      tl->ResumeAll();
+    ResetQuickAllocEntryPoints();
+  }
+}
+
+void Instrumentation::ResetQuickAllocEntryPoints() {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsStarted()) {
+    ThreadList* tl = runtime->GetThreadList();
+    Thread* self = Thread::Current();
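+    // Suspend all threads so that each thread's entrypoint table can be reset without racing
+    // with running code.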
+    tl->SuspendAll();
+    {
+      MutexLock mu(self, *Locks::thread_list_lock_);
+      tl->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
     }
+    tl->ResumeAll();
   }
 }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 6bfc2d7..72a646e 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_INSTRUMENTATION_H_
 #define ART_RUNTIME_INSTRUMENTATION_H_
 
+#include "atomic_integer.h"
 #include "base/macros.h"
 #include "locks.h"
 
@@ -125,6 +126,7 @@
 
   void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_);
   void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  void ResetQuickAllocEntryPoints();
 
   // Update the code of a method respecting any installed stubs.
   void UpdateMethodsCode(mirror::ArtMethod* method, const void* code) const;
@@ -298,7 +300,7 @@
 
   // Greater than 0 if quick alloc entry points instrumented.
   // TODO: The access and changes to this is racy and should be guarded by a lock.
-  size_t quick_alloc_entry_points_instrumentation_counter_;
+  AtomicInteger quick_alloc_entry_points_instrumentation_counter_;
 
   DISALLOW_COPY_AND_ASSIGN(Instrumentation);
 };
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 08221b7..c9756ac 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -193,7 +193,7 @@
     }
     return false;
   }
-  Object* newArray = Array::Alloc<kMovingCollector, true>(self, arrayClass, length);
+  Object* newArray = Array::Alloc<true>(self, arrayClass, length);
   if (UNLIKELY(newArray == NULL)) {
     DCHECK(self->IsExceptionPending());
     return false;
@@ -279,7 +279,7 @@
     // TODO: getDeclaredField calls GetType once the field is found to ensure a
     //       NoClassDefFoundError is thrown if the field's type cannot be resolved.
     Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
-    SirtRef<Object> field(self, jlr_Field->AllocObject(self));
+    SirtRef<Object> field(self, jlr_Field->AllocNonMovableObject(self));
     CHECK(field.get() != NULL);
     ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
     uint32_t args[1];
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index aa6bcd6..99c85bd 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -509,8 +509,9 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NEW_INSTANCE) {
-    Object* obj = AllocObjectFromCodeInstrumented(inst->VRegB_21c(), shadow_frame.GetMethod(),
-                                                  self, do_access_check);
+    Object* obj = AllocObjectFromCode<do_access_check, true>(
+        inst->VRegB_21c(), shadow_frame.GetMethod(), self,
+        Runtime::Current()->GetHeap()->GetCurrentAllocator());
     if (UNLIKELY(obj == NULL)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -522,8 +523,9 @@
 
   HANDLE_INSTRUCTION_START(NEW_ARRAY) {
     int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
-    Object* obj = AllocArrayFromCodeInstrumented(inst->VRegC_22c(), shadow_frame.GetMethod(),
-                                                 length, self, do_access_check);
+    Object* obj = AllocArrayFromCode<do_access_check, true>(
+        inst->VRegC_22c(), shadow_frame.GetMethod(), length, self,
+        Runtime::Current()->GetHeap()->GetCurrentAllocator());
     if (UNLIKELY(obj == NULL)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index bd0d87e..675095f 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -422,8 +422,9 @@
       }
       case Instruction::NEW_INSTANCE: {
         PREAMBLE();
-        Object* obj = AllocObjectFromCodeInstrumented(inst->VRegB_21c(), shadow_frame.GetMethod(),
-                                                      self, do_access_check);
+        Object* obj = AllocObjectFromCode<do_access_check, true>(
+            inst->VRegB_21c(), shadow_frame.GetMethod(), self,
+            Runtime::Current()->GetHeap()->GetCurrentAllocator());
         if (UNLIKELY(obj == NULL)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
@@ -435,8 +436,9 @@
       case Instruction::NEW_ARRAY: {
         PREAMBLE();
         int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
-        Object* obj = AllocArrayFromCodeInstrumented(inst->VRegC_22c(), shadow_frame.GetMethod(),
-                                                     length, self, do_access_check);
+        Object* obj = AllocArrayFromCode<do_access_check, true>(
+            inst->VRegC_22c(), shadow_frame.GetMethod(), length, self,
+            Runtime::Current()->GetHeap()->GetCurrentAllocator());
         if (UNLIKELY(obj == NULL)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index ef73e4d..2955faa 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -59,43 +59,44 @@
 }
 
 static inline Array* SetArrayLength(Array* array, size_t length) {
-  if (LIKELY(array != NULL)) {
+  if (LIKELY(array != nullptr)) {
     DCHECK(array->IsArrayInstance());
     array->SetLength(length);
   }
   return array;
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
+template <bool kIsInstrumented>
 inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
-                           size_t component_size) {
+                           size_t component_size, gc::AllocatorType allocator_type) {
   size_t size = ComputeArraySize(self, array_class, component_count, component_size);
   if (UNLIKELY(size == 0)) {
-    return NULL;
+    return nullptr;
   }
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  Array* array = nullptr;
-  if (kIsMovable) {
-    if (kIsInstrumented) {
-      array = down_cast<Array*>(heap->AllocMovableObjectInstrumented(self, array_class, size));
-    } else {
-      array = down_cast<Array*>(heap->AllocMovableObjectUninstrumented(self, array_class, size));
-    }
-  } else {
-    if (kIsInstrumented) {
-      array = down_cast<Array*>(heap->AllocNonMovableObjectInstrumented(self, array_class, size));
-    } else {
-      array = down_cast<Array*>(heap->AllocNonMovableObjectUninstrumented(self, array_class, size));
-    }
-  }
+  Array* array = down_cast<Array*>(
+      heap->AllocObjectWithAllocator<kIsInstrumented>(self, array_class, size, allocator_type));
   return SetArrayLength(array, component_count);
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+                           gc::AllocatorType allocator_type) {
   DCHECK(array_class->IsArrayClass());
-  return Alloc<kIsMovable, kIsInstrumented>(self, array_class, component_count,
-                                            array_class->GetComponentSize());
+  return Alloc<kIsInstrumented>(self, array_class, component_count, array_class->GetComponentSize(),
+                                allocator_type);
+}
+
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
+  return Alloc<kIsInstrumented>(self, array_class, component_count,
+                                Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+                           size_t component_size) {
+  return Alloc<kIsInstrumented>(self, array_class, component_count, component_size,
+                                Runtime::Current()->GetHeap()->GetCurrentAllocator());
 }
 
 }  // namespace mirror
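
With these overloads, array callers either take the heap's current allocator implicitly or name one
explicitly; every form funnels into Heap::AllocObjectWithAllocator<kIsInstrumented>. A short usage
sketch (self, array_class and length as in the callers elsewhere in this patch; the non-moving case
mirrors the dalvik_system_VMRuntime change below):

    // Common case: current allocator, typically a movable space under a compacting collector.
    mirror::Array* movable = mirror::Array::Alloc<true>(self, array_class, length);
    // Explicit case: keep the array out of the moving space.
    mirror::Array* pinned = mirror::Array::Alloc<true>(
        self, array_class, length,
        Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
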
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index f8a2832..00b88db 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -44,8 +44,7 @@
                                         SirtRef<mirror::IntArray>& dimensions)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
-  SirtRef<Array> new_array(self, Array::Alloc<kMovingCollector, true>(self, array_class,
-                                                                      array_length));
+  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class, array_length));
   if (UNLIKELY(new_array.get() == NULL)) {
     CHECK(self->IsExceptionPending());
     return NULL;
@@ -115,7 +114,7 @@
 template<typename T>
 PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
   DCHECK(array_class_ != NULL);
-  Array* raw_array = Array::Alloc<kMovingCollector, true>(self, array_class_, length, sizeof(T));
+  Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T));
   return down_cast<PrimitiveArray<T>*>(raw_array);
 }
 
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 584a4c0..a332f97 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_MIRROR_ARRAY_H_
 
 #include "object.h"
+#include "gc/heap.h"
 
 namespace art {
 namespace mirror {
@@ -26,13 +27,24 @@
  public:
   // A convenience for code that doesn't know the component size, and doesn't want to have to work
   // it out itself.
-  template <bool kIsMovable, bool kIsInstrumented>
+  template <bool kIsInstrumented>
+  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
+                      gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <bool kIsInstrumented>
+  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
+                      size_t component_size, gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <bool kIsInstrumented>
   static Array* Alloc(Thread* self, Class* array_class, int32_t component_count)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  template <bool kIsMovable, bool kIsInstrumented>
+  template <bool kIsInstrumented>
   static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
-                      size_t component_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                      size_t component_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 406ab1b..4dcce1e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -357,23 +357,20 @@
   DCHECK_GE(this->object_size_, sizeof(Object));
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
-inline Object* Class::Alloc(Thread* self) {
+template <bool kIsInstrumented>
+inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
   CheckObjectAlloc();
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  if (kIsMovable) {
-    if (kIsInstrumented) {
-      return heap->AllocMovableObjectInstrumented(self, this, this->object_size_);
-    } else {
-      return heap->AllocMovableObjectUninstrumented(self, this, this->object_size_);
-    }
-  } else {
-    if (kIsInstrumented) {
-      return heap->AllocNonMovableObjectInstrumented(self, this, this->object_size_);
-    } else {
-      return heap->AllocNonMovableObjectUninstrumented(self, this, this->object_size_);
-    }
-  }
+  return heap->AllocObjectWithAllocator<kIsInstrumented>(self, this, this->object_size_,
+                                                         allocator_type);
+}
+
+inline Object* Class::AllocObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+inline Object* Class::AllocNonMovableObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
 }
 
 }  // namespace mirror
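
Class allocation follows the same pattern: the movable/non-movable decision is an allocator
argument rather than a template parameter, with two thin wrappers for the common cases. Sketch of
the resulting surface (klass being any resolved mirror::Class*, matching the declarations added to
class.h below):

    // Common case: current allocator; the object may move under a compacting collector.
    Object* obj = klass->AllocObject(self);
    // Pinned case: current non-moving allocator, as used by Constructor.newInstance below.
    Object* fixed = klass->AllocNonMovableObject(self);
    // Both are shorthand for the explicit form:
    Object* same = klass->Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
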
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 82077dc..5f64bb4 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_CLASS_H_
 #define ART_RUNTIME_MIRROR_CLASS_H_
 
+#include "gc/heap.h"
 #include "modifiers.h"
 #include "object.h"
 #include "primitive.h"
@@ -377,13 +378,14 @@
   }
 
   // Creates a raw object instance but does not invoke the default constructor.
-  Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return Alloc<kMovingCollector, true>(self);
-  }
+  template <bool kIsInstrumented>
+  ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Creates a raw object instance but does not invoke the default constructor.
-  template <bool kIsMovable, bool kIsInstrumented>
-  Object* Alloc(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* AllocObject(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* AllocNonMovableObject(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsVariableSize() const {
     // Classes and arrays vary in size, and so the object_size_ field cannot
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 385ef5f..008a173 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -75,9 +75,9 @@
   SirtRef<Object> this_object(self, this);
   Object* copy;
   if (heap->IsMovableObject(this)) {
-    copy = heap->AllocObject(self, GetClass(), num_bytes);
+    copy = heap->AllocObject<true>(self, GetClass(), num_bytes);
   } else {
-    copy = heap->AllocNonMovableObject(self, GetClass(), num_bytes);
+    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes);
   }
   if (LIKELY(copy != nullptr)) {
     return CopyObject(self, copy, this_object.get(), num_bytes);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 478f4ec..be49b42 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -30,16 +30,25 @@
 namespace mirror {
 
 template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class, int32_t length) {
-  Array* array = Array::Alloc<kMovingCollector, true>(self, object_array_class, length, sizeof(Object*));
-  if (UNLIKELY(array == NULL)) {
-    return NULL;
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+                                             int32_t length, gc::AllocatorType allocator_type) {
+  Array* array = Array::Alloc<true>(self, object_array_class, length, sizeof(Object*),
+                                    allocator_type);
+  if (UNLIKELY(array == nullptr)) {
+    return nullptr;
   } else {
     return array->AsObjectArray<T>();
   }
 }
 
 template<class T>
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+                                             int32_t length) {
+  return Alloc(self, object_array_class, length,
+               Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+template<class T>
 inline T* ObjectArray<T>::Get(int32_t i) const {
   if (UNLIKELY(!IsValidIndex(i))) {
     return NULL;
@@ -137,7 +146,10 @@
 inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
   // We may get copied by a compacting GC.
   SirtRef<ObjectArray<T> > sirt_this(self, this);
-  ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length);
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
+      heap->GetCurrentNonMovingAllocator();
+  ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length, allocator_type);
   if (LIKELY(new_array != nullptr)) {
     Copy(sirt_this.get(), 0, new_array, 0, std::min(sirt_this->GetLength(), new_length));
   }
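
CopyOf (like the mirror/object.cc copy path above) cannot simply take the current allocator: the
copy has to stay in the same kind of space as the source, otherwise duplicating a deliberately
non-movable array or object would quietly produce a movable one. The selection reduces to the
pattern below (obj being the source object; shown only to spell out the intent of the hunk above):

    gc::Heap* heap = Runtime::Current()->GetHeap();
    gc::AllocatorType allocator_type = heap->IsMovableObject(obj)
        ? heap->GetCurrentAllocator()
        : heap->GetCurrentNonMovingAllocator();
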
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 09ff519..5da8845 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
 
 #include "array.h"
+#include "gc/heap.h"
 
 namespace art {
 namespace mirror {
@@ -25,6 +26,10 @@
 template<class T>
 class MANAGED ObjectArray : public Array {
  public:
+  static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
+                               gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 8530317..8272ff8 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -144,15 +144,15 @@
 TEST_F(ObjectTest, AllocArray) {
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass("[I");
-  SirtRef<Array> a(soa.Self(), Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 
   c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
-  a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 
   c = class_linker_->FindSystemClass("[[Ljava/lang/Object;");
-  a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 }
 
@@ -221,7 +221,8 @@
       java_lang_dex_file_->GetIndexForStringId(*string_id));
   ASSERT_TRUE(type_id != NULL);
   uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
-  Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false);
+  Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false,
+                                             Runtime::Current()->GetHeap()->GetCurrentAllocator());
   EXPECT_TRUE(array->IsArrayInstance());
   EXPECT_EQ(3, array->AsArray()->GetLength());
   EXPECT_TRUE(array->GetClass()->IsArrayClass());
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index f0efdc2..fd3d91e 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -71,7 +71,8 @@
   descriptor += ClassHelper(element_class).GetDescriptor();
   SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
   mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
-  mirror::Array* result = mirror::Array::Alloc<false, true>(soa.Self(), array_class, length);
+  gc::AllocatorType allocator = Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator();
+  mirror::Array* result = mirror::Array::Alloc<true>(soa.Self(), array_class, length, allocator);
   return soa.AddLocalReference<jobject>(result);
 }
 
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 808c917..52cdb59 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -59,8 +59,7 @@
     return NULL;
   }
   DCHECK(array_class->IsArrayClass());
-  mirror::Array* new_array = mirror::Array::Alloc<kMovingCollector, true>(
-      soa.Self(), array_class, length);
+  mirror::Array* new_array = mirror::Array::Alloc<true>(soa.Self(), array_class, length);
   return soa.AddLocalReference<jobject>(new_array);
 }
 
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index aa72755..04dfcb5 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -56,7 +56,7 @@
     return NULL;
   }
 
-  mirror::Object* receiver = c->AllocObject(soa.Self());
+  mirror::Object* receiver = c->AllocNonMovableObject(soa.Self());
   if (receiver == NULL) {
     return NULL;
   }