Merge "Add timing logger for mini-debug-info generation."
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index 9e709d8..0129772 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -29,11 +29,17 @@
 namespace.platform.link.default.shared_libs += libnativehelper.so
 namespace.platform.link.default.shared_libs += libnativeloader.so
 
+# Note that we don't need to link the default namespace with conscrypt:
+# the runtime Java code and binaries do not explicitly load native libraries
+# from it.
+
 ###############################################################################
 # "conscrypt" APEX namespace
 #
 # This namespace is for libraries within the conscrypt APEX.
 ###############################################################################
+
+# Keep in sync with conscrypt namespace in /system/etc/ld.config.txt.
 namespace.conscrypt.isolated = true
 namespace.conscrypt.visible = true
 
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 1725154..478ecdf 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -427,6 +427,7 @@
   gc::CollectorType collector_type_ = gc::kCollectorTypeDefault;
   bool verify_pre_gc_heap_ = false;
   bool verify_pre_sweeping_heap_ = kIsDebugBuild;
+  bool generational_cc = kEnableGenerationalCCByDefault;
   bool verify_post_gc_heap_ = false;
   bool verify_pre_gc_rosalloc_ = kIsDebugBuild;
   bool verify_pre_sweeping_rosalloc_ = false;
@@ -455,6 +456,10 @@
         xgc.verify_pre_sweeping_heap_ = true;
       } else if (gc_option == "nopresweepingverify") {
         xgc.verify_pre_sweeping_heap_ = false;
+      } else if (gc_option == "generational_cc") {
+        xgc.generational_cc = true;
+      } else if (gc_option == "nogenerational_cc") {
+        xgc.generational_cc = false;
       } else if (gc_option == "postverify") {
         xgc.verify_post_gc_heap_ = true;
       } else if (gc_option == "nopostverify") {
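The new flag follows the same `[no]` prefix convention as the neighboring `-Xgc:` sub-options and defaults to the build-time constant `kEnableGenerationalCCByDefault`. A minimal stand-alone sketch of this parsing chain (the `XGcOptionSketch` struct and `ParseXGcSketch` helper are illustrative names, not ART code):

```cpp
#include <sstream>
#include <string>

// Stand-in for the real XGcOption; the default mirrors
// `generational_cc = kEnableGenerationalCCByDefault` above.
struct XGcOptionSketch {
  bool generational_cc = true;
};

// Parse a comma-separated -Xgc: value, e.g. "presweepingverify,nogenerational_cc".
XGcOptionSketch ParseXGcSketch(const std::string& value) {
  XGcOptionSketch xgc;
  std::istringstream tokens(value);
  std::string gc_option;
  while (std::getline(tokens, gc_option, ',')) {
    if (gc_option == "generational_cc") {
      xgc.generational_cc = true;
    } else if (gc_option == "nogenerational_cc") {
      xgc.generational_cc = false;
    }
    // The real parser handles many more sub-options (preverify, postverify, ...).
  }
  return xgc;
}
```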
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index e914738..5aa8236 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -205,7 +205,7 @@
     target: {
         android: {
             shared_libs: [
-                "libpagemap",
+                "libmeminfo",
             ],
         },
     },
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 7a849f2..28d4048 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -34,13 +34,18 @@
 #include "dex_ir.h"
 #include "dex_ir_builder.h"
 #ifdef ART_TARGET_ANDROID
-#include "pagemap/pagemap.h"
+#include <meminfo/pageacct.h>
+#include <meminfo/procmeminfo.h>
 #endif
 #include "vdex_file.h"
 
 namespace art {
 
 using android::base::StringPrintf;
+#ifdef ART_TARGET_ANDROID
+using android::meminfo::ProcMemInfo;
+using android::meminfo::Vma;
+#endif
 
 static bool g_verbose = false;
 
@@ -194,7 +199,7 @@
   return DexFile::kDexTypeHeaderItem;
 }
 
-static void ProcessPageMap(uint64_t* pagemap,
+static void ProcessPageMap(const std::vector<uint64_t>& pagemap,
                            size_t start,
                            size_t end,
                            const std::vector<dex_ir::DexFileSection>& sections,
@@ -202,7 +207,7 @@
   static constexpr size_t kLineLength = 32;
   for (size_t page = start; page < end; ++page) {
     char type_char = '.';
-    if (PM_PAGEMAP_PRESENT(pagemap[page])) {
+    if (::android::meminfo::page_present(pagemap[page])) {
       const size_t dex_page_offset = page - start;
       uint16_t type = FindSectionTypeForPage(dex_page_offset, sections);
       page_counts->Increment(type);
@@ -265,7 +270,7 @@
   printer->PrintSkipLine();
 }
 
-static void ProcessOneDexMapping(uint64_t* pagemap,
+static void ProcessOneDexMapping(const std::vector<uint64_t>& pagemap,
                                  uint64_t map_start,
                                  const DexFile* dex_file,
                                  uint64_t vdex_start,
@@ -316,8 +321,8 @@
   return false;
 }
 
-static bool DisplayMappingIfFromVdexFile(pm_map_t* map, Printer* printer) {
-  std::string vdex_name = pm_map_name(map);
+static bool DisplayMappingIfFromVdexFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) {
+  std::string vdex_name = vma.name;
   // Extract all the dex files from the vdex file.
   std::string error_msg;
   std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_name,
@@ -344,34 +349,33 @@
     return false;
   }
   // Open the page mapping (one uint64_t per page) for the entire vdex mapping.
-  uint64_t* pagemap;
-  size_t len;
-  if (pm_map_pagemap(map, &pagemap, &len) != 0) {
+  std::vector<uint64_t> pagemap;
+  if (!proc.PageMap(vma, &pagemap)) {
     std::cerr << "Error creating pagemap." << std::endl;
     return false;
   }
   // Process the dex files.
   std::cout << "MAPPING "
-            << pm_map_name(map)
-            << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map))
+            << vma.name
+            << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end)
             << std::endl;
   for (const auto& dex_file : dex_files) {
     ProcessOneDexMapping(pagemap,
-                         pm_map_start(map),
+                         vma.start,
                          dex_file.get(),
                          reinterpret_cast<uint64_t>(vdex->Begin()),
                          printer);
   }
-  free(pagemap);
   return true;
 }
 
-static void ProcessOneOatMapping(uint64_t* pagemap, size_t size, Printer* printer) {
+static void ProcessOneOatMapping(const std::vector<uint64_t>& pagemap,
+                                 Printer* printer) {
   static constexpr size_t kLineLength = 32;
   size_t resident_page_count = 0;
-  for (size_t page = 0; page < size; ++page) {
+  for (size_t page = 0; page < pagemap.size(); ++page) {
     char type_char = '.';
-    if (PM_PAGEMAP_PRESENT(pagemap[page])) {
+    if (::android::meminfo::page_present(pagemap[page])) {
       ++resident_page_count;
       type_char = '*';
     }
@@ -383,13 +387,13 @@
     }
   }
   if (g_verbose) {
-    if (size % kLineLength != 0) {
+    if (pagemap.size() % kLineLength != 0) {
       std::cout << std::endl;
     }
   }
-  double percent_of_total = 100.0 * resident_page_count / size;
+  double percent_of_total = 100.0 * resident_page_count / pagemap.size();
   printer->PrintHeader();
-  printer->PrintOne("EXECUTABLE", resident_page_count, size, percent_of_total, percent_of_total);
+  printer->PrintOne("EXECUTABLE", resident_page_count, pagemap.size(), percent_of_total, percent_of_total);
   printer->PrintSkipLine();
 }
 
@@ -405,21 +409,19 @@
   return false;
 }
 
-static bool DisplayMappingIfFromOatFile(pm_map_t* map, Printer* printer) {
+static bool DisplayMappingIfFromOatFile(ProcMemInfo& proc, const Vma& vma, Printer* printer) {
   // Open the page mapping (one uint64_t per page) for the entire oat mapping.
-  uint64_t* pagemap;
-  size_t len;
-  if (pm_map_pagemap(map, &pagemap, &len) != 0) {
+  std::vector<uint64_t> pagemap;
+  if (!proc.PageMap(vma, &pagemap)) {
     std::cerr << "Error creating pagemap." << std::endl;
     return false;
   }
   // Process the oat file.
   std::cout << "MAPPING "
-            << pm_map_name(map)
-            << StringPrintf(": %" PRIx64 "-%" PRIx64, pm_map_start(map), pm_map_end(map))
+            << vma.name
+            << StringPrintf(": %" PRIx64 "-%" PRIx64, vma.start, vma.end)
             << std::endl;
-  ProcessOneOatMapping(pagemap, len, printer);
-  free(pagemap);
+  ProcessOneOatMapping(pagemap, printer);
   return true;
 }
 
@@ -488,27 +490,11 @@
     return EXIT_FAILURE;
   }
 
-  // get libpagemap kernel information.
-  pm_kernel_t* ker;
-  if (pm_kernel_create(&ker) != 0) {
-    std::cerr << "Error creating kernel interface -- does this kernel have pagemap?" << std::endl;
-    return EXIT_FAILURE;
-  }
-
-  // get libpagemap process information.
-  pm_process_t* proc;
-  if (pm_process_create(ker, pid, &proc) != 0) {
-    std::cerr << "Error creating process interface -- does process "
-              << pid
-              << " really exist?"
-              << std::endl;
-    return EXIT_FAILURE;
-  }
-
+  // get libmeminfo process information.
+  ProcMemInfo proc(pid);
   // Get the set of mappings by the specified process.
-  pm_map_t** maps;
-  size_t num_maps;
-  if (pm_process_maps(proc, &maps, &num_maps) != 0) {
+  const std::vector<Vma>& maps = proc.Maps();
+  if (maps.empty()) {
     std::cerr << "Error listing maps." << std::endl;
     return EXIT_FAILURE;
   }
@@ -516,19 +502,19 @@
   bool match_found = false;
   // Process the mappings that are due to vdex or oat files.
   Printer printer;
-  for (size_t i = 0; i < num_maps; ++i) {
-    std::string mapped_file_name = pm_map_name(maps[i]);
+  for (auto& vma : maps) {
+    std::string mapped_file_name = vma.name;
     // Filter by name contains options (if any).
     if (!FilterByNameContains(mapped_file_name, name_filters)) {
       continue;
     }
     if (IsVdexFileMapping(mapped_file_name)) {
-      if (!DisplayMappingIfFromVdexFile(maps[i], &printer)) {
+      if (!DisplayMappingIfFromVdexFile(proc, vma, &printer)) {
         return EXIT_FAILURE;
       }
       match_found = true;
     } else if (IsOatFileMapping(mapped_file_name)) {
-      if (!DisplayMappingIfFromOatFile(maps[i], &printer)) {
+      if (!DisplayMappingIfFromOatFile(proc, vma, &printer)) {
         return EXIT_FAILURE;
       }
       match_found = true;
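
For readers unfamiliar with libmeminfo: the migration above replaces libpagemap's C-style out-parameters and manual `free()` with an object API. Construct a `ProcMemInfo` for a pid, enumerate its `Vma`s via `Maps()`, and fill one `std::vector<uint64_t>` of per-page flags per mapping via `PageMap()`. A hedged sketch of that pattern (Android-only; `CountResidentPages` is a hypothetical helper, not part of this change):

```cpp
#include <sys/types.h>  // pid_t

#include <meminfo/pageacct.h>     // android::meminfo::page_present
#include <meminfo/procmeminfo.h>  // android::meminfo::ProcMemInfo, Vma

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

bool CountResidentPages(pid_t pid) {
  android::meminfo::ProcMemInfo proc(pid);
  const std::vector<android::meminfo::Vma>& maps = proc.Maps();
  for (const android::meminfo::Vma& vma : maps) {
    std::vector<uint64_t> pagemap;  // one uint64_t of flags per page
    if (!proc.PageMap(vma, &pagemap)) {
      return false;  // the vector cleans up automatically; no free() as with libpagemap
    }
    size_t resident = 0;
    for (uint64_t entry : pagemap) {
      if (android::meminfo::page_present(entry)) {
        ++resident;
      }
    }
    std::cout << vma.name << ": " << resident << "/" << pagemap.size()
              << " pages resident" << std::endl;
  }
  return true;
}
```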
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index 9490798..4953bab 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -278,6 +278,17 @@
   }
 }
 
+bool LocationIsOnRuntimeModule(const char* full_path) {
+  std::string error_msg;
+  const char* runtime_path = GetAndroidDirSafe("ANDROID_RUNTIME_ROOT",
+                                               "/apex/com.android.runtime",
+                                               &error_msg);
+  if (runtime_path == nullptr) {
+    return false;
+  }
+  return android::base::StartsWith(full_path, runtime_path);
+}
+
 bool LocationIsOnSystem(const char* path) {
 #ifdef _WIN32
   UNUSED(path);
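
The new predicate resolves the module root like other ART directories: the `ANDROID_RUNTIME_ROOT` environment variable wins if set, otherwise the `/apex/com.android.runtime` default applies; if neither resolves, the function conservatively returns false. A small illustrative check (hypothetical demo code; device-oriented paths, and the expected results are assumptions about a standard device layout):

```cpp
#include <cstdio>

#include "base/file_utils.h"  // ART-internal: declares art::LocationIsOnRuntimeModule.

void DemoRuntimeModuleCheck() {
  // Expected to print 1 on a device: the jar lives under the runtime APEX prefix.
  std::printf("%d\n", art::LocationIsOnRuntimeModule(
      "/apex/com.android.runtime/javalib/core-oj.jar"));
  // Expected to print 0: framework jars live on /system, not the runtime module.
  std::printf("%d\n", art::LocationIsOnRuntimeModule("/system/framework/framework.jar"));
}
```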
diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h
index c249bcc..bddfaa1 100644
--- a/libartbase/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -72,6 +72,9 @@
 //          ReplaceFileExtension("foo", "abc") == "foo.abc"
 std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
 
+// Return whether the location is on /apex/com.android.runtime (i.e. the Runtime APEX).
+bool LocationIsOnRuntimeModule(const char* location);
+
 // Return whether the location is on system (i.e. android root).
 bool LocationIsOnSystem(const char* location);
 
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index a814b66..7e93639 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -539,17 +539,17 @@
                                                                 error_msg,
                                                                 std::move(container),
                                                                 verify_result);
-
-  // Check if this dex file is located in the framework directory.
-  // If it is, set a flag on the dex file. This is used by hidden API
-  // policy decision logic.
-  // Location can contain multidex suffix, so fetch its canonical version. Note
-  // that this will call `realpath`.
-  std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
-  if (dex_file != nullptr && LocationIsOnSystemFramework(path.c_str())) {
-    dex_file->SetHiddenapiDomain(hiddenapi::Domain::kPlatform);
+  if (dex_file != nullptr) {
+    // Set hidden API domain based on location.
+    // Location can contain multidex suffix, so fetch its canonical version. Note
+    // that this will call `realpath`.
+    std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
+    if (LocationIsOnSystemFramework(path.c_str())) {
+      dex_file->SetHiddenapiDomain(hiddenapi::Domain::kPlatform);
+    } else if (LocationIsOnRuntimeModule(path.c_str())) {
+      dex_file->SetHiddenapiDomain(hiddenapi::Domain::kCorePlatform);
+    }
   }
-
   return dex_file;
 }
 
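Restating the new branch: once the dex file opens successfully, its hidden-API domain is derived from the canonical location; anything that is neither on the system framework nor on the runtime module keeps the default untrusted domain. A compact, hypothetical restatement (`DomainForLocation` is not an ART function, and the include for `hiddenapi::Domain` is an assumption):

```cpp
#include <string>

#include "base/file_utils.h"  // ART-internal location predicates (assumed include).

namespace art {

hiddenapi::Domain DomainForLocation(const std::string& canonical_path) {
  if (LocationIsOnSystemFramework(canonical_path.c_str())) {
    return hiddenapi::Domain::kPlatform;      // e.g. /system/framework/...
  }
  if (LocationIsOnRuntimeModule(canonical_path.c_str())) {
    return hiddenapi::Domain::kCorePlatform;  // e.g. /apex/com.android.runtime/...
  }
  return hiddenapi::Domain::kApplication;     // untrusted default
}

}  // namespace art
```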
diff --git a/runtime/Android.bp b/runtime/Android.bp
index b89eb02..a3081e9 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -396,8 +396,10 @@
         "libnativeloader",
         "libbacktrace",
         "liblog",
-        // For atrace, properties, ashmem, set_sched_policy.
+        // For atrace, properties, ashmem.
         "libcutils",
+        // For set_sched_policy.
+        "libprocessgroup",
         // For common macros.
         "libbase",
     ],
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 1014c0e..2de7910 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -36,8 +36,7 @@
     Thread* const self,
     mirror::Object* ref,
     accounting::ContinuousSpaceBitmap* bitmap) {
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && !done_scanning_.load(std::memory_order_acquire)) {
+  if (use_generational_cc_ && !done_scanning_.load(std::memory_order_acquire)) {
     // Everything in the unevac space should be marked for young generation CC,
     // except for large objects.
     DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
@@ -130,7 +129,7 @@
                                                mirror::Object* holder,
                                                MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (from_ref == nullptr) {
     return nullptr;
   }
@@ -172,9 +171,7 @@
         return to_ref;
       }
       case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
-        if (kEnableGenerationalConcurrentCopyingCollection
-            && kNoUnEvac
-            && !region_space_->IsLargeObject(from_ref)) {
+        if (kNoUnEvac && use_generational_cc_ && !region_space_->IsLargeObject(from_ref)) {
           if (!kFromGCThread) {
             DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator";
           }
@@ -245,8 +242,7 @@
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!kEnableGenerationalConcurrentCopyingCollection
-             || done_scanning_.load(std::memory_order_acquire)) {
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
     // If the card table scanning is not finished yet, then only read-barrier
     // state should be checked. Checking the mark bitmap is unreliable as there
     // may be some objects - whose corresponding card is dirty - which are
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f7b76a..642b12e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -69,15 +69,19 @@
 
 ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                      bool young_gen,
+                                     bool use_generational_cc,
                                      const std::string& name_prefix,
                                      bool measure_read_barrier_slow_path)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") +
                        "concurrent copying"),
-      region_space_(nullptr), gc_barrier_(new Barrier(0)),
+      region_space_(nullptr),
+      gc_barrier_(new Barrier(0)),
       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                      kDefaultGcMarkStackSize,
                                                      kDefaultGcMarkStackSize)),
+      use_generational_cc_(use_generational_cc),
+      young_gen_(young_gen),
       rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                          kReadBarrierMarkStackSize,
                                                          kReadBarrierMarkStackSize)),
@@ -100,7 +104,6 @@
       region_space_inter_region_bitmap_(nullptr),
       non_moving_space_inter_region_bitmap_(nullptr),
       reclaimed_bytes_ratio_sum_(0.f),
-      young_gen_(young_gen),
       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
       measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
       mark_from_read_barrier_measurements_(false),
@@ -119,7 +122,7 @@
       num_bytes_allocated_before_gc_(0) {
   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                 "The region space size and the read barrier table region size must match");
-  CHECK(kEnableGenerationalConcurrentCopyingCollection || !young_gen_);
+  CHECK(use_generational_cc_ || !young_gen_);
   Thread* self = Thread::Current();
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -138,7 +141,7 @@
       pooled_mark_stacks_.push_back(mark_stack);
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     // Allocate sweep array free buffer.
     std::string error_msg;
     sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
@@ -194,7 +197,7 @@
     InitializePhase();
     // In case of forced evacuation, all regions are evacuated and hence no
     // need to compute live_bytes.
-    if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+    if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) {
       MarkingPhase();
     }
   }
@@ -290,7 +293,7 @@
 }
 
 void ConcurrentCopying::CreateInterRegionRefBitmaps() {
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
   DCHECK(region_space_inter_region_bitmap_ == nullptr);
   DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
   DCHECK(region_space_ != nullptr);
@@ -325,7 +328,7 @@
       CHECK(!space->IsZygoteSpace());
       CHECK(!space->IsImageSpace());
       CHECK(space == region_space_ || space == heap_->non_moving_space_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         if (space == region_space_) {
           region_space_bitmap_ = region_space_->GetMarkBitmap();
         } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
@@ -358,7 +361,7 @@
       }
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
       CHECK(space->IsLargeObjectSpace());
       space->AsLargeObjectSpace()->CopyLiveToMarked();
@@ -391,7 +394,7 @@
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
 
   force_evacuate_all_ = false;
-  if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+  if (!use_generational_cc_ || !young_gen_) {
     if (gc_cause == kGcCauseExplicit ||
         gc_cause == kGcCauseCollectorTransition ||
         GetCurrentIteration()->GetClearSoftReferences()) {
@@ -407,7 +410,7 @@
       DCHECK(immune_gray_stack_.empty());
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     done_scanning_.store(false, std::memory_order_release);
   }
   BindBitmaps();
@@ -421,7 +424,7 @@
     }
     LOG(INFO) << "GC end of InitializePhase";
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+  if (use_generational_cc_ && !young_gen_) {
     region_space_bitmap_->Clear();
   }
   mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
@@ -533,7 +536,7 @@
       cc->region_space_->SetFromSpace(
           cc->rb_table_,
           evac_mode,
-          /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
+          /*clear_live_bytes=*/ !cc->use_generational_cc_);
     }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -542,7 +545,7 @@
       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
     }
     cc->is_marking_ = true;
-    if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
+    if (kIsDebugBuild && !cc->use_generational_cc_) {
       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
     }
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -866,7 +869,7 @@
   DCHECK(obj != nullptr);
   DCHECK(immune_spaces_.ContainsObject(obj));
   // Update the fields without graying it or pushing it onto the mark stack.
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Young GC does not care about references to unevac space. It is safe to not gray these as
     // long as scan immune objects happens after scanning the dirty cards.
     Scan<true>(obj);
@@ -1394,7 +1397,7 @@
   if (kUseBakerReadBarrier) {
     gc_grays_immune_objects_ = false;
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     if (kVerboseMode) {
       LOG(INFO) << "GC ScanCardsForSpace";
     }
@@ -2152,7 +2155,7 @@
       if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
         // It may be already marked if we accidentally pushed the same object twice due to the racy
         // bitmap read in MarkUnevacFromSpaceRegion.
-        if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+        if (use_generational_cc_ && young_gen_) {
           CHECK(region_space_->IsLargeObject(to_ref));
           region_space_->ZeroLiveBytesForLargeObject(to_ref);
         }
@@ -2169,7 +2172,7 @@
       }
       break;
     case space::RegionSpace::RegionType::kRegionTypeToSpace:
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         // Copied to to-space, set the bit so that the next GC can scan objects.
         region_space_bitmap_->Set(to_ref);
       }
@@ -2214,7 +2217,7 @@
       }
   }
   if (perform_scan) {
-    if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+    if (use_generational_cc_ && young_gen_) {
       Scan<true>(to_ref);
     } else {
       Scan<false>(to_ref);
@@ -2373,7 +2376,7 @@
 }
 
 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Only sweep objects on the live stack.
     SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
   } else {
@@ -2407,7 +2410,7 @@
 // Copied and adapted from MarkSweep::SweepArray.
 void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
   CheckEmptyMarkStack();
   TimingLogger::ScopedTiming t("SweepArray", GetTimings());
   Thread* self = Thread::Current();
@@ -2891,8 +2894,7 @@
   DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!kEnableGenerationalConcurrentCopyingCollection
-             || done_scanning_.load(std::memory_order_acquire)) {
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
     // Read the comment in IsMarkedInUnevacFromSpace()
     accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
     accounting::LargeObjectBitmap* los_bitmap = nullptr;
@@ -2954,7 +2956,7 @@
   explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
       : collector_(collector), thread_(thread) {
     // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-    DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+    DCHECK(!kNoUnEvac || collector_->use_generational_cc_);
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -2991,7 +2993,7 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     // Avoid all read barriers during visit references to help performance.
     // Don't do this in transaction mode because we may read the old value of an field which may
@@ -3012,7 +3014,7 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   mirror::Object* ref = obj->GetFieldObject<
       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
@@ -3386,7 +3388,7 @@
       } else {
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
-        if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+        if (!use_generational_cc_ || !young_gen_) {
           // Mark it in the live bitmap.
           CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
         }
@@ -3482,7 +3484,7 @@
     los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
     DCHECK(los_bitmap->HasAddress(ref));
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
     // Not done scanning, use AtomicSetReadBarrierPointer.
@@ -3551,11 +3553,11 @@
   }
   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
   // positives.
-  if (!kEnableGenerationalConcurrentCopyingCollection && !kVerifyNoMissingCardMarks) {
+  if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
-  } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+  } else if (use_generational_cc_ && !young_gen_) {
     region_space_inter_region_bitmap_->Clear();
     non_moving_space_inter_region_bitmap_->Clear();
   }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a41c17a..124713c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -65,10 +65,11 @@
   // pages.
   static constexpr bool kGrayDirtyImmuneObjects = true;
 
-  explicit ConcurrentCopying(Heap* heap,
-                             bool young_gen,
-                             const std::string& name_prefix = "",
-                             bool measure_read_barrier_slow_path = false);
+  ConcurrentCopying(Heap* heap,
+                    bool young_gen,
+                    bool use_generational_cc,
+                    const std::string& name_prefix = "",
+                    bool measure_read_barrier_slow_path = false);
   ~ConcurrentCopying();
 
   void RunPhases() override
@@ -90,7 +91,7 @@
   void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
   GcType GetGcType() const override {
-    return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+    return (use_generational_cc_ && young_gen_)
         ? kGcTypeSticky
         : kGcTypePartial;
   }
@@ -323,6 +324,19 @@
   std::unique_ptr<Barrier> gc_barrier_;
   std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Generational CC collection is currently only
+  // compatible with Baker read barriers. Set in Heap constructor.
+  const bool use_generational_cc_;
+
+  // Generational "sticky", only trace through dirty objects in region space.
+  const bool young_gen_;
+
+  // If true, the GC thread is done scanning marked objects on dirty and aged
+  // cards (see ConcurrentCopying::CopyingPhase).
+  Atomic<bool> done_scanning_;
+
   // The read-barrier mark-bit stack. Stores object references whose
   // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
   // so that this bit can be reset at the end of the collection in
@@ -400,12 +414,6 @@
   // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
   float reclaimed_bytes_ratio_sum_;
 
-  // Generational "sticky", only trace through dirty objects in region space.
-  const bool young_gen_;
-  // If true, the GC thread is done scanning marked objects on dirty and aged
-  // card (see ConcurrentCopying::CopyingPhase).
-  Atomic<bool> done_scanning_;
-
   // The skipped blocks are memory blocks/chunks that were copies of
   // objects that were unused due to lost races (cas failures) at
   // object copy/forward pointer install. They are reused.
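
The recurring pattern in this change, in miniature: a `static constexpr` guard becomes a `const` member fixed at construction, so the decision moves from compile time to VM startup without becoming mutable state. A sketch (`CollectorSketch` is hypothetical, not ART code):

```cpp
#include <cassert>

// Hypothetical miniature of ConcurrentCopying's new construction contract.
class CollectorSketch {
 public:
  CollectorSketch(bool use_generational_cc, bool young_gen)
      : use_generational_cc_(use_generational_cc), young_gen_(young_gen) {
    // Mirrors the CHECK in ConcurrentCopying's constructor: a young-gen
    // instance only makes sense when generational CC is enabled.
    assert(use_generational_cc_ || !young_gen_);
  }

  // Mirrors ConcurrentCopying::GetGcType(): generational young collections
  // are "sticky" (trace only dirty objects); everything else is "partial".
  const char* GcType() const {
    return (use_generational_cc_ && young_gen_) ? "sticky" : "partial";
  }

 private:
  const bool use_generational_cc_;  // fixed at construction, like the real field
  const bool young_gen_;
};
```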
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d699da0..5f62d75 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -107,8 +107,9 @@
 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
-static constexpr double kStickyGcThroughputAdjustment =
-    kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0;
+static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
+  return use_generational_cc ? 0.5 : 1.0;
+}
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -201,6 +202,7 @@
            bool gc_stress_mode,
            bool measure_gc_performance,
            bool use_homogeneous_space_compaction_for_oom,
+           bool use_generational_cc,
            uint64_t min_interval_homogeneous_space_compaction_by_oom,
            bool dump_region_info_before_gc,
            bool dump_region_info_after_gc)
@@ -288,6 +290,7 @@
       pending_collector_transition_(nullptr),
       pending_heap_trim_(nullptr),
       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+      use_generational_cc_(use_generational_cc),
       running_collection_is_blocking_(false),
       blocking_gc_count_(0U),
       blocking_gc_time_(0U),
@@ -494,7 +497,8 @@
     MemMap region_space_mem_map =
         space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
     CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
-    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
+    region_space_ = space::RegionSpace::Create(
+        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -652,26 +656,28 @@
     if (MayUseCollector(kCollectorTypeCC)) {
       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                        /*young_gen=*/false,
+                                                                       use_generational_cc_,
                                                                        "",
                                                                        measure_gc_performance);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
             this,
             /*young_gen=*/true,
+            use_generational_cc_,
             "young",
             measure_gc_performance);
       }
       active_concurrent_copying_collector_ = concurrent_copying_collector_;
       DCHECK(region_space_ != nullptr);
       concurrent_copying_collector_->SetRegionSpace(region_space_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_->SetRegionSpace(region_space_);
         // At this point, non-moving space should be created.
         DCHECK(non_moving_space_ != nullptr);
         concurrent_copying_collector_->CreateInterRegionRefBitmaps();
       }
       garbage_collectors_.push_back(concurrent_copying_collector_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         garbage_collectors_.push_back(young_concurrent_copying_collector_);
       }
     }
@@ -2262,7 +2268,7 @@
     gc_plan_.clear();
     switch (collector_type_) {
       case kCollectorTypeCC: {
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           gc_plan_.push_back(collector::kGcTypeSticky);
         }
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -2739,7 +2745,7 @@
         collector = semi_space_collector_;
         break;
       case kCollectorTypeCC:
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           // TODO: Other threads must do the flip checkpoint before they start poking at
           // active_concurrent_copying_collector_. So we should not have concurrency here.
           active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
@@ -3637,19 +3643,21 @@
     collector::GcType non_sticky_gc_type = NonStickyGcType();
     // Find what the next non sticky collector will be.
     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       if (non_sticky_collector == nullptr) {
         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
       }
       CHECK(non_sticky_collector != nullptr);
     }
+    double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
+
     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
     // do another sticky collection next.
     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
     // if the sticky GC throughput always remained >= the full/partial throughput.
     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
-    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
+    if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
         non_sticky_collector->GetEstimatedMeanThroughput() &&
         non_sticky_collector->NumberOfIterations() > 0 &&
         bytes_allocated <= target_footprint) {
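
The throughput comparison above is the one consumer of the new helper: under generational CC, the sticky GC's measured throughput is halved before being compared with the non-sticky collector's mean. A hedged restatement (`ShouldRunStickyGcNext` is a hypothetical standalone helper):

```cpp
#include <cstddef>

static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
  return use_generational_cc ? 0.5 : 1.0;
}

// Hypothetical standalone helper restating the condition in the hunk above.
bool ShouldRunStickyGcNext(double sticky_throughput,
                           double non_sticky_mean_throughput,
                           std::size_t bytes_allocated,
                           std::size_t target_footprint,
                           bool use_generational_cc) {
  // Under generational CC the sticky throughput is discounted by 0.5, biasing
  // selection toward the non-sticky collector; and sticky GC is never chosen
  // once allocation exceeds the target footprint.
  return sticky_throughput * GetStickyGcThroughputAdjustment(use_generational_cc) >=
             non_sticky_mean_throughput &&
         bytes_allocated <= target_footprint;
}
```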
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 52c9386..4c5d896 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -212,6 +212,7 @@
        bool gc_stress_mode,
        bool measure_gc_performance,
        bool use_homogeneous_space_compaction,
+       bool use_generational_cc,
        uint64_t min_interval_homogeneous_space_compaction_by_oom,
        bool dump_region_info_before_gc,
        bool dump_region_info_after_gc);
@@ -532,6 +533,10 @@
     return num_bytes_allocated_.load(std::memory_order_relaxed);
   }
 
+  bool GetUseGenerationalCC() const {
+    return use_generational_cc_;
+  }
+
   // Returns the number of objects currently allocated.
   size_t GetObjectsAllocated() const
       REQUIRES(!Locks::heap_bitmap_lock_);
@@ -768,7 +773,7 @@
 
   // Returns the active concurrent copying collector.
   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
              (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
     } else {
@@ -1477,6 +1482,11 @@
   // Whether or not we use homogeneous space compaction to avoid OOM errors.
   bool use_homogeneous_space_compaction_for_oom_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Set in Heap constructor.
+  const bool use_generational_cc_;
+
   // True if the currently running collection has made some thread wait.
   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
   // The number of blocking GC runs.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a5ba1dc..5179702 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -93,11 +93,12 @@
   return mem_map;
 }
 
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
-  return new RegionSpace(name, std::move(mem_map));
+RegionSpace* RegionSpace::Create(
+    const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
+  return new RegionSpace(name, std::move(mem_map), use_generational_cc);
 }
 
-RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
     : ContinuousMemMapAllocSpace(name,
                                  std::move(mem_map),
                                  mem_map.Begin(),
@@ -105,6 +106,7 @@
                                  mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       region_lock_("Region lock", kRegionSpaceRegionLock),
+      use_generational_cc_(use_generational_cc),
       time_(1U),
       num_regions_(mem_map_.Size() / kRegionSize),
       num_non_free_regions_(0U),
@@ -179,9 +181,44 @@
   return num_regions * kRegionSize;
 }
 
+void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
+  // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
+  DCHECK(GetUseGenerationalCC() || clear_live_bytes);
+  DCHECK(!IsFree() && IsInToSpace());
+  type_ = RegionType::kRegionTypeUnevacFromSpace;
+  if (IsNewlyAllocated()) {
+    // A newly allocated region set as unevac from-space must be
+    // a large or large tail region.
+    DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+    // Always clear the live bytes of a newly allocated (large or
+    // large tail) region.
+    clear_live_bytes = true;
+    // Clear the "newly allocated" status here, as we do not want the
+    // GC to see it when encountering (and processing) references in the
+    // from-space.
+    //
+    // Invariant: There should be no newly-allocated region in the
+    // from-space (when the from-space exists, which is between the calls
+    // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+    is_newly_allocated_ = false;
+  }
+  if (clear_live_bytes) {
+    // Reset the live bytes, as we have made a non-evacuation
+    // decision (possibly based on the percentage of live bytes).
+    live_bytes_ = 0;
+  }
+}
+
+bool RegionSpace::Region::GetUseGenerationalCC() {
+  // We are retrieving the info from Heap, instead of the cached version in
+  // RegionSpace, because accessing the Heap from a Region object is easier
+  // than accessing the RegionSpace.
+  return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
+}
+
 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
   // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || (evac_mode != kEvacModeNewlyAllocated));
+  DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
   DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
   // The region should be evacuated if:
   // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
@@ -253,7 +290,7 @@
 
 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
 
   // This code uses logic similar to the one used in RegionSpace::FreeLarge
   // to traverse the regions supporting `obj`.
@@ -292,7 +329,7 @@
                                EvacMode evac_mode,
                                bool clear_live_bytes) {
   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
+  DCHECK(use_generational_cc_ || clear_live_bytes);
   ++time_;
   if (kUseTableLookupReadBarrier) {
     DCHECK(rb_table->IsAllCleared());
@@ -336,9 +373,7 @@
           // mark-bit otherwise the live_bytes will not be updated in
           // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
           // logic.
-          if (kEnableGenerationalConcurrentCopyingCollection
-              && !should_evacuate
-              && is_newly_allocated) {
+          if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
             GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
           }
           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
@@ -506,7 +541,7 @@
         // bitmap. But they cannot do so before we know the next GC cycle will
         // be a major one, so this operation happens at the beginning of such a
         // major collection, before marking starts.
-        if (!kEnableGenerationalConcurrentCopyingCollection) {
+        if (!use_generational_cc_) {
           GetLiveBitmap()->ClearRange(
               reinterpret_cast<mirror::Object*>(r->Begin()),
               reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
@@ -520,8 +555,7 @@
         // `r` when it has an undefined live bytes count (i.e. when
         // `r->LiveBytes() == static_cast<size_t>(-1)`) with
         // Generational CC.
-        if (!kEnableGenerationalConcurrentCopyingCollection ||
-            (r->LiveBytes() != static_cast<size_t>(-1))) {
+        if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
           // Only some allocated bytes are live in this unevac region.
           // This should only happen for an allocated non-large region.
           DCHECK(r->IsAllocated()) << r->State();
@@ -918,7 +952,7 @@
     Region* r = &regions_[region_index];
     if (r->IsFree()) {
       r->Unfree(this, time_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         // TODO: Add an explanation for this assertion.
         DCHECK(!for_evac || !r->is_newly_allocated_);
       }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a6f501b..d8b54e2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -59,7 +59,7 @@
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
   static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
+  static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   // Allocate `num_bytes`, returns null if the space is full.
   mirror::Object* Alloc(Thread* self,
@@ -368,7 +368,7 @@
   }
 
  private:
-  RegionSpace(const std::string& name, MemMap&& mem_map);
+  RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   class Region {
    public:
@@ -523,33 +523,7 @@
     // collection, RegionSpace::ClearFromSpace will preserve the space
     // used by this region, and tag it as to-space (see
     // Region::SetUnevacFromSpaceAsToSpace below).
-    void SetAsUnevacFromSpace(bool clear_live_bytes) {
-      // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-      DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
-      DCHECK(!IsFree() && IsInToSpace());
-      type_ = RegionType::kRegionTypeUnevacFromSpace;
-      if (IsNewlyAllocated()) {
-        // A newly allocated region set as unevac from-space must be
-        // a large or large tail region.
-        DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
-        // Always clear the live bytes of a newly allocated (large or
-        // large tail) region.
-        clear_live_bytes = true;
-        // Clear the "newly allocated" status here, as we do not want the
-        // GC to see it when encountering (and processing) references in the
-        // from-space.
-        //
-        // Invariant: There should be no newly-allocated region in the
-        // from-space (when the from-space exists, which is between the calls
-        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
-        is_newly_allocated_ = false;
-      }
-      if (clear_live_bytes) {
-        // Reset the live bytes, as we have made a non-evacuation
-        // decision (possibly based on the percentage of live bytes).
-        live_bytes_ = 0;
-      }
-    }
+    void SetAsUnevacFromSpace(bool clear_live_bytes);
 
     // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
     // This is only valid if it is currently an unevac from-space region.
@@ -562,7 +536,7 @@
     ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
 
     void AddLiveBytes(size_t live_bytes) {
-      DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
+      DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace());
       DCHECK(!IsLargeTail());
       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
       // For large allocations, we always consider all bytes in the regions live.
@@ -616,6 +590,8 @@
     uint64_t GetLongestConsecutiveFreeBytes() const;
 
    private:
+    static bool GetUseGenerationalCC();
+
     size_t idx_;                        // The region's index in the region space.
     size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
     uint8_t* begin_;                    // The begin address of the region.
@@ -738,6 +714,8 @@
 
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  // Cached version of Heap::use_generational_cc_.
+  const bool use_generational_cc_;
   uint32_t time_;                  // The time as the number of collections since the startup.
   size_t num_regions_;             // The number of regions in this space.
   // The number of non-free regions in this space.
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index c73a710..8bd59ea 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -104,8 +104,8 @@
 
     Domain dex_domain = dex_file->GetHiddenapiDomain();
     if (class_loader.IsNull() && dex_domain == Domain::kApplication) {
-      LOG(WARNING) << "DexFile " << dex_file->GetLocation() << " is in boot classpath "
-                   << "but is assigned untrusted domain";
+      LOG(WARNING) << "DexFile " << dex_file->GetLocation() << " is in boot classpath "
+                   << "but is assigned untrusted domain";
       dex_domain = Domain::kPlatform;
     }
     return dex_domain;
@@ -415,7 +415,7 @@
       }
 
       // Access checks are not disabled, report the violation.
-      detail::MaybeReportCorePlatformApiViolation(member, caller_context, access_method);
+      detail::MaybeReportCorePlatformApiViolation(member, caller_context, access_method);
 
       // Deny access if the policy is enabled.
       return policy == EnforcementPolicy::kEnabled;
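
The guard in the first hunk ensures a boot-class-path dex file (null class loader) never remains in the untrusted application domain. As a tiny, self-contained restatement (`ResolveBootDomain` and `DomainSketch` are hypothetical stand-ins for the real types):

```cpp
// Self-contained restatement; DomainSketch stands in for hiddenapi::Domain.
enum class DomainSketch { kCorePlatform, kPlatform, kApplication };

DomainSketch ResolveBootDomain(bool class_loader_is_null, DomainSketch dex_domain) {
  if (class_loader_is_null && dex_domain == DomainSketch::kApplication) {
    // The real code logs a warning before promoting the domain.
    return DomainSketch::kPlatform;
  }
  return dex_domain;
}
```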
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4a04259..6fd691f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -703,6 +703,7 @@
   UsageMessage(stream, "  -Xgc:[no]postsweepingverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
+  UsageMessage(stream, "  -Xgc:[no]generational_cc\n");
   UsageMessage(stream, "  -Ximage:filename\n");
   UsageMessage(stream, "  -Xbootclasspath-locations:bootclasspath\n"
                        "     (override the dex locations of the -Xbootclasspath files)\n");
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index cbb7b82..77d2316 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -130,6 +130,23 @@
   EXPECT_EQ(gc::kCollectorTypeSS, xgc.collector_type_);
 }
 
+TEST_F(ParsedOptionsTest, ParsedOptionsGenerationalCC) {
+  RuntimeOptions options;
+  options.push_back(std::make_pair("-Xgc:generational_cc", nullptr));
+
+  RuntimeArgumentMap map;
+  bool parsed = ParsedOptions::Parse(options, false, &map);
+  ASSERT_TRUE(parsed);
+  ASSERT_NE(0u, map.Size());
+
+  using Opt = RuntimeArgumentMap;
+
+  EXPECT_TRUE(map.Exists(Opt::GcOption));
+
+  XGcOption xgc = map.GetOrDefault(Opt::GcOption);
+  ASSERT_TRUE(xgc.generational_cc);
+}
+
 TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) {
   using Opt = RuntimeArgumentMap;
 
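The added test covers only the positive spelling; a companion for `nogenerational_cc` would follow the same shape (hypothetical test, not part of this change):

```cpp
TEST_F(ParsedOptionsTest, ParsedOptionsNoGenerationalCC) {
  RuntimeOptions options;
  options.push_back(std::make_pair("-Xgc:nogenerational_cc", nullptr));

  RuntimeArgumentMap map;
  bool parsed = ParsedOptions::Parse(options, false, &map);
  ASSERT_TRUE(parsed);
  ASSERT_NE(0u, map.Size());

  using Opt = RuntimeArgumentMap;
  EXPECT_TRUE(map.Exists(Opt::GcOption));

  XGcOption xgc = map.GetOrDefault(Opt::GcOption);
  ASSERT_FALSE(xgc.generational_cc);
}
```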
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4853187..a86bc94 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1240,6 +1240,10 @@
             kExtraDefaultHeapGrowthMultiplier;
   }
   XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
+
+  // Generational CC collection is currently only compatible with Baker read barriers.
+  bool use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc;
+
   heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
                        runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
                        runtime_options.GetOrDefault(Opt::HeapMinFree),
@@ -1274,6 +1278,7 @@
                        xgc_option.gcstress_,
                        xgc_option.measure_,
                        runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
+                       use_generational_cc,
                        runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs),
                        runtime_options.Exists(Opt::DumpRegionInfoBeforeGC),
                        runtime_options.Exists(Opt::DumpRegionInfoAfterGC));
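
The single `use_generational_cc` line encodes the compatibility rule stated in the comment: the build-time read-barrier choice can veto the runtime request. A checkable sketch (`EffectiveGenerationalCC` is a hypothetical helper):

```cpp
constexpr bool EffectiveGenerationalCC(bool use_baker_read_barrier,
                                       bool xgc_generational_cc) {
  // Generational CC requires Baker read barriers, so a non-Baker build
  // silently disables it even if -Xgc:generational_cc was passed.
  return use_baker_read_barrier && xgc_generational_cc;
}

static_assert(!EffectiveGenerationalCC(false, true), "non-Baker build vetoes");
static_assert(EffectiveGenerationalCC(true, true), "Baker build honors the flag");
static_assert(!EffectiveGenerationalCC(true, false), "-Xgc:nogenerational_cc wins");
```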
diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h
index 793291a..81d350b 100644
--- a/runtime/runtime_globals.h
+++ b/runtime/runtime_globals.h
@@ -40,16 +40,24 @@
 static constexpr bool kMarkCompactSupport = false && kMovingCollector;
 // True if we allow moving classes.
 static constexpr bool kMovingClasses = !kMarkCompactSupport;
-// If true, enable generational collection when using the Concurrent Copying
-// (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
-// for major collections.
+// When using the Concurrent Copying (CC) collector, if
+// `ART_USE_GENERATIONAL_CC` is true, enable generational collection by default,
+// i.e. use sticky-bit CC for minor collections and (full) CC for major
+// collections.
+// This default value can be overridden with the runtime option
+// `-Xgc:[no]generational_cc`.
 //
-// Generational CC collection is currently only compatible with Baker read
-// barriers.
-#if defined(ART_USE_GENERATIONAL_CC) && defined(ART_READ_BARRIER_TYPE_IS_BAKER)
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true;
+// TODO(b/67628039): Consider either:
+// - renaming this to a better descriptive name (e.g.
+//   `ART_USE_GENERATIONAL_CC_BY_DEFAULT`); or
+// - removing `ART_USE_GENERATIONAL_CC` and having a fixed default value.
+// Any of these changes will require adjusting users of this preprocessor
+// directive and the corresponding build system environment variable (e.g. in
+// ART's continuous testing).
+#ifdef ART_USE_GENERATIONAL_CC
+static constexpr bool kEnableGenerationalCCByDefault = true;
 #else
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false;
+static constexpr bool kEnableGenerationalCCByDefault = false;
 #endif
 
 // If true, enable the tlab allocator by default.
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 8ff6c52..24864f9 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -21,7 +21,7 @@
 #include <sys/resource.h>
 #include <sys/time.h>
 
-#include <cutils/sched_policy.h>
+#include <processgroup/sched_policy.h>
 #include <utils/threads.h>
 
 #include "base/macros.h"
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 8dfb402..3dc2789 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -82,6 +82,14 @@
   return int_index;
 }
 
+extern "C" JNIEXPORT void JNICALL Java_Main_setWhitelistAll(JNIEnv*, jclass, jboolean value) {
+  std::vector<std::string> exemptions;
+  if (value != JNI_FALSE) {
+    exemptions.push_back("L");
+  }
+  Runtime::Current()->SetHiddenApiExemptions(exemptions);
+}
+
 static jobject NewInstance(JNIEnv* env, jclass klass) {
   jmethodID constructor = env->GetMethodID(klass, "<init>", "()V");
   if (constructor == nullptr) {
diff --git a/test/674-hiddenapi/src-art/Main.java b/test/674-hiddenapi/src-art/Main.java
index 190f4ac..d6a8c6d 100644
--- a/test/674-hiddenapi/src-art/Main.java
+++ b/test/674-hiddenapi/src-art/Main.java
@@ -119,9 +119,8 @@
     // loaded by their parent class loader.
     String nativeLibCopy = createNativeLibCopy(parentDomain, childDomain, whitelistAllApis);
 
-    if (whitelistAllApis) {
-      VMRuntime.getRuntime().setHiddenApiExemptions(new String[]{"L"});
-    }
+    // Set exemptions to "L" (matches all classes) if we are testing whitelisting.
+    setWhitelistAll(whitelistAllApis);
 
     // Invoke ChildClass.runTest
     Class<?> childClass = Class.forName("ChildClass", true, childLoader);
@@ -129,8 +128,6 @@
         "runTest", String.class, Integer.TYPE, Integer.TYPE, Boolean.TYPE);
     runTestMethod.invoke(null, nativeLibCopy, parentDomain.ordinal(), childDomain.ordinal(),
         whitelistAllApis);
-
-    VMRuntime.getRuntime().setHiddenApiExemptions(new String[0]);
   }
 
   // Routine which tries to figure out the absolute path of our native library.
@@ -203,4 +200,5 @@
   private static native int appendToBootClassLoader(String dexPath, boolean isCorePlatform);
   private static native void setDexDomain(int index, boolean isCorePlatform);
   private static native void init();
+  private static native void setWhitelistAll(boolean value);
 }
diff --git a/tools/libcore_network_failures.txt b/tools/libcore_network_failures.txt
index e7e31db..380f56b 100644
--- a/tools/libcore_network_failures.txt
+++ b/tools/libcore_network_failures.txt
@@ -8,7 +8,7 @@
   description: "Ignore failure of network-related tests on new devices running Android O",
   result: EXEC_FAILED,
   bug: 74725685,
-  modes: [device],
+  modes: [device_testdex],
   names: ["libcore.libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet",
           "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
           "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",