Move startup thread pool back into runtime
Added logic in heap trim to delete the thread pool if there are no
active users.
Added a scoped accessor to prevent ref counting errors.
Motivation: have the workers already created by the time the app
images are loaded.
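
A minimal sketch of the shape the scoped accessor could take (the
member names, the lock, and the Acquire/Release helpers below are
assumptions; the runtime.h/runtime.cc side of this change is not part
of the diff below):

  // Sketch only: RAII guard that keeps the startup thread pool alive
  // for as long as a caller is using it.
  class Runtime::ScopedThreadPoolUsage {
   public:
    ScopedThreadPoolUsage()
        : thread_pool_(Runtime::Current()->AcquireThreadPool()) {}
    ~ScopedThreadPoolUsage() {
      Runtime::Current()->ReleaseThreadPool();
    }

    // May return null if the pool has already been deleted.
    ThreadPool* GetThreadPool() const { return thread_pool_; }

   private:
    ThreadPool* const thread_pool_;
  };

  // Sketch: the runtime hands the pool out under a lock and counts users.
  ThreadPool* Runtime::AcquireThreadPool() {
    MutexLock mu(Thread::Current(), thread_pool_lock_);
    ++thread_pool_ref_count_;
    return thread_pool_.get();
  }

  void Runtime::ReleaseThreadPool() {
    MutexLock mu(Thread::Current(), thread_pool_lock_);
    CHECK_GT(thread_pool_ref_count_, 0u);
    --thread_pool_ref_count_;
  }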
Bug: 116052292
Test: test-art-host
Change-Id: I8ea776d74e88601222a9989e0c6dac34cf77c683
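
For context, a hedged sketch of the heap-trim side mentioned above
(DeleteThreadPool and its call site in gc::Heap::Trim are assumptions,
not part of this diff):

  // Sketch: heap trim deletes the pool only when nobody holds it, so
  // a live ScopedThreadPoolUsage can never see a dangling pool.
  bool Runtime::DeleteThreadPool() {
    std::unique_ptr<ThreadPool> thread_pool;
    {
      MutexLock mu(Thread::Current(), thread_pool_lock_);
      if (thread_pool_ref_count_ == 0u) {
        thread_pool = std::move(thread_pool_);
      }
    }
    // Destruction happens outside the lock; returns whether it ran.
    return thread_pool != nullptr;
  }

  // In gc::Heap::Trim (sketch):
  Runtime::Current()->DeleteThreadPool();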
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 66db063..4f9b3f9 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -21,7 +21,6 @@
#include <unistd.h>
#include <random>
-#include <thread>
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
@@ -685,40 +684,12 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
- std::unique_ptr<ThreadPool> thread_pool;
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
oat_file,
&logger,
- &thread_pool,
image_reservation,
error_msg);
- if (thread_pool != nullptr) {
- // Delay the thread pool deletion to prevent the deletion slowing down the startup by causing
- // preemption. TODO: Just do this in heap trim.
- static constexpr uint64_t kThreadPoolDeleteDelay = MsToNs(5000);
-
- class DeleteThreadPoolTask : public HeapTask {
- public:
- explicit DeleteThreadPoolTask(std::unique_ptr<ThreadPool>&& thread_pool)
- : HeapTask(NanoTime() + kThreadPoolDeleteDelay), thread_pool_(std::move(thread_pool)) {}
-
- void Run(Thread* self) override {
- ScopedTrace trace("DestroyThreadPool");
- ScopedThreadStateChange stsc(self, kNative);
- thread_pool_.reset();
- }
-
- private:
- std::unique_ptr<ThreadPool> thread_pool_;
- };
- gc::TaskProcessor* const processor = Runtime::Current()->GetHeap()->GetTaskProcessor();
- // The thread pool is already done being used since Init has finished running. Deleting the
- // thread pool is done async since it takes a non-trivial amount of time to do.
- if (processor != nullptr) {
- processor->AddTask(Thread::Current(), new DeleteThreadPoolTask(std::move(thread_pool)));
- }
- }
if (space != nullptr) {
uint32_t expected_reservation_size =
RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
@@ -779,7 +750,6 @@
const char* image_location,
const OatFile* oat_file,
TimingLogger* logger,
- std::unique_ptr<ThreadPool>* thread_pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -856,18 +826,6 @@
return nullptr;
}
- const size_t kMinBlocks = 2;
- if (thread_pool != nullptr && image_header->GetBlockCount() >= kMinBlocks) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- constexpr size_t kStackSize = 64 * KB;
- constexpr size_t kMaxRuntimeWorkers = 4u;
- const size_t num_workers =
- std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
- thread_pool->reset(new ThreadPool("Image", num_workers, /*create_peers=*/false, kStackSize));
- thread_pool->get()->StartWorkers(Thread::Current());
- }
-
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
@@ -880,7 +838,6 @@
*image_header,
file->Fd(),
logger,
- thread_pool != nullptr ? thread_pool->get() : nullptr,
image_reservation,
error_msg);
if (!map.IsValid()) {
@@ -971,7 +928,6 @@
const ImageHeader& image_header,
int fd,
TimingLogger* logger,
- ThreadPool* pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
@@ -1015,9 +971,12 @@
}
memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+ Runtime::ScopedThreadPoolUsage stpu;
+ ThreadPool* const pool = stpu.GetThreadPool();
const uint64_t start = NanoTime();
Thread* const self = Thread::Current();
- const bool use_parallel = pool != nullptr;
+ static constexpr size_t kMinBlocks = 2u;
+ const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
auto function = [&](Thread*) {
const uint64_t start2 = NanoTime();
@@ -1915,7 +1874,6 @@
image_location.c_str(),
/*oat_file=*/ nullptr,
logger,
- /*thread_pool=*/ nullptr,
image_reservation,
error_msg);
}