/* Copyright (C) 2019 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "alloc_manager.h"

#include <atomic>
#include <sstream>

#include "base/logging.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "handle.h"
#include "mirror/class-inl.h"
#include "runtime.h"
#include "runtime_globals.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
namespace openjdkjvmti {

template<typename T>
void AllocationManager::PauseForAllocation(art::Thread* self, T msg) {
  // Suspension can pause us for an arbitrarily long time, but we have to be suspended in order
  // to sleep. So we test, suspend, test again, sleep, and repeat.
  std::string cause;
  const bool is_logging = VLOG_IS_ON(plugin);
  while (true) {
    // We always return when there is no pause and we are runnable.
    art::Thread* pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
    if (LIKELY(pausing_thread == nullptr || pausing_thread == self)) {
      return;
    }
    if (UNLIKELY(is_logging && cause.empty())) {
      cause = msg();
    }
    art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
    art::MutexLock mu(self, alloc_listener_mutex_);
    pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
    CHECK_NE(pausing_thread, self) << "Only this thread should ever set pausing_thread to itself!"
                                   << " How did this happen? " << *self;
    if (pausing_thread != nullptr) {
      VLOG(plugin) << "Suspending " << *self << " due to " << cause << ". Allocation pause "
                   << "initiated by " << *pausing_thread;
      alloc_pause_cv_.Wait(self);
    }
  }
}
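
// A minimal usage sketch for PauseForAllocation (illustrative only, mirroring the callers
// below): the message lambda is evaluated lazily, only when a pause is actually observed
// and plugin-level logging is enabled, so callers can build expensive strings freely:
//
//   manager_->PauseForAllocation(self, []() { return "short description of the cause"; });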

extern AllocationManager* gAllocManager;
AllocationManager* AllocationManager::Get() {
  return gAllocManager;
}

void JvmtiAllocationListener::ObjectAllocated(art::Thread* self,
                                              art::ObjPtr<art::mirror::Object>* obj,
                                              size_t cnt) {
  auto cb = manager_->callback_;
  if (cb != nullptr && manager_->callback_enabled_.load(std::memory_order_seq_cst)) {
    cb->ObjectAllocated(self, obj, cnt);
  }
}

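// Once allocations have been paused even once, the PreObjectAllocated entrypoint has to stay
// enabled forever; see the long comment in AllocationManager::PauseAllocations below for why.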
bool JvmtiAllocationListener::HasPreAlloc() const {
  return manager_->allocations_paused_ever_.load(std::memory_order_seq_cst);
}

void JvmtiAllocationListener::PreObjectAllocated(art::Thread* self,
                                                 art::MutableHandle<art::mirror::Class> type,
                                                 size_t* byte_count) {
  manager_->PauseForAllocation(self, [&]() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::ostringstream oss;
    oss << "allocating " << *byte_count << " bytes of type " << type->PrettyClass();
    return oss.str();
  });
  if (!type->IsVariableSize()) {
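    // The class may have been structurally redefined while this thread was paused above, so
    // allocate at least as much space as its current fixed size requires. Illustrative
    // arithmetic, assuming the usual 8-byte kObjectAlignment: a redefined class reporting
    // GetObjectSize() == 20 is rounded up to 24, and the allocation uses max(24, *byte_count).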
    *byte_count =
        std::max(art::RoundUp(static_cast<size_t>(type->GetObjectSize()), art::kObjectAlignment),
                 *byte_count);
  }
}

AllocationManager::AllocationManager()
    : alloc_listener_(nullptr),
      alloc_listener_mutex_("JVMTI Alloc listener",
                            art::LockLevel::kPostUserCodeSuspensionTopLevelLock),
      alloc_pause_cv_("JVMTI Allocation Pause Condvar", alloc_listener_mutex_) {
  alloc_listener_.reset(new JvmtiAllocationListener(this));
}

void AllocationManager::DisableAllocationCallback(art::Thread* self) {
  callback_enabled_.store(false);
  DecrListenerInstall(self);
}

void AllocationManager::EnableAllocationCallback(art::Thread* self) {
  IncrListenerInstall(self);
  callback_enabled_.store(true);
}

void AllocationManager::SetAllocListener(AllocationCallback* callback) {
  CHECK(callback_ == nullptr) << "Already set up!";
  callback_ = callback;
  alloc_listener_.reset(new JvmtiAllocationListener(this));
}

void AllocationManager::RemoveAllocListener() {
  callback_enabled_.store(false, std::memory_order_seq_cst);
  callback_ = nullptr;
}

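// Incr/DecrListenerInstall keep a refcount over the heap's single allocation-listener slot:
// the listener is installed on the 0 -> 1 transition and removed on the 1 -> 0 transition,
// no matter how many users (the allocation callback, allocation pauses) currently need it.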
void AllocationManager::DecrListenerInstall(art::Thread* self) {
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  art::MutexLock mu(self, alloc_listener_mutex_);
  // No particular memory order is needed for the refcount: we hold the lock, so it cannot
  // change concurrently.
  if (--listener_refcount_ == 0) {
    art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
  }
}

void AllocationManager::IncrListenerInstall(art::Thread* self) {
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  art::MutexLock mu(self, alloc_listener_mutex_);
  // No particular memory order is needed for the refcount: we hold the lock, so it cannot
  // change concurrently.
  if (listener_refcount_++ == 0) {
    art::Runtime::Current()->GetHeap()->SetAllocationListener(alloc_listener_.get());
  }
}

void AllocationManager::PauseAllocations(art::Thread* self) {
  art::Thread* null_thr = nullptr;
  // Unfortunately, once we've paused allocations once, we have to leave the listener and the
  // PreObjectAlloc event enabled forever. This avoids an instance of the ABA problem: we need
  // to make sure that every thread gets a chance to see the PreObjectAlloc event at least once,
  // or else it could miss the fact that the object it is allocating had its size changed.
  //
  // Consider the following 2 threads. T1 is allocating an object of class K. It is suspended (by
  // user code) somewhere in the AllocObjectWithAllocator function, perhaps while doing a GC to
  // attempt to clear space. While that thread is suspended, on thread T2 we decide to
  // structurally redefine 'K', changing its size. To do this we insert this PreObjectAlloc event
  // to check and update the size of the class being allocated. This is done successfully. Now
  // imagine that T2 removed the listener event and T1 subsequently resumed. T1 would see there
  // is no PreObjectAlloc event and so allocate using the old object size. This leads to it not
  // allocating enough space. To prevent this we simply force every allocation after our first
  // pause to go through the PreObjectAlloc event.
  //
  // TODO: Technically we could do better than this. We just need to be able to require that all
  // threads within allocation functions go through the PreObjectAlloc event at least once after
  // we turn it on. This is easier said than done, though, since we don't want to place a marker
  // on threads (allocation is just too common) and we can't just have every thread go through
  // the event, since some threads never or almost never allocate. We would also need to ensure
  // that this thread doesn't pause waiting for all threads to pass the barrier, since the other
  // threads might be suspended. We could accomplish this by storing callbacks on each thread
  // that would do the work. Honestly, though, this is a debug feature and it doesn't slow things
  // down very much, so simply leaving it on forever is simpler and safer.
  bool expected = false;
  if (allocations_paused_ever_.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
    IncrListenerInstall(self);
  }
  do {
    PauseForAllocation(self, []() { return "request to pause allocations on other threads"; });
  } while (!allocations_paused_thread_.compare_exchange_strong(
      null_thr, self, std::memory_order_seq_cst));
  // Make sure everything else can see this and isn't in the middle of a final allocation.
  // Force every thread to either be suspended or pass through a barrier.
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  art::Barrier barrier(0);
  art::FunctionClosure fc([&](art::Thread* thr ATTRIBUTE_UNUSED) {
    barrier.Pass(art::Thread::Current());
  });
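  // RunCheckpoint returns the number of threads the closure was (or will be) run on.
  // Increment then adds that count to the barrier and blocks until the same number of Pass
  // calls have occurred, so in effect every other thread has either been suspended or observed
  // the pause before we return.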
  size_t requested = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
  barrier.Increment(self, requested);
}

void AllocationManager::ResumeAllocations(art::Thread* self) {
  CHECK_EQ(allocations_paused_thread_.load(), self) << "not paused!";
  // See the comment in PauseAllocations for why we do not decrement the listener install count.
  CHECK(allocations_paused_ever_.load(std::memory_order_seq_cst));
  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  art::MutexLock mu(self, alloc_listener_mutex_);
  allocations_paused_thread_.store(nullptr, std::memory_order_seq_cst);
  alloc_pause_cv_.Broadcast(self);
}
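
// A minimal pause/resume sketch (illustrative only, not code in this file): a thread driving
// a structural class redefinition would bracket the unsafe region like so:
//
//   AllocationManager::Get()->PauseAllocations(self);
//   // ... update class sizes while no other thread can complete an allocation ...
//   AllocationManager::Get()->ResumeAllocations(self);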

}  // namespace openjdkjvmti