/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "object_tagging.h"

#include <limits>

#include "art_jvmti.h"
#include "base/logging.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
#include "mirror/class.h"
#include "mirror/object.h"
#include "runtime.h"
#include "ScopedLocalRef.h"

namespace openjdkjvmti {

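// Expose allow_disallow_lock_ so that callers can hold the table lock across
// several operations and use the *Locked variants of Remove and Set below.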
void ObjectTagTable::Lock() {
  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
}
void ObjectTagTable::Unlock() {
  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
}
void ObjectTagTable::AssertLocked() {
  allow_disallow_lock_.AssertHeld(art::Thread::Current());
}

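// Re-read every root in the table through a read barrier so that all entries
// point into to-space. update_since_last_sweep_ records that this has been
// done, so the slow paths below pay this cost at most once per GC cycle.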
void ObjectTagTable::UpdateTableWithReadBarrier() {
  update_since_last_sweep_ = true;

  auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
                                    art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
     REQUIRES_SHARED(art::Locks::mutator_lock_) {
    return original_root.Read<art::kWithReadBarrier>();
  };

  UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
}

bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
  // but still have from-space pointers in the table. Explicitly update the table once.
  // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
  UpdateTableWithReadBarrier();
  return GetTagLocked(self, obj, result);
}

void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
  // Same as Set(), as we don't have duplicates in an unordered_map.
  Set(obj, tag);
}

bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}
bool ObjectTagTable::RemoveLocked(art::mirror::Object* obj, jlong* tag) {
  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}

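// Returns true and stores the old tag in *tag (if tag is non-null) when an
// entry for obj was found and erased; returns false when obj was not tagged.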
bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    if (tag != nullptr) {
      *tag = it->second;
    }
    tagged_objects_.erase(it);
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return RemoveLocked(self, obj, tag);
  }

  // Not in here.
  return false;
}

bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
  if (new_tag == 0) {
    jlong tmp;
    return Remove(obj, &tmp);
  }

  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}
bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
  if (new_tag == 0) {
    jlong tmp;
    return RemoveLocked(obj, &tmp);
  }

  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}

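// Returns true when obj was already tagged and its tag has been updated;
// returns false when a new entry was inserted. A new_tag of zero is handled
// above as a removal, matching the JVMTI SetTag convention that tag 0 means
// "not tagged".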
bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    it->second = new_tag;
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return SetLocked(self, obj, new_tag);
  }

  // New element.
  auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
  DCHECK(insert_it.second);
  return false;
}

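// Remove entries whose objects are no longer marked, fixing up the roots of
// surviving entries to the objects' new locations. When at least one agent
// listens for OBJECT_FREE events, the tag of every dead entry is reported
// through HandleNullSweep (SweepImpl<true>); otherwise dead entries are
// dropped silently (SweepImpl<false>).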
void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
  if (event_handler_->IsEventEnabledAnywhere(JVMTI_EVENT_OBJECT_FREE)) {
    SweepImpl<true>(visitor);
  } else {
    SweepImpl<false>(visitor);
  }

  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
  // but still have from-space pointers in the table. We explicitly update the table then
  // to ensure we compare against to-space pointers. But we want to do this only once. Once
  // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
  // so we re-enable the explicit update for the next marking.
  update_since_last_sweep_ = false;
}

template <bool kHandleNull>
void ObjectTagTable::SweepImpl(art::IsMarkedVisitor* visitor) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);

  auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
                             art::mirror::Object* original_obj) {
    return visitor->IsMarked(original_obj);
  };

  UpdateTableWith<decltype(IsMarkedUpdater),
                  kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
}

void ObjectTagTable::HandleNullSweep(jlong tag) {
  event_handler_->DispatchEvent(nullptr, JVMTI_EVENT_OBJECT_FREE, tag);
}

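// Walk the whole table, replacing each entry's root with
// updater(root, current_pointer). kTargetNull selects the policy when the
// updater returns null: keep the entry as-is (kIgnoreNull), drop it
// (kRemoveNull), or drop it and report the tag via HandleNullSweep
// (kCallHandleNull).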
template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
ALWAYS_INLINE inline void ObjectTagTable::UpdateTableWith(T& updater) {
  // We optimistically hope that elements will still be well-distributed when re-inserting them.
  // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
  // vector and two passes.
  float original_max_load_factor = tagged_objects_.max_load_factor();
  tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
  // For checking that a max load-factor actually does what we expect.
  size_t original_bucket_count = tagged_objects_.bucket_count();

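  // With the load factor maxed out, erase() and emplace() can never trigger a
  // rehash, so the bucket count stays fixed (checked by the DCHECK_EQ below)
  // and iteration remains valid while entries are re-inserted in place.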
  for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
    DCHECK(!it->first.IsNull());
    art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
    art::mirror::Object* target_obj = updater(it->first, original_obj);
    if (original_obj != target_obj) {
      if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
        // Ignore null target, don't do anything.
      } else {
        jlong tag = it->second;
        it = tagged_objects_.erase(it);
        if (target_obj != nullptr) {
          tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
          DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
        } else if (kTargetNull == kCallHandleNull) {
          HandleNullSweep(tag);
        }
        continue;  // Iterator was implicitly updated by erase.
      }
    }
    it++;
  }

  tagged_objects_.max_load_factor(original_max_load_factor);
  // TODO: consider rehash here.
}

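// Minimal growable buffer on top of an Allocator whose backing storage can be
// released to the caller. GetTaggedObjects below uses it with JvmtiAllocator
// so that the returned arrays are allocated through the JVMTI environment and
// can be freed by the agent with the standard JVMTI Deallocate call.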
template <typename T, class Allocator = std::allocator<T>>
struct ReleasableContainer {
  using allocator_type = Allocator;

  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
      : allocator(alloc),
        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
        size(0),
        capacity(reserve) {
  }

  ~ReleasableContainer() {
    if (data != nullptr) {
      allocator.deallocate(data, capacity);
      capacity = 0;
      size = 0;
    }
  }

  T* Release() {
    T* tmp = data;

    data = nullptr;
    size = 0;
    capacity = 0;

    return tmp;
  }

  void Resize(size_t new_capacity) {
    CHECK_GT(new_capacity, capacity);

    T* tmp = allocator.allocate(new_capacity);
    DCHECK(tmp != nullptr);
    if (data != nullptr) {
      memcpy(tmp, data, sizeof(T) * size);
    }
    T* old = data;
    data = tmp;
    // Only deallocate a buffer that was actually allocated; data may be null
    // when the container was constructed with reserve == 0.
    if (old != nullptr) {
      allocator.deallocate(old, capacity);
    }
    capacity = new_capacity;
  }

  void Pushback(const T& elem) {
    if (size == capacity) {
      size_t new_capacity = 2 * capacity + 1;
      Resize(new_capacity);
    }
    data[size++] = elem;
  }

  Allocator allocator;
  T* data;
  size_t size;
  size_t capacity;
};

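// Backs the JVMTI GetObjectsWithTags call. With tag_count == 0 every tagged
// object is returned; otherwise an object is selected when its tag matches
// any element of tags. object_result_ptr and tag_result_ptr may each be null
// when the caller does not need that output array.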
jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
                                            jint tag_count,
                                            const jlong* tags,
                                            jint* count_ptr,
                                            jobject** object_result_ptr,
                                            jlong** tag_result_ptr) {
  if (tag_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  // Check the pointer arguments first: tags is dereferenced below.
  if (tags == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
    if (tags[i] == 0) {
      return ERR(ILLEGAL_ARGUMENT);
    }
  }

  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  art::JNIEnvExt* jni_env = self->GetJniEnv();

  constexpr size_t kDefaultSize = 10;
  size_t initial_object_size;
  size_t initial_tag_size;
  if (tag_count == 0) {
    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
  } else {
    initial_object_size = initial_tag_size = kDefaultSize;
  }
  JvmtiAllocator<void> allocator(jvmti_env);
  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
                                                                         initial_object_size);
  ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);

  size_t count = 0;
  for (auto& pair : tagged_objects_) {
    bool select;
    if (tag_count > 0) {
      select = false;
      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
        if (tags[i] == pair.second) {
          select = true;
          break;
        }
      }
    } else {
      select = true;
    }

    if (select) {
      art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
      if (obj != nullptr) {
        count++;
        if (object_result_ptr != nullptr) {
          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
        }
        if (tag_result_ptr != nullptr) {
          selected_tags.Pushback(pair.second);
        }
      }
    }
  }

  if (object_result_ptr != nullptr) {
    *object_result_ptr = selected_objects.Release();
  }
  if (tag_result_ptr != nullptr) {
    *tag_result_ptr = selected_tags.Release();
  }
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

}  // namespace openjdkjvmti