blob: be6edefae21985352c5fefc6a78518f31e5247ba [file] [log] [blame]
Andreas Gampef0140212017-03-03 13:28:58 -08001/* Copyright (C) 2017 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
33#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
34
35#include <unordered_map>
36
37#include "base/macros.h"
38#include "base/mutex.h"
39#include "gc/system_weak.h"
40#include "gc_root-inl.h"
41#include "globals.h"
42#include "jvmti.h"
43#include "mirror/object.h"
44#include "thread-inl.h"
45
46namespace openjdkjvmti {
47
48class EventHandler;
49
50// A system-weak container mapping objects to elements of the template type. This corresponds
51// to a weak hash map. For historical reasons the stored value is called "tag."
// A system-weak container mapping objects to elements of the template type. This corresponds
// to a weak hash map. For historical reasons the stored value is called "tag."
//
// As a SystemWeakHolder, instances are swept by the GC: Sweep() updates moved objects and
// drops entries whose key object has been reclaimed. All public entry points synchronize
// with the GC via allow_disallow_lock_ / Wait() before touching the table.
template <typename T>
class JvmtiWeakTable : public art::gc::SystemWeakHolder {
 public:
  JvmtiWeakTable()
      : art::gc::SystemWeakHolder(art::kTaggingLockLevel),
        update_since_last_sweep_(false) {
  }

  // Remove the mapping for the given object, returning whether such a mapping existed (and the old
  // value).
  ALWAYS_INLINE bool Remove(art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);
  // Same as Remove, but the caller already holds allow_disallow_lock_ (see Lock/Unlock below).
  ALWAYS_INLINE bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Set the mapping for the given object. Returns true if this overwrites an already existing
  // mapping.
  ALWAYS_INLINE virtual bool Set(art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);
  // Same as Set, but the caller already holds allow_disallow_lock_.
  ALWAYS_INLINE virtual bool SetLocked(art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Return the value associated with the given object. Returns true if the mapping exists, false
  // otherwise.
  bool GetTag(art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_) {
    art::Thread* self = art::Thread::Current();
    art::MutexLock mu(self, allow_disallow_lock_);
    // Block while system weaks are disallowed (i.e., while a GC is processing them).
    Wait(self);

    return GetTagLocked(self, obj, result);
  }
  // Same as GetTag, but the caller already holds allow_disallow_lock_.
  bool GetTagLocked(art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_) {
    art::Thread* self = art::Thread::Current();
    allow_disallow_lock_.AssertHeld(self);
    Wait(self);

    return GetTagLocked(self, obj, result);
  }

  // Sweep the container. DO NOT CALL MANUALLY.
  // Called by the GC; updates moved objects and handles entries whose key died.
  ALWAYS_INLINE void Sweep(art::IsMarkedVisitor* visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  // Return all objects that have a value mapping in tags.
  // Results are allocated through jvmti_env; each out-parameter may be null if not wanted.
  ALWAYS_INLINE
  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
                              jint tag_count,
                              const T* tags,
                              /* out */ jint* count_ptr,
                              /* out */ jobject** object_result_ptr,
                              /* out */ T** tag_result_ptr)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  // Locking functions, to allow coarse-grained locking and amortization.
  // Pair with the *Locked variants above to batch several operations under one acquisition.
  ALWAYS_INLINE void Lock() ACQUIRE(allow_disallow_lock_);
  ALWAYS_INLINE void Unlock() RELEASE(allow_disallow_lock_);
  ALWAYS_INLINE void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);

  // Reverse lookup: find an object currently mapped to the given tag, or null if none.
  ALWAYS_INLINE art::mirror::Object* Find(T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

 protected:
  // Should HandleNullSweep be called when Sweep detects the release of an object?
  virtual bool DoesHandleNullOnSweep() {
    return false;
  }
  // If DoesHandleNullOnSweep returns true, this function will be called.
  virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}

 private:
  // Internal Set: caller supplies the current thread and holds the lock.
  ALWAYS_INLINE
  bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Internal Remove: caller supplies the current thread and holds the lock.
  ALWAYS_INLINE
  bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Internal GetTag: fast-path hash lookup, with a read-barrier slow path during GC marking.
  bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_) {
    auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
    if (it != tagged_objects_.end()) {
      *result = it->second;
      return true;
    }

    // Performance optimization: To avoid multiple table updates, ensure that during GC we
    // only update once. See the comment on the implementation of GetTagSlowPath.
    if (art::kUseReadBarrier &&
        self != nullptr &&
        self->GetIsGcMarking() &&
        !update_since_last_sweep_) {
      return GetTagSlowPath(self, obj, result);
    }

    return false;
  }

  // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
  // are asked to retrieve with a to-pointer.
  ALWAYS_INLINE
  bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Update the table by doing read barriers on each element, ensuring that to-space pointers
  // are stored.
  ALWAYS_INLINE
  void UpdateTableWithReadBarrier()
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Shared implementation for Sweep; kHandleNull selects whether dead entries are reported
  // via HandleNullSweep or silently removed.
  template <bool kHandleNull>
  void SweepImpl(art::IsMarkedVisitor* visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  // What to do with entries whose key object resolves to null during a table update.
  enum TableUpdateNullTarget {
    kIgnoreNull,
    kRemoveNull,
    kCallHandleNull
  };

  // Generic table walk: applies 'updater' to each entry, dispatching null keys per kTargetNull.
  template <typename Updater, TableUpdateNullTarget kTargetNull>
  void UpdateTableWith(Updater& updater)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Helper to hand out buffers whose ownership is released to the caller (see GetTaggedObjects).
  template <typename Storage, class Allocator = std::allocator<T>>
  struct ReleasableContainer;

  // Hash on the raw (from-space) pointer: deliberately no read barrier, so the hash stays
  // stable for entries that have not yet been updated to to-space.
  struct HashGcRoot {
    size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
      return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
    }
  };

  // Equality on the raw pointer, consistent with HashGcRoot (no read barrier).
  struct EqGcRoot {
    bool operator()(const art::GcRoot<art::mirror::Object>& r1,
                    const art::GcRoot<art::mirror::Object>& r2) const
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
      return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
    }
  };

  // The map itself; guarded by both the tagging lock and the mutator lock since both the
  // runtime (mutators) and the GC mutate it.
  std::unordered_map<art::GcRoot<art::mirror::Object>,
                     T,
                     HashGcRoot,
                     EqGcRoot> tagged_objects_
      GUARDED_BY(allow_disallow_lock_)
      GUARDED_BY(art::Locks::mutator_lock_);
  // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
  bool update_since_last_sweep_;
};
221
222} // namespace openjdkjvmti
223
224#endif // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_