ART: Switch tagging table to a map
Performance is critical. A map keyed on the object pointer adds overhead
under a moving GC (entries must be re-inserted once objects move), but
gives much faster lookups in the common case.
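
For illustration only (a standalone sketch, not the ART code; SketchTagTable,
FakeObject and Relocate are made-up names): it shows why a pointer-keyed hash
map gives cheap lookups but needs an explicit fix-up pass once a moving
collector relocates objects, using the same max-load-factor trick as this
change to erase and re-insert entries in place without a rehash.

  #include <cstdint>
  #include <iostream>
  #include <limits>
  #include <unordered_map>

  struct FakeObject { int payload; };

  class SketchTagTable {
   public:
    void Set(const FakeObject* obj, int64_t tag) { tags_[obj] = tag; }

    bool GetTag(const FakeObject* obj, int64_t* result) const {
      auto it = tags_.find(obj);  // Average O(1): the common case we care about.
      if (it == tags_.end()) {
        return false;
      }
      *result = it->second;
      return true;
    }

    // The moving-GC overhead: keys are raw addresses, so a relocated object has to be
    // erased and re-inserted under its new address. Raising the max load factor keeps
    // the bucket count fixed, so this can be done in place while iterating.
    void Relocate(const std::unordered_map<const FakeObject*, const FakeObject*>& moved) {
      float original_max_load_factor = tags_.max_load_factor();
      tags_.max_load_factor(std::numeric_limits<float>::max());
      for (auto it = tags_.begin(); it != tags_.end();) {
        auto m = moved.find(it->first);
        if (m != moved.end() && m->second != it->first) {
          int64_t tag = it->second;
          it = tags_.erase(it);           // erase() hands back the next valid iterator.
          tags_.emplace(m->second, tag);  // No rehash, so the bucket layout stays intact.
          continue;
        }
        ++it;
      }
      tags_.max_load_factor(original_max_load_factor);
    }

   private:
    std::unordered_map<const FakeObject*, int64_t> tags_;
  };

  int main() {
    FakeObject old_slot{1};
    FakeObject new_slot{1};
    SketchTagTable table;
    table.Set(&old_slot, 42);
    table.Relocate({{&old_slot, &new_slot}});  // Pretend the GC moved the object.
    int64_t tag = 0;
    std::cout << table.GetTag(&new_slot, &tag) << " " << tag << std::endl;  // Prints "1 42".
    return 0;
  }

The real table keys on art::GcRoot<art::mirror::Object> via the HashGcRoot and
EqGcRoot helpers in this change and reads through read barriers; the sketch
only shows the container mechanics.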
Make test 905 robust against unstable ordering: collect the freed tags and
print them sorted instead of relying on callback order.
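
Also purely illustrative (the actual test sorts on the Java side with
Arrays.sort; PrintSortedTags is a hypothetical stand-in, not part of the
change): ObjectFree callbacks arrive in GC-dependent order, so the test now
prints one sorted list of collected tags rather than one line per callback.

  #include <algorithm>
  #include <cstdint>
  #include <iostream>
  #include <vector>

  // Tags arrive in GC-dependent order; sorting before printing keeps the expected
  // output independent of sweep order.
  void PrintSortedTags(std::vector<int64_t> tags) {
    std::sort(tags.begin(), tags.end());
    std::cout << "[";
    bool first = true;
    for (int64_t tag : tags) {
      std::cout << (first ? "" : ", ") << tag;
      first = false;
    }
    std::cout << "]" << std::endl;
  }

  int main() {
    PrintSortedTags({1000, 10, 100});  // Prints "[10, 100, 1000]" whatever the arrival order.
    return 0;
  }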
Bug: 31385027
Test: m test-art-host
Test: m ART_USE_READ_BARRIER=true test-art-host
Change-Id: Ica3ff603fc78168759fccfe79c97860279ce9036
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index ac8d5e1..7292946 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -312,10 +312,7 @@
art::ScopedObjectAccess soa(jni_env);
art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(object);
- gObjectTagTable.Remove(obj.Ptr(), /* tag* */ nullptr);
- if (tag != 0) {
- gObjectTagTable.Add(obj.Ptr(), tag);
- }
+ gObjectTagTable.Set(obj.Ptr(), tag);
return ERR(NONE);
}
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index 29d4830..9ea14a2 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -31,6 +31,8 @@
#include "object_tagging.h"
+#include <limits>
+
#include "art_jvmti.h"
#include "base/logging.h"
#include "events-inl.h"
@@ -44,21 +46,24 @@
namespace openjdkjvmti {
-void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
+void ObjectTagTable::UpdateTable() {
+ auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
+ art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return original_root.Read<art::kWithReadBarrier>();
+ };
- if (first_free_ == tagged_objects_.size()) {
- tagged_objects_.push_back(Entry(art::GcRoot<art::mirror::Object>(obj), tag));
- first_free_++;
- } else {
- DCHECK_LT(first_free_, tagged_objects_.size());
- DCHECK(tagged_objects_[first_free_].first.IsNull());
- tagged_objects_[first_free_] = Entry(art::GcRoot<art::mirror::Object>(obj), tag);
- // TODO: scan for free elements.
- first_free_ = tagged_objects_.size();
- }
+ UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
+}
+
+bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
+ UpdateTable();
+ return GetTagLocked(self, obj, result);
+}
+
+void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
+ // Same as Set(): an unordered_map cannot contain duplicate keys.
+ Set(obj, tag);
}
bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
@@ -66,24 +71,28 @@
art::MutexLock mu(self, allow_disallow_lock_);
Wait(self);
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
- if (it->first.Read(nullptr) == obj) {
- if (tag != nullptr) {
- *tag = it->second;
- }
- it->first = art::GcRoot<art::mirror::Object>(nullptr);
+ return RemoveLocked(self, obj, tag);
+}
- size_t index = it - tagged_objects_.begin();
- if (index < first_free_) {
- first_free_ = index;
- }
-
- // TODO: compaction.
-
- return true;
+bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ if (tag != nullptr) {
+ *tag = it->second;
}
+ tagged_objects_.erase(it);
+ return true;
}
+ if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ // The table may still hold from-space pointers while obj is a to-space pointer; bring the
+ // table up to date with read barriers.
+ UpdateTable();
+
+ // And try again with the updated table.
+ return RemoveLocked(self, obj, tag);
+ }
+
+ // Not in the table.
return false;
}
@@ -92,25 +101,27 @@
art::MutexLock mu(self, allow_disallow_lock_);
Wait(self);
- for (auto& pair : tagged_objects_) {
- if (pair.first.Read(nullptr) == obj) {
- pair.second = new_tag;
- return true;
- }
+ return SetLocked(self, obj, new_tag);
+}
+
+bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ it->second = new_tag;
+ return true;
}
- // TODO refactor with Add.
- if (first_free_ == tagged_objects_.size()) {
- tagged_objects_.push_back(Entry(art::GcRoot<art::mirror::Object>(obj), new_tag));
- first_free_++;
- } else {
- DCHECK_LT(first_free_, tagged_objects_.size());
- DCHECK(tagged_objects_[first_free_].first.IsNull());
- tagged_objects_[first_free_] = Entry(art::GcRoot<art::mirror::Object>(obj), new_tag);
- // TODO: scan for free elements.
- first_free_ = tagged_objects_.size();
+ if (art::kUseReadBarrier && self->GetIsGcMarking()) {
+ // The table may still hold from-space pointers while obj is a to-space pointer; bring the
+ // table up to date with read barriers.
+ UpdateTable();
+
+ // And try again with the updated table.
+ return SetLocked(self, obj, new_tag);
}
+ // New element.
+ auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
+ DCHECK(insert_it.second);
return false;
}
@@ -127,28 +138,53 @@
art::Thread* self = art::Thread::Current();
art::MutexLock mu(self, allow_disallow_lock_);
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end(); ++it) {
- if (!it->first.IsNull()) {
- art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
- art::mirror::Object* target_obj = visitor->IsMarked(original_obj);
- if (original_obj != target_obj) {
- it->first = art::GcRoot<art::mirror::Object>(target_obj);
+ auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
+ art::mirror::Object* original_obj) {
+ return visitor->IsMarked(original_obj);
+ };
- if (kHandleNull && target_obj == nullptr) {
- HandleNullSweep(it->second);
- }
- }
- } else {
- size_t index = it - tagged_objects_.begin();
- if (index < first_free_) {
- first_free_ = index;
- }
- }
- }
+ UpdateTableWith<decltype(IsMarkedUpdater),
+ kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
}
void ObjectTagTable::HandleNullSweep(jlong tag) {
event_handler_->DispatchEvent(nullptr, JVMTI_EVENT_OBJECT_FREE, tag);
}
+template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
+ALWAYS_INLINE inline void ObjectTagTable::UpdateTableWith(T& updater) {
+ // We optimistically hope that elements will still be reasonably well-distributed when
+ // re-inserted. So we play with the map's mechanics and postpone rehashing: raising the max
+ // load factor keeps the bucket count fixed while entries are erased and re-inserted in
+ // place, which avoids the need for a side vector and a second pass.
+ float original_max_load_factor = tagged_objects_.max_load_factor();
+ tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
+ // For checking that raising the max load factor really does prevent rehashing.
+ size_t original_bucket_count = tagged_objects_.bucket_count();
+
+ for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
+ DCHECK(!it->first.IsNull());
+ art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
+ art::mirror::Object* target_obj = updater(it->first, original_obj);
+ if (original_obj != target_obj) {
+ if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
+ // Ignore null target, don't do anything.
+ } else {
+ jlong tag = it->second;
+ it = tagged_objects_.erase(it);
+ if (target_obj != nullptr) {
+ tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
+ DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
+ } else if (kTargetNull == kCallHandleNull) {
+ HandleNullSweep(tag);
+ }
+ continue; // Iterator was implicitly updated by erase.
+ }
+ }
+ it++;
+ }
+
+ tagged_objects_.max_load_factor(original_max_load_factor);
+ // TODO: consider rehash here.
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index b399e65..90c40f6 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -17,9 +17,12 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_OBJECT_TAGGING_H_
#define ART_RUNTIME_OPENJDKJVMTI_OBJECT_TAGGING_H_
+#include <unordered_map>
+
#include "base/mutex.h"
#include "gc/system_weak.h"
#include "gc_root-inl.h"
+#include "globals.h"
#include "mirror/object.h"
#include "thread-inl.h"
@@ -53,14 +56,7 @@
art::MutexLock mu(self, allow_disallow_lock_);
Wait(self);
- for (const auto& pair : tagged_objects_) {
- if (pair.first.Read(nullptr) == obj) {
- *result = pair.second;
- return true;
- }
- }
-
- return false;
+ return GetTagLocked(self, obj, result);
}
void Sweep(art::IsMarkedVisitor* visitor)
@@ -68,16 +64,80 @@
REQUIRES(!allow_disallow_lock_);
private:
- using Entry = std::pair<art::GcRoot<art::mirror::Object>, jlong>;
+ bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, jlong* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ *result = it->second;
+ return true;
+ }
+
+ if (art::kUseReadBarrier &&
+ self != nullptr &&
+ self->GetIsGcMarking()) {
+ return GetTagSlowPath(self, obj, result);
+ }
+
+ return false;
+ }
+
+ // Slow path for GetTag: we did not find the object, but the table may still hold from-space
+ // pointers while we are being queried with a to-space pointer.
+ bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ void UpdateTable()
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
template <bool kHandleNull>
void SweepImpl(art::IsMarkedVisitor* visitor)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
void HandleNullSweep(jlong tag);
- std::vector<Entry> tagged_objects_ GUARDED_BY(allow_disallow_lock_);
- size_t first_free_ = 0;
+ enum TableUpdateNullTarget {
+ kIgnoreNull,
+ kRemoveNull,
+ kCallHandleNull
+ };
+
+ template <typename T, TableUpdateNullTarget kTargetNull>
+ void UpdateTableWith(T& updater)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ struct HashGcRoot {
+ size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
+ }
+ };
+
+ struct EqGcRoot {
+ bool operator()(const art::GcRoot<art::mirror::Object>& r1,
+ const art::GcRoot<art::mirror::Object>& r2) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
+ }
+ };
+
+ std::unordered_map<art::GcRoot<art::mirror::Object>,
+ jlong,
+ HashGcRoot,
+ EqGcRoot> tagged_objects_
+ GUARDED_BY(allow_disallow_lock_)
+ GUARDED_BY(art::Locks::mutator_lock_);
EventHandler* event_handler_;
};
diff --git a/test/905-object-free/expected.txt b/test/905-object-free/expected.txt
index 31b7318..436ca11 100644
--- a/test/905-object-free/expected.txt
+++ b/test/905-object-free/expected.txt
@@ -1,10 +1,12 @@
-ObjectFree tag=1
+[1]
---
-ObjectFree tag=10
-ObjectFree tag=100
-ObjectFree tag=1000
+[10, 100, 1000]
---
+[]
---
+[]
---
+[]
---
+[]
---
diff --git a/test/905-object-free/src/Main.java b/test/905-object-free/src/Main.java
index 7b52e29..16dec5d 100644
--- a/test/905-object-free/src/Main.java
+++ b/test/905-object-free/src/Main.java
@@ -15,6 +15,7 @@
*/
import java.util.ArrayList;
+import java.util.Arrays;
public class Main {
public static void main(String[] args) throws Exception {
@@ -42,6 +43,7 @@
Runtime.getRuntime().gc();
+ getAndPrintTags();
System.out.println("---");
// Note: the reporting will not depend on the heap layout (which could be unstable). Walking
@@ -53,10 +55,12 @@
Runtime.getRuntime().gc();
+ getAndPrintTags();
System.out.println("---");
Runtime.getRuntime().gc();
+ getAndPrintTags();
System.out.println("---");
}
@@ -66,7 +70,14 @@
setTag(obj, tag);
}
+ private static void getAndPrintTags() {
+ long[] freedTags = getCollectedTags();
+ Arrays.sort(freedTags);
+ System.out.println(Arrays.toString(freedTags));
+ }
+
private static native void setupObjectFreeCallback();
private static native void enableFreeTracking(boolean enable);
private static native void setTag(Object o, long tag);
+ private static native long[] getCollectedTags();
}
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index 5905d48..b41a914 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -32,8 +32,10 @@
namespace art {
namespace Test905ObjectFree {
+static std::vector<jlong> collected_tags;
+
static void JNICALL ObjectFree(jvmtiEnv* ti_env ATTRIBUTE_UNUSED, jlong tag) {
- printf("ObjectFree tag=%zu\n", static_cast<size_t>(tag));
+ collected_tags.push_back(tag);
}
extern "C" JNIEXPORT void JNICALL Java_Main_setupObjectFreeCallback(
@@ -64,6 +66,19 @@
}
}
+extern "C" JNIEXPORT jlongArray JNICALL Java_Main_getCollectedTags(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED) {
+ jlongArray ret = env->NewLongArray(collected_tags.size());
+ if (ret == nullptr) {
+ return ret;
+ }
+
+ env->SetLongArrayRegion(ret, 0, collected_tags.size(), collected_tags.data());
+ collected_tags.clear();
+
+ return ret;
+}
+
// Don't do anything
jint OnLoad(JavaVM* vm,
char* options ATTRIBUTE_UNUSED,