Store ImtIndex in ArtMethod.
This avoids recalculation and reduces pressure on the thread local cache.
This halves the time we spend hashing, from 2% to 1% (measured with maps on device).
Test: ./art/test.py -b --host --64
Change-Id: I2407bd9c222de4ddc6eea938908a1ac6d7abc35b
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index f254116..c240017 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -31,6 +31,7 @@
#include "dex/invoke_type.h"
#include "dex/primitive.h"
#include "gc_root-inl.h"
+#include "imtable-inl.h"
#include "intrinsics_enum.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
@@ -421,6 +422,31 @@
return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
}
+inline void ArtMethod::SetCounter(int16_t hotness_count) {
+  DCHECK(!IsAbstract()) << PrettyMethod();  // Abstract methods presumably reuse this storage for imt_index_ (see GetImtIndex) -- confirm against art_method.h.
+  hotness_count_ = hotness_count;  // Sets the JIT hotness counter for a concrete method.
+}
+
+inline uint16_t ArtMethod::GetCounter() {
+  DCHECK(!IsAbstract()) << PrettyMethod();  // Hotness is only meaningful for methods with code.
+  return hotness_count_;  // NOTE(review): setter takes int16_t but this returns uint16_t -- matches the out-of-diff declarations, so left as-is.
+}
+
+inline uint32_t ArtMethod::GetImtIndex() {
+  if (LIKELY(IsAbstract() && imt_index_ != 0)) {  // 0 means "not cached yet"; only abstract methods cache an index.
+    uint16_t imt_index = ~imt_index_;  // Stored complemented (see CalculateAndSetImtIndex), so undo the complement.
+    DCHECK_EQ(imt_index, ImTable::GetImtIndex(this)) << PrettyMethod();  // Cached value must agree with a fresh computation.
+    return imt_index;
+  } else {
+    return ImTable::GetImtIndex(this);  // Slow path: recompute (per the commit message, this involves hashing).
+  }
+}
+
+inline void ArtMethod::CalculateAndSetImtIndex() {
+  DCHECK(IsAbstract()) << PrettyMethod();  // Only abstract methods get a cached IMT index.
+  imt_index_ = ~ImTable::GetImtIndex(this);  // Complemented so a valid index of 0 still stores non-zero; assumes the index is never 0xFFFF -- TODO confirm IMT size bound.
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_