Merge "Add missing push/pop shadow frame to artInterpreterToCompiledCodeBridge."
diff --git a/compiler/Android.mk b/compiler/Android.mk
index b7dc9f6..a2419d5 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,7 +23,6 @@
dex/local_value_numbering.cc \
dex/arena_allocator.cc \
dex/arena_bit_vector.cc \
- dex/quick/arm/arm_dex_file_method_inliner.cc \
dex/quick/arm/assemble_arm.cc \
dex/quick/arm/call_arm.cc \
dex/quick/arm/fp_arm.cc \
@@ -41,7 +40,6 @@
dex/quick/mips/call_mips.cc \
dex/quick/mips/fp_mips.cc \
dex/quick/mips/int_mips.cc \
- dex/quick/mips/mips_dex_file_method_inliner.cc \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
dex/quick/mir_to_lir.cc \
@@ -52,7 +50,6 @@
dex/quick/x86/int_x86.cc \
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
- dex/quick/x86/x86_dex_file_method_inliner.cc \
dex/portable/mir_to_gbc.cc \
dex/dex_to_dex_compiler.cc \
dex/mir_dataflow.cc \
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 56facfd..d73f148 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -374,6 +374,7 @@
kRegUseA,
kRegUseC,
kRegUseD,
+ kRegUseB,
kRegUseFPCSList0,
kRegUseFPCSList2,
kRegUseList0,
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 197bba5..6aabb2a 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -63,21 +63,21 @@
LLVMInfo::~LLVMInfo() {
}
-QuickCompilerContext::QuickCompilerContext(CompilerDriver& compiler)
- : inliner_map_(new DexFileToMethodInlinerMap(&compiler)) {
+QuickCompilerContext::QuickCompilerContext()
+ : inliner_map_(new DexFileToMethodInlinerMap()) {
}
QuickCompilerContext::~QuickCompilerContext() {
}
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& compiler) {
- CHECK(compiler.GetCompilerContext() == NULL);
- compiler.SetCompilerContext(new QuickCompilerContext(compiler));
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver) {
+ CHECK(driver.GetCompilerContext() == NULL);
+ driver.SetCompilerContext(new QuickCompilerContext());
}
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& compiler) {
- delete reinterpret_cast<QuickCompilerContext*>(compiler.GetCompilerContext());
- compiler.SetCompilerContext(NULL);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver) {
+ delete reinterpret_cast<QuickCompilerContext*>(driver.GetCompilerContext());
+ driver.SetCompilerContext(NULL);
}
/* Default optimizer/debug setting for the compiler. */
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 4a863f5..bcb8bf0 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -115,7 +115,7 @@
class QuickCompilerContext {
public:
- explicit QuickCompilerContext(CompilerDriver& compiler);
+ QuickCompilerContext();
~QuickCompilerContext();
DexFileToMethodInlinerMap* GetInlinerMap() {
diff --git a/compiler/dex/quick/arm/arm_dex_file_method_inliner.cc b/compiler/dex/quick/arm/arm_dex_file_method_inliner.cc
deleted file mode 100644
index 59f7202..0000000
--- a/compiler/dex/quick/arm/arm_dex_file_method_inliner.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "dex/compiler_enums.h"
-
-#include "arm_dex_file_method_inliner.h"
-
-namespace art {
-
-const DexFileMethodInliner::IntrinsicDef ArmDexFileMethodInliner::kIntrinsicMethods[] = {
-#define INTRINSIC(c, n, p, o, d) \
- { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, d } }
-
- INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0),
- INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, 0),
- INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
- INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0),
-
- INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord),
- INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong),
- INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf),
-
- INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
- INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
- INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0),
- INTRINSIC(JavaLangStrictMath, Abs, J_J, kIntrinsicAbsLong, 0),
- INTRINSIC(JavaLangMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- INTRINSIC(JavaLangStrictMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- INTRINSIC(JavaLangMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- INTRINSIC(JavaLangStrictMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
- INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
-
- INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
- INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
- INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
- INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
- INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
- INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength),
-
- INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
-
- INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord),
- INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong),
- INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf),
- INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord),
- INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong),
- INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf),
-
- INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas,
- kIntrinsicFlagNone),
- INTRINSIC(SunMiscUnsafe, CompareAndSwapLong, ObjectJJJ_Z, kIntrinsicCas,
- kIntrinsicFlagIsLong),
- INTRINSIC(SunMiscUnsafe, CompareAndSwapObject, ObjectJObjectObject_Z, kIntrinsicCas,
- kIntrinsicFlagIsObject),
-
-#define UNSAFE_GET_PUT(type, code, type_flags) \
- INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags), \
- INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsVolatile), \
- INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsOrdered)
-
- UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone),
- UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong),
- UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject),
-#undef UNSAFE_GET_PUT
-
-#undef INTRINSIC
-};
-
-ArmDexFileMethodInliner::ArmDexFileMethodInliner() {
-}
-
-ArmDexFileMethodInliner::~ArmDexFileMethodInliner() {
-}
-
-void ArmDexFileMethodInliner::FindIntrinsics(const DexFile* dex_file) {
- IndexCache cache;
- DoFindIntrinsics(dex_file, &cache, kIntrinsicMethods, arraysize(kIntrinsicMethods));
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/arm/arm_dex_file_method_inliner.h b/compiler/dex/quick/arm/arm_dex_file_method_inliner.h
deleted file mode 100644
index 3428391..0000000
--- a/compiler/dex/quick/arm/arm_dex_file_method_inliner.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM_ARM_DEX_FILE_METHOD_INLINER_H_
-#define ART_COMPILER_DEX_QUICK_ARM_ARM_DEX_FILE_METHOD_INLINER_H_
-
-#include "dex/quick/dex_file_method_inliner.h"
-
-namespace art {
-
-class ArmDexFileMethodInliner : public DexFileMethodInliner {
- public:
- ArmDexFileMethodInliner();
- ~ArmDexFileMethodInliner();
-
- void FindIntrinsics(const DexFile* dex_file);
-
- private:
- static const IntrinsicDef kIntrinsicMethods[];
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_ARM_ARM_DEX_FILE_METHOD_INLINER_H_
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index a682d58..5d78ed5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -747,7 +747,8 @@
}
MethodReference method_ref(cu_->dex_file, cu_->method_idx);
const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
- verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
+ verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[0]);
+ DCHECK_EQ(gc_map_raw->size(), dex_gc_map.RawSize());
// Compute native offset to references size.
NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
mapping_table.PcToDexSize(),
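
Note on the codegen_util.cc change above: the old code skipped a 4-byte prefix by hand and passed an explicit length, while the new DexPcToReferenceMap constructor takes the raw buffer and derives its own size, which the added DCHECK compares against the vector's size. A minimal sketch of a self-describing table in that style, under a hypothetical layout (SelfSizedMap and its header fields are illustrative, not the actual ART GC-map format):

#include <cstddef>
#include <cstdint>

class SelfSizedMap {
 public:
  explicit SelfSizedMap(const uint8_t* data) : data_(data) {}

  // Hypothetical header: entry count in the first two bytes, entry width in the third.
  size_t NumEntries() const { return data_[0] | (data_[1] << 8); }
  size_t EntryWidth() const { return data_[2]; }

  // The total byte size is recomputable from the header, so a caller can verify
  // consistency the same way the new DCHECK does: DCHECK_EQ(raw->size(), map.RawSize()).
  size_t RawSize() const { return 4u + NumEntries() * EntryWidth(); }

 private:
  const uint8_t* const data_;
};
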
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 6c0328e..fb471ab 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -157,8 +157,74 @@
kClassCacheJavaLangObject } },
};
-DexFileMethodInliner::~DexFileMethodInliner() {
-}
+const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = {
+#define INTRINSIC(c, n, p, o, d) \
+ { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, d } }
+
+ INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0),
+ INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, 0),
+ INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
+ INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0),
+
+ INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord),
+ INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong),
+ INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf),
+
+ INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
+ INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
+ INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0),
+ INTRINSIC(JavaLangStrictMath, Abs, J_J, kIntrinsicAbsLong, 0),
+ INTRINSIC(JavaLangMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
+ INTRINSIC(JavaLangStrictMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
+ INTRINSIC(JavaLangMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
+ INTRINSIC(JavaLangStrictMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
+ INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+ INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+
+ INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
+ INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
+ INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
+ INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
+ INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
+ INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength),
+
+ INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
+
+ INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
+ INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord),
+ INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong),
+ INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf),
+ INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte),
+ INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord),
+ INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong),
+ INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf),
+
+ INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas,
+ kIntrinsicFlagNone),
+ INTRINSIC(SunMiscUnsafe, CompareAndSwapLong, ObjectJJJ_Z, kIntrinsicCas,
+ kIntrinsicFlagIsLong),
+ INTRINSIC(SunMiscUnsafe, CompareAndSwapObject, ObjectJObjectObject_Z, kIntrinsicCas,
+ kIntrinsicFlagIsObject),
+
+#define UNSAFE_GET_PUT(type, code, type_flags) \
+ INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
+ type_flags & ~kIntrinsicFlagIsObject), \
+ INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
+ (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
+ INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
+ type_flags), \
+ INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
+ type_flags | kIntrinsicFlagIsVolatile), \
+ INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
+ type_flags | kIntrinsicFlagIsOrdered)
+
+ UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone),
+ UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong),
+ UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject),
+#undef UNSAFE_GET_PUT
+
+#undef INTRINSIC
+};
DexFileMethodInliner::DexFileMethodInliner()
: dex_file_(NULL) {
@@ -170,6 +236,9 @@
COMPILE_ASSERT(arraysize(kProtoCacheDefs) == kProtoCacheLast, bad_arraysize_kProtoCacheNames);
}
+DexFileMethodInliner::~DexFileMethodInliner() {
+}
+
bool DexFileMethodInliner::IsIntrinsic(uint32_t method_index) const {
return intrinsics_.find(method_index) != intrinsics_.end();
}
@@ -333,15 +402,15 @@
std::fill_n(proto_indexes, arraysize(proto_indexes), kIndexUnresolved);
}
-void DexFileMethodInliner::DoFindIntrinsics(const DexFile* dex_file, IndexCache* cache,
- const IntrinsicDef* defs, uint32_t def_count) {
+void DexFileMethodInliner::FindIntrinsics(const DexFile* dex_file) {
DCHECK(dex_file != nullptr);
DCHECK(dex_file_ == nullptr);
- for (uint32_t i = 0u; i != def_count; ++i) {
- uint32_t method_id = FindMethodIndex(dex_file, cache, defs[i].method_def);
+ IndexCache cache;
+ for (const IntrinsicDef& def : kIntrinsicMethods) {
+ uint32_t method_id = FindMethodIndex(dex_file, &cache, def.method_def);
if (method_id != kIndexNotFound) {
DCHECK(intrinsics_.find(method_id) == intrinsics_.end());
- intrinsics_[method_id] = defs[i].intrinsic;
+ intrinsics_[method_id] = def.intrinsic;
}
}
dex_file_ = dex_file;
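
Note on the dex_file_method_inliner.cc change above: with the per-architecture subclasses gone, a single shared kIntrinsicMethods table is resolved against each dex file exactly once and cached as a method-index-to-Intrinsic map, so IsIntrinsic() reduces to a map lookup. A minimal sketch of that resolve-then-cache shape, with stand-in names (the table contents, ResolveMethodIndex and kNotFound below are illustrative, not the ART APIs):

#include <cstdint>
#include <functional>
#include <map>
#include <string>

struct Intrinsic { int opcode; uint32_t data; };
struct IntrinsicDef { const char* signature; Intrinsic intrinsic; };

static const IntrinsicDef kTable[] = {
  { "java.lang.Math.abs(I)I",      {1, 0} },
  { "java.lang.String.charAt(I)C", {2, 0} },
};

constexpr uint32_t kNotFound = static_cast<uint32_t>(-1);

// Stub resolver: the real FindMethodIndex() walks the dex file's string, type,
// proto and method-id tables, caching intermediate indices in an IndexCache.
uint32_t ResolveMethodIndex(const void* /*dex_file*/, const char* signature) {
  return static_cast<uint32_t>(std::hash<std::string>{}(signature));
}

std::map<uint32_t, Intrinsic> BuildIntrinsicCache(const void* dex_file) {
  std::map<uint32_t, Intrinsic> cache;
  for (const IntrinsicDef& def : kTable) {          // mirrors the new range-for loop
    uint32_t method_idx = ResolveMethodIndex(dex_file, def.signature);
    if (method_idx != kNotFound) {
      cache[method_idx] = def.intrinsic;            // IsIntrinsic() is then a find()
    }
  }
  return cache;
}
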
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index bc00513..948f4bb 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -89,12 +89,8 @@
*/
class DexFileMethodInliner {
public:
- virtual ~DexFileMethodInliner();
-
- /**
- * Find all known intrinsic methods in the dex_file and cache their indices.
- */
- virtual void FindIntrinsics(const DexFile* dex_file) = 0;
+ DexFileMethodInliner();
+ ~DexFileMethodInliner();
/**
* Check whether a particular method index corresponds to an intrinsic function.
@@ -103,15 +99,10 @@
/**
* Generate code for an intrinsic function invocation.
- *
- * TODO: This should be target-specific. For the time being,
- * it's shared since it dispatches everything to backend.
*/
bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) const;
- protected:
- DexFileMethodInliner();
-
+ private:
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
@@ -290,6 +281,7 @@
static const char* kClassCacheNames[];
static const char* kNameCacheNames[];
static const ProtoDef kProtoCacheDefs[];
+ static const IntrinsicDef kIntrinsicMethods[];
static const uint32_t kIndexNotFound = static_cast<uint32_t>(-1);
static const uint32_t kIndexUnresolved = static_cast<uint32_t>(-2);
@@ -303,14 +295,22 @@
static uint32_t FindMethodIndex(const DexFile* dex_file, IndexCache* cache,
const MethodDef& method_def);
- void DoFindIntrinsics(const DexFile* dex_file, IndexCache* cache,
- const IntrinsicDef* defs, uint32_t def_count);
+ /**
+ * Find all known intrinsic methods in the dex_file and cache their indices.
+ *
+ * Only DexFileToMethodInlinerMap may call this function to initialize the inliner.
+ */
+ void FindIntrinsics(const DexFile* dex_file);
+
+ friend class DexFileToMethodInlinerMap;
/*
* Maps method indexes (for the particular DexFile) to Intrinsic definitions.
*/
std::map<uint32_t, Intrinsic> intrinsics_;
const DexFile* dex_file_;
+
+ DISALLOW_COPY_AND_ASSIGN(DexFileMethodInliner);
};
} // namespace art
diff --git a/compiler/dex/quick/dex_file_to_method_inliner_map.cc b/compiler/dex/quick/dex_file_to_method_inliner_map.cc
index 56a42bc..0107ed3 100644
--- a/compiler/dex/quick/dex_file_to_method_inliner_map.cc
+++ b/compiler/dex/quick/dex_file_to_method_inliner_map.cc
@@ -22,17 +22,13 @@
#include "base/mutex-inl.h"
#include "base/logging.h"
#include "driver/compiler_driver.h"
-#include "dex/quick/arm/arm_dex_file_method_inliner.h"
-#include "dex/quick/mips/mips_dex_file_method_inliner.h"
-#include "dex/quick/x86/x86_dex_file_method_inliner.h"
#include "dex_file_to_method_inliner_map.h"
namespace art {
-DexFileToMethodInlinerMap::DexFileToMethodInlinerMap(const CompilerDriver* compiler)
- : compiler_(compiler),
- mutex_("inline_helper_mutex") {
+DexFileToMethodInlinerMap::DexFileToMethodInlinerMap()
+ : lock_("inline_helper_mutex") {
}
DexFileToMethodInlinerMap::~DexFileToMethodInlinerMap() {
@@ -44,31 +40,19 @@
const DexFileMethodInliner& DexFileToMethodInlinerMap::GetMethodInliner(const DexFile* dex_file) {
Thread* self = Thread::Current();
{
- ReaderMutexLock lock(self, mutex_);
+ ReaderMutexLock lock(self, lock_);
auto it = inliners_.find(dex_file);
if (it != inliners_.end()) {
return *it->second;
}
}
- WriterMutexLock lock(self, mutex_);
+ WriterMutexLock lock(self, lock_);
DexFileMethodInliner** inliner = &inliners_[dex_file]; // inserts new entry if not found
if (*inliner) {
return **inliner;
}
- switch (compiler_->GetInstructionSet()) {
- case kThumb2:
- *inliner = new ArmDexFileMethodInliner;
- break;
- case kX86:
- *inliner = new X86DexFileMethodInliner;
- break;
- case kMips:
- *inliner = new MipsDexFileMethodInliner;
- break;
- default:
- LOG(FATAL) << "Unexpected instruction set: " << compiler_->GetInstructionSet();
- }
+ *inliner = new DexFileMethodInliner();
DCHECK(*inliner != nullptr);
// TODO: per-dex file locking for the intrinsics container filling.
(*inliner)->FindIntrinsics(dex_file);
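
Note on the dex_file_to_method_inliner_map.cc change above: GetMethodInliner() keeps the read-mostly double-checked pattern, but now constructs a plain DexFileMethodInliner instead of switching on the instruction set, which is why the CompilerDriver member could be dropped. A standalone sketch of the locking pattern, assuming std::shared_mutex in place of ART's ReaderWriterMutex (InlinerMapSketch and its int payload are stand-ins):

#include <map>
#include <memory>
#include <shared_mutex>

class InlinerMapSketch {
 public:
  const int& Get(const void* key) {
    {
      std::shared_lock<std::shared_mutex> read_lock(lock_);  // fast path: shared lock, lookup only
      auto it = map_.find(key);
      if (it != map_.end()) {
        return *it->second;
      }
    }
    std::unique_lock<std::shared_mutex> write_lock(lock_);   // slow path: exclusive lock
    std::unique_ptr<int>& slot = map_[key];                  // inserts an empty slot if missing
    if (slot == nullptr) {
      // Stands in for: new DexFileMethodInliner() followed by FindIntrinsics(dex_file).
      slot.reset(new int(42));
    }
    return *slot;
  }

 private:
  std::shared_mutex lock_;
  std::map<const void*, std::unique_ptr<int>> map_;
};
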
diff --git a/compiler/dex/quick/dex_file_to_method_inliner_map.h b/compiler/dex/quick/dex_file_to_method_inliner_map.h
index 77f2648..476f002 100644
--- a/compiler/dex/quick/dex_file_to_method_inliner_map.h
+++ b/compiler/dex/quick/dex_file_to_method_inliner_map.h
@@ -37,15 +37,16 @@
*/
class DexFileToMethodInlinerMap {
public:
- explicit DexFileToMethodInlinerMap(const CompilerDriver* compiler);
+ DexFileToMethodInlinerMap();
~DexFileToMethodInlinerMap();
- const DexFileMethodInliner& GetMethodInliner(const DexFile* dex_file) LOCKS_EXCLUDED(mutex_);
+ const DexFileMethodInliner& GetMethodInliner(const DexFile* dex_file) LOCKS_EXCLUDED(lock_);
private:
- const CompilerDriver* const compiler_;
- ReaderWriterMutex mutex_;
- std::map<const DexFile*, DexFileMethodInliner*> inliners_ GUARDED_BY(mutex_);
+ ReaderWriterMutex lock_;
+ std::map<const DexFile*, DexFileMethodInliner*> inliners_ GUARDED_BY(lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(DexFileToMethodInlinerMap);
};
} // namespace art
diff --git a/compiler/dex/quick/mips/mips_dex_file_method_inliner.cc b/compiler/dex/quick/mips/mips_dex_file_method_inliner.cc
deleted file mode 100644
index 05d8ac8..0000000
--- a/compiler/dex/quick/mips/mips_dex_file_method_inliner.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "dex/compiler_enums.h"
-
-#include "mips_dex_file_method_inliner.h"
-
-namespace art {
-
-const DexFileMethodInliner::IntrinsicDef MipsDexFileMethodInliner::kIntrinsicMethods[] = {
-#define INTRINSIC(c, n, p, o, d) \
- { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, d } }
-
- // INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0),
- // INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, 0),
- // INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
- // INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0),
-
- // INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord),
- // INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong),
- // INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf),
-
- // INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
- // INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
- // INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0),
- // INTRINSIC(JavaLangStrictMath, Abs, J_J, kIntrinsicAbsLong, 0),
- // INTRINSIC(JavaLangMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- // INTRINSIC(JavaLangStrictMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- // INTRINSIC(JavaLangMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- // INTRINSIC(JavaLangStrictMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- // INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
- // INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
-
- // INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
- // INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
- // INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
- // INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
- // INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
- // INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength),
-
- INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
-
- INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
- // INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord),
- // INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong),
- // INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf),
- INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte),
- // INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord),
- // INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong),
- // INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf),
-
- // INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas,
- // kIntrinsicFlagNone),
- // INTRINSIC(SunMiscUnsafe, CompareAndSwapLong, ObjectJJJ_Z, kIntrinsicCas,
- // kIntrinsicFlagIsLong),
- // INTRINSIC(SunMiscUnsafe, CompareAndSwapObject, ObjectJObjectObject_Z, kIntrinsicCas,
- // kIntrinsicFlagIsObject),
-
-#define UNSAFE_GET_PUT(type, code, type_flags) \
- INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags), \
- INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsVolatile), \
- INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsOrdered)
-
- // UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone),
- // UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong),
- // UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject),
-#undef UNSAFE_GET_PUT
-
-#undef INTRINSIC
-};
-
-MipsDexFileMethodInliner::MipsDexFileMethodInliner() {
-}
-
-MipsDexFileMethodInliner::~MipsDexFileMethodInliner() {
-}
-
-void MipsDexFileMethodInliner::FindIntrinsics(const DexFile* dex_file) {
- IndexCache cache;
- DoFindIntrinsics(dex_file, &cache, kIntrinsicMethods, arraysize(kIntrinsicMethods));
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips/mips_dex_file_method_inliner.h b/compiler/dex/quick/mips/mips_dex_file_method_inliner.h
deleted file mode 100644
index 8fe7ec7..0000000
--- a/compiler/dex/quick/mips/mips_dex_file_method_inliner.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS_MIPS_DEX_FILE_METHOD_INLINER_H_
-#define ART_COMPILER_DEX_QUICK_MIPS_MIPS_DEX_FILE_METHOD_INLINER_H_
-
-#include "dex/quick/dex_file_method_inliner.h"
-
-namespace art {
-
-class MipsDexFileMethodInliner : public DexFileMethodInliner {
- public:
- MipsDexFileMethodInliner();
- ~MipsDexFileMethodInliner();
-
- void FindIntrinsics(const DexFile* dex_file);
-
- private:
- static const IntrinsicDef kIntrinsicMethods[];
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS_MIPS_DEX_FILE_METHOD_INLINER_H_
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 06cec53..2a54eb3 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -71,6 +71,7 @@
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
+#define REG_USEB (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 1bf79cc..96dc6ee 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -246,7 +246,9 @@
UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
#undef UNARY_ENCODING_MAP
- { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0, { 0, 0, 0x0F, 0xC8, 0, 0, 0, 0 }, "Bswap32R", "!0r" },
+ { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0, { 0, 0, 0x0F, 0xC8, 0, 0, 0, 0 }, "Bswap32R", "!0r" },
+ { kX86Push32R, kRegOpcode, IS_UNARY_OP | REG_USE0 | IS_STORE, { 0, 0, 0x50, 0, 0, 0, 0, 0 }, "Push32R", "!0r" },
+ { kX86Pop32R, kRegOpcode, IS_UNARY_OP | REG_DEF0 | IS_LOAD, { 0, 0, 0x58, 0, 0, 0, 0, 0 }, "Pop32R", "!0r" },
#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
@@ -308,6 +310,8 @@
{ kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
{ kX86LockCmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
{ kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86LockCmpxchg8bM, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1d]" },
+ { kX86LockCmpxchg8bA, kArray, IS_STORE | IS_QUAD_OP | REG_USE01 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1r<<!2d+!3d]" },
EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
@@ -492,6 +496,37 @@
return 0;
}
+void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry) {
+ if (entry->skeleton.prefix1 != 0) {
+ code_buffer_.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ code_buffer_.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+}
+
+void X86Mir2Lir::EmitOpcode(const X86EncodingMap* entry) {
+ code_buffer_.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ code_buffer_.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+ code_buffer_.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+}
+
+void X86Mir2Lir::EmitPrefixAndOpcode(const X86EncodingMap* entry) {
+ EmitPrefix(entry);
+ EmitOpcode(entry);
+}
+
static uint8_t ModrmForDisp(int base, int disp) {
// BP requires an explicit disp, so do not omit it in the 0 case
if (disp == 0 && base != rBP) {
@@ -503,7 +538,7 @@
}
}
-void X86Mir2Lir::EmitDisp(int base, int disp) {
+void X86Mir2Lir::EmitDisp(uint8_t base, int disp) {
// BP requires an explicit disp, so do not omit it in the 0 case
if (disp == 0 && base != rBP) {
return;
@@ -517,167 +552,22 @@
}
}
-void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- // There's no 3-byte instruction with +rd
- DCHECK_NE(0x38, entry->skeleton.extra_opcode1);
- DCHECK_NE(0x3A, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- DCHECK(!X86_FPREG(reg));
- DCHECK_LT(reg, 8);
- code_buffer_.back() += reg;
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg)) {
- reg = reg & X86_FP_REG_MASK;
- }
- if (reg >= 4) {
- DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- }
- DCHECK_LT(reg, 8);
- uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
- code_buffer_.push_back(modrm);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- DCHECK_LT(entry->skeleton.modrm_opcode, 8);
+void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp) {
+ DCHECK_LT(reg_or_opcode, 8);
DCHECK_LT(base, 8);
- uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
- code_buffer_.push_back(modrm);
- EmitDisp(base, disp);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
- uint8_t base, int disp, uint8_t reg) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg)) {
- reg = reg & X86_FP_REG_MASK;
- }
- if (reg >= 4) {
- DCHECK(strchr(entry->name, '8') == NULL ||
- entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
- << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- }
- DCHECK_LT(reg, 8);
- DCHECK_LT(base, 8);
- uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
code_buffer_.push_back(modrm);
if (base == rX86_SP) {
// Special SIB for SP base
code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
}
EmitDisp(base, disp);
- DCHECK_EQ(0, entry->skeleton.modrm_opcode);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
- uint8_t reg, uint8_t base, int disp) {
- // Opcode will flip operands.
- EmitMemReg(entry, base, disp, reg);
-}
-
-void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
- int scale, int disp) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg)) {
- reg = reg & X86_FP_REG_MASK;
- }
- DCHECK_LT(reg, 8);
- uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
+void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index,
+ int scale, int disp) {
+ DCHECK_LT(reg_or_opcode, 8);
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | rX86_SP;
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(index, 8);
@@ -685,124 +575,9 @@
uint8_t sib = (scale << 6) | (index << 3) | base;
code_buffer_.push_back(sib);
EmitDisp(base, disp);
- DCHECK_EQ(0, entry->skeleton.modrm_opcode);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
-void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
- uint8_t reg) {
- // Opcode will flip operands.
- EmitRegArray(entry, reg, base, index, scale, disp);
-}
-
-void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
- DCHECK_NE(entry->skeleton.prefix1, 0);
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg)) {
- reg = reg & X86_FP_REG_MASK;
- }
- if (reg >= 4) {
- DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
- << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- }
- DCHECK_LT(reg, 8);
- uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
- code_buffer_.push_back(modrm);
- code_buffer_.push_back(disp & 0xFF);
- code_buffer_.push_back((disp >> 8) & 0xFF);
- code_buffer_.push_back((disp >> 16) & 0xFF);
- code_buffer_.push_back((disp >> 24) & 0xFF);
- DCHECK_EQ(0, entry->skeleton.modrm_opcode);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg1)) {
- reg1 = reg1 & X86_FP_REG_MASK;
- }
- if (X86_FPREG(reg2)) {
- reg2 = reg2 & X86_FP_REG_MASK;
- }
- DCHECK_LT(reg1, 8);
- DCHECK_LT(reg2, 8);
- uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
- code_buffer_.push_back(modrm);
- DCHECK_EQ(0, entry->skeleton.modrm_opcode);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
- DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
- uint8_t reg1, uint8_t reg2, int32_t imm) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- if (X86_FPREG(reg1)) {
- reg1 = reg1 & X86_FP_REG_MASK;
- }
- if (X86_FPREG(reg2)) {
- reg2 = reg2 & X86_FP_REG_MASK;
- }
- DCHECK_LT(reg1, 8);
- DCHECK_LT(reg2, 8);
- uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
- code_buffer_.push_back(modrm);
- DCHECK_EQ(0, entry->skeleton.modrm_opcode);
- DCHECK_EQ(0, entry->skeleton.ax_opcode);
+void X86Mir2Lir::EmitImm(const X86EncodingMap* entry, int imm) {
switch (entry->skeleton.immediate_bytes) {
case 1:
DCHECK(IS_SIMM8(imm));
@@ -826,6 +601,153 @@
}
}
+void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) {
+ EmitPrefixAndOpcode(entry);
+ // There's no 3-byte instruction with +rd
+ DCHECK(entry->skeleton.opcode != 0x0F ||
+ (entry->skeleton.extra_opcode1 != 0x38 && entry->skeleton.extra_opcode1 != 0x3A));
+ DCHECK(!X86_FPREG(reg));
+ DCHECK_LT(reg, 8);
+ code_buffer_.back() += reg;
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ code_buffer_.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
+ EmitPrefix(entry);
+ code_buffer_.push_back(entry->skeleton.opcode);
+ DCHECK_NE(0x0F, entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ DCHECK_NE(rX86_SP, base);
+ EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index,
+ int scale, int disp) {
+ EmitPrefixAndOpcode(entry);
+ EmitModrmSibDisp(entry->skeleton.modrm_opcode, base, index, scale, disp);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
+ uint8_t base, int disp, uint8_t reg) {
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL ||
+ entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
+ << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ }
+ EmitModrmDisp(reg, base, disp);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
+ uint8_t reg, uint8_t base, int disp) {
+ // Opcode will flip operands.
+ EmitMemReg(entry, base, disp, reg);
+}
+
+void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+ int scale, int disp) {
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ EmitModrmSibDisp(reg, base, index, scale, disp);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+ uint8_t reg) {
+ // Opcode will flip operands.
+ EmitRegArray(entry, reg, base, index, scale, disp);
+}
+
+void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
+ DCHECK_NE(entry->skeleton.prefix1, 0);
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
+ code_buffer_.push_back(modrm);
+ code_buffer_.push_back(disp & 0xFF);
+ code_buffer_.push_back((disp >> 8) & 0xFF);
+ code_buffer_.push_back((disp >> 16) & 0xFF);
+ code_buffer_.push_back((disp >> 24) & 0xFF);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg1)) {
+ reg1 = reg1 & X86_FP_REG_MASK;
+ }
+ if (X86_FPREG(reg2)) {
+ reg2 = reg2 & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg1, 8);
+ DCHECK_LT(reg2, 8);
+ uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+ code_buffer_.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
+ uint8_t reg1, uint8_t reg2, int32_t imm) {
+ EmitPrefixAndOpcode(entry);
+ if (X86_FPREG(reg1)) {
+ reg1 = reg1 & X86_FP_REG_MASK;
+ }
+ if (X86_FPREG(reg2)) {
+ reg2 = reg2 & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg1, 8);
+ DCHECK_LT(reg2, 8);
+ uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+ code_buffer_.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ EmitImm(entry, imm);
+}
+
void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
if (entry->skeleton.prefix1 != 0) {
code_buffer_.push_back(entry->skeleton.prefix1);
@@ -838,95 +760,25 @@
if (reg == rAX && entry->skeleton.ax_opcode != 0) {
code_buffer_.push_back(entry->skeleton.ax_opcode);
} else {
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
+ EmitOpcode(entry);
if (X86_FPREG(reg)) {
reg = reg & X86_FP_REG_MASK;
}
uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
code_buffer_.push_back(modrm);
}
- switch (entry->skeleton.immediate_bytes) {
- case 1:
- DCHECK(IS_SIMM8(imm));
- code_buffer_.push_back(imm & 0xFF);
- break;
- case 2:
- DCHECK(IS_SIMM16(imm));
- code_buffer_.push_back(imm & 0xFF);
- code_buffer_.push_back((imm >> 8) & 0xFF);
- break;
- case 4:
- code_buffer_.push_back(imm & 0xFF);
- code_buffer_.push_back((imm >> 8) & 0xFF);
- code_buffer_.push_back((imm >> 16) & 0xFF);
- code_buffer_.push_back((imm >> 24) & 0xFF);
- break;
- default:
- LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
- << ") for instruction: " << entry->name;
- break;
- }
+ EmitImm(entry, imm);
}
void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
+ EmitPrefixAndOpcode(entry);
uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
code_buffer_.push_back(modrm);
code_buffer_.push_back(disp & 0xFF);
code_buffer_.push_back((disp >> 8) & 0xFF);
code_buffer_.push_back((disp >> 16) & 0xFF);
code_buffer_.push_back((disp >> 24) & 0xFF);
- switch (entry->skeleton.immediate_bytes) {
- case 1:
- DCHECK(IS_SIMM8(imm));
- code_buffer_.push_back(imm & 0xFF);
- break;
- case 2:
- DCHECK(IS_SIMM16(imm));
- code_buffer_.push_back(imm & 0xFF);
- code_buffer_.push_back((imm >> 8) & 0xFF);
- break;
- case 4:
- code_buffer_.push_back(imm & 0xFF);
- code_buffer_.push_back((imm >> 8) & 0xFF);
- code_buffer_.push_back((imm >> 16) & 0xFF);
- code_buffer_.push_back((imm >> 24) & 0xFF);
- break;
- default:
- LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
- << ") for instruction: " << entry->name;
- break;
- }
+ EmitImm(entry, imm);
DCHECK_EQ(entry->skeleton.ax_opcode, 0);
}
@@ -940,31 +792,16 @@
}
void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
+ EmitPrefix(entry);
if (imm != 1) {
code_buffer_.push_back(entry->skeleton.opcode);
} else {
// Shorter encoding for 1 bit shift
code_buffer_.push_back(entry->skeleton.ax_opcode);
}
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
+ DCHECK_NE(0x0F, entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
if (reg >= 4) {
DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
<< " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -981,15 +818,9 @@
void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
+ EmitPrefix(entry);
code_buffer_.push_back(entry->skeleton.opcode);
+ DCHECK_NE(0x0F, entry->skeleton.opcode);
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
DCHECK_LT(reg, 8);
@@ -1059,55 +890,15 @@
}
void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) {
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
- code_buffer_.push_back(modrm);
- if (base == rX86_SP) {
- // Special SIB for SP base
- code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
- }
- EmitDisp(base, disp);
+ EmitPrefixAndOpcode(entry);
+ EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) {
DCHECK_NE(entry->skeleton.prefix1, 0);
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.opcode == 0x0F) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode1);
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
+ EmitPrefixAndOpcode(entry);
uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
code_buffer_.push_back(modrm);
code_buffer_.push_back(disp & 0xFF);
@@ -1131,20 +922,14 @@
reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
disp = tab_rec->offset;
}
- if (entry->skeleton.prefix1 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix1);
- if (entry->skeleton.prefix2 != 0) {
- code_buffer_.push_back(entry->skeleton.prefix2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.prefix2);
- }
+ EmitPrefix(entry);
if (X86_FPREG(reg)) {
reg = reg & X86_FP_REG_MASK;
}
DCHECK_LT(reg, 8);
if (entry->opcode == kX86PcRelLoadRA) {
code_buffer_.push_back(entry->skeleton.opcode);
+ DCHECK_NE(0x0F, entry->skeleton.opcode);
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
@@ -1320,15 +1105,7 @@
case kNullary: // 1 byte of opcode
DCHECK_EQ(0, entry->skeleton.prefix1);
DCHECK_EQ(0, entry->skeleton.prefix2);
- code_buffer_.push_back(entry->skeleton.opcode);
- if (entry->skeleton.extra_opcode1 != 0) {
- code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode2 != 0) {
- code_buffer_.push_back(entry->skeleton.extra_opcode2);
- }
- } else {
- DCHECK_EQ(0, entry->skeleton.extra_opcode2);
- }
+ EmitOpcode(entry);
DCHECK_EQ(0, entry->skeleton.modrm_opcode);
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
@@ -1342,6 +1119,9 @@
case kMem: // lir operands - 0: base, 1: disp
EmitOpMem(entry, lir->operands[0], lir->operands[1]);
break;
+ case kArray: // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ EmitOpArray(entry, lir->operands[0], lir->operands[1], lir->operands[2], lir->operands[3]);
+ break;
case kMemReg: // lir operands - 0: base, 1: disp, 2: reg
EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
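
Note on the assemble_x86.cc change above: the repeated prefix/opcode and ModRM/SIB emission code is factored into EmitPrefix(), EmitOpcode(), EmitModrmDisp(), EmitModrmSibDisp() and EmitImm(). For reference, the bit packing those helpers rely on (standard x86 encoding rules; ModRM() and Sib() below are illustrative helpers, not ART code):

#include <cstdint>

// ModRM byte: | mod (2 bits) | reg or /digit opcode extension (3 bits) | r/m (3 bits) |
constexpr uint8_t ModRM(uint8_t mod, uint8_t reg_or_opcode, uint8_t rm) {
  return static_cast<uint8_t>((mod << 6) | (reg_or_opcode << 3) | rm);
}

// SIB byte: | scale (2 bits) | index (3 bits) | base (3 bits) |
constexpr uint8_t Sib(uint8_t scale, uint8_t index, uint8_t base) {
  return static_cast<uint8_t>((scale << 6) | (index << 3) | base);
}

// r/m == 4 (the ESP slot) is what selects a following SIB byte, which is why
// EmitModrmSibDisp() always encodes rX86_SP as r/m; mod == 1 appends an 8-bit
// displacement and mod == 2 a 32-bit one, matching ModrmForDisp()/EmitDisp().
static_assert(ModRM(3, 1, 2) == 0xCA, "register-direct form: reg=ECX, r/m=EDX");
static_assert(Sib(2, 1, 3) == 0x8B, "scale 4, index=ECX, base=EBX");
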
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 805ef29..6552607 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -171,10 +171,18 @@
bool InexpensiveConstantDouble(int64_t value);
private:
- void EmitDisp(int base, int disp);
+ void EmitPrefix(const X86EncodingMap* entry);
+ void EmitOpcode(const X86EncodingMap* entry);
+ void EmitPrefixAndOpcode(const X86EncodingMap* entry);
+ void EmitDisp(uint8_t base, int disp);
+ void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp);
+ void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale, int disp);
+ void EmitImm(const X86EncodingMap* entry, int imm);
void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
+ void EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index,
+ int scale, int disp);
void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
void EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
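Editor's note: the new EmitModrmDisp/EmitModrmSibDisp helpers declared above factor out the ModRM byte and displacement emission that was previously duplicated per opcode form. Below is a minimal sketch of the layout such a helper is assumed to produce; the names are illustrative, not the ART API, and the SIB byte required when the base is ESP is omitted for brevity.

  #include <cstdint>
  #include <vector>

  // Sketch only: the mod field picks the displacement width, reg_or_opcode
  // fills bits 5..3, base fills bits 2..0. A base of ESP additionally needs a
  // SIB byte, and a base of EBP cannot use the no-displacement form.
  void EmitModrmDispSketch(std::vector<uint8_t>* code, uint8_t reg_or_opcode,
                           uint8_t base, int disp) {
    if (disp == 0 && base != 5 /* EBP */) {
      code->push_back((0 << 6) | (reg_or_opcode << 3) | base);   // mod=00, no disp
    } else if (disp >= -128 && disp <= 127) {
      code->push_back((1 << 6) | (reg_or_opcode << 3) | base);   // mod=01
      code->push_back(static_cast<uint8_t>(disp));                // disp8
    } else {
      code->push_back((2 << 6) | (reg_or_opcode << 3) | base);   // mod=10
      for (int i = 0; i < 4; ++i) {                               // disp32, little-endian
        code->push_back((disp >> (8 * i)) & 0xFF);
      }
    }
  }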
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b65fe54..0133a0a 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -293,7 +293,26 @@
// If is_long, high half is in info->args[7]
if (is_long) {
- LOG(FATAL) << "CAS64: Not implemented";
+ FlushAllRegs();
+ LockCallTemps();
+ NewLIR1(kX86Push32R, rDI);
+ MarkTemp(rDI);
+ LockTemp(rDI);
+ NewLIR1(kX86Push32R, rSI);
+ MarkTemp(rSI);
+ LockTemp(rSI);
+ LoadValueDirectFixed(rl_src_obj, rDI);
+ LoadValueDirectFixed(rl_src_offset, rSI);
+ LoadValueDirectWideFixed(rl_src_expected, rAX, rDX);
+ LoadValueDirectWideFixed(rl_src_new_value, rBX, rCX);
+ NewLIR4(kX86LockCmpxchg8bA, rDI, rSI, 0, 0);
+ FreeTemp(rSI);
+ UnmarkTemp(rSI);
+ NewLIR1(kX86Pop32R, rSI);
+ FreeTemp(rDI);
+ UnmarkTemp(rDI);
+ NewLIR1(kX86Pop32R, rDI);
+ FreeCallTemps();
} else {
// EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
FlushReg(r0);
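Editor's note: the new CAS64 path above pins EDX:EAX to the expected value and ECX:EBX to the new value before emitting kX86LockCmpxchg8bA. A hedged sketch of the semantics that sequence implements, using a compiler builtin instead of the JIT-emitted instruction (on 32-bit x86 such a builtin is typically lowered to LOCK CMPXCHG8B with the same register constraints):

  #include <cstdint>

  // Illustrative equivalent of the intrinsic's effect; not ART code.
  bool Cas64(int64_t* addr, int64_t expected, int64_t new_value) {
    return __atomic_compare_exchange_n(addr, &expected, new_value,
                                       /*weak=*/ false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }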
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index cbd0f15..0b8c07e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -165,6 +165,10 @@
if (flags & REG_USED) {
SetupRegMask(&lir->u.m.use_mask, rDX);
}
+
+ if (flags & REG_USEB) {
+ SetupRegMask(&lir->u.m.use_mask, rBX);
+ }
}
/* For dumping instructions */
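Editor's note: REG_USEB (backed by the new kRegUseB enum value) marks an implicit read of EBX, which LOCK CMPXCHG8B needs for the low half of the new value. A sketch of the flag-to-fixed-register pattern this extends; the names and bit assignments below are hypothetical, only the shape of the use-mask setup is mirrored.

  #include <cstdint>

  // Hypothetical mirror of the use-mask setup: each REG_USE* flag on an
  // encoding marks an implicit fixed-register read by that instruction.
  void AddImplicitUses(uint64_t flags, uint64_t* use_mask) {
    const uint64_t kUseA = 1u << 0, kUseD = 1u << 1, kUseB = 1u << 2;  // assumed bits
    const int rAX = 0, rDX = 2, rBX = 3;                               // x86 encodings
    if (flags & kUseA) *use_mask |= (1ULL << rAX);
    if (flags & kUseD) *use_mask |= (1ULL << rDX);
    if (flags & kUseB) *use_mask |= (1ULL << rBX);  // new: EBX for cmpxchg8b
  }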
diff --git a/compiler/dex/quick/x86/x86_dex_file_method_inliner.cc b/compiler/dex/quick/x86/x86_dex_file_method_inliner.cc
deleted file mode 100644
index f0e76c9..0000000
--- a/compiler/dex/quick/x86/x86_dex_file_method_inliner.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "dex/compiler_enums.h"
-
-#include "x86_dex_file_method_inliner.h"
-
-namespace art {
-
-const DexFileMethodInliner::IntrinsicDef X86DexFileMethodInliner::kIntrinsicMethods[] = {
-#define INTRINSIC(c, n, p, o, d) \
- { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, d } }
-
- INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0),
- INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, 0),
- INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
- INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0),
-
- INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord),
- INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong),
- INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf),
-
- INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
- INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0),
- INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0),
- INTRINSIC(JavaLangStrictMath, Abs, J_J, kIntrinsicAbsLong, 0),
- INTRINSIC(JavaLangMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- INTRINSIC(JavaLangStrictMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin),
- INTRINSIC(JavaLangMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- INTRINSIC(JavaLangStrictMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax),
- // INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
- // INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
-
- INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
- INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
- INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
- INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
- INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
- INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength),
-
- INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
-
- INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord),
- INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong),
- INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf),
- INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord),
- INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong),
- INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf),
-
- INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas,
- kIntrinsicFlagNone),
- // INTRINSIC(SunMiscUnsafe, CompareAndSwapLong, ObjectJJJ_Z, kIntrinsicCas,
- // kIntrinsicFlagIsLong),
- INTRINSIC(SunMiscUnsafe, CompareAndSwapObject, ObjectJObjectObject_Z, kIntrinsicCas,
- kIntrinsicFlagIsObject),
-
-#define UNSAFE_GET_PUT(type, code, type_flags) \
- INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
- INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags), \
- INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsVolatile), \
- INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
- type_flags | kIntrinsicFlagIsOrdered)
-
- UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone),
- UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong),
-
- // UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject),
- // PutObject: "TODO: fix X86, it exhausts registers for card marking."
- INTRINSIC(SunMiscUnsafe, GetObject, ObjectJ_Object, kIntrinsicUnsafeGet,
- kIntrinsicFlagNone),
- INTRINSIC(SunMiscUnsafe, GetObjectVolatile, ObjectJ_Object, kIntrinsicUnsafeGet,
- kIntrinsicFlagIsVolatile),
-#undef UNSAFE_GET_PUT
-
-#undef INTRINSIC
-};
-
-X86DexFileMethodInliner::X86DexFileMethodInliner() {
-}
-
-X86DexFileMethodInliner::~X86DexFileMethodInliner() {
-}
-
-void X86DexFileMethodInliner::FindIntrinsics(const DexFile* dex_file) {
- IndexCache cache;
- DoFindIntrinsics(dex_file, &cache, kIntrinsicMethods, arraysize(kIntrinsicMethods));
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/x86/x86_dex_file_method_inliner.h b/compiler/dex/quick/x86/x86_dex_file_method_inliner.h
deleted file mode 100644
index 7813e44..0000000
--- a/compiler/dex/quick/x86/x86_dex_file_method_inliner.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_X86_X86_DEX_FILE_METHOD_INLINER_H_
-#define ART_COMPILER_DEX_QUICK_X86_X86_DEX_FILE_METHOD_INLINER_H_
-
-#include "dex/quick/dex_file_method_inliner.h"
-
-namespace art {
-
-class X86DexFileMethodInliner : public DexFileMethodInliner {
- public:
- X86DexFileMethodInliner();
- ~X86DexFileMethodInliner();
-
- void FindIntrinsics(const DexFile* dex_file);
-
- private:
- static const IntrinsicDef kIntrinsicMethods[];
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_X86_X86_DEX_FILE_METHOD_INLINER_H_
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index bbcae92..5fe76fe 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -314,6 +314,7 @@
UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
kX86Bswap32R,
+ kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
opcode ## RR, opcode ## RM, opcode ## RA
@@ -355,6 +356,7 @@
Binary0fOpCode(kX86Imul32), // 32bit multiply
kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR, // compare and exchange
kX86LockCmpxchgMR, kX86LockCmpxchgAR, // locked compare and exchange
+ kX86LockCmpxchg8bM, kX86LockCmpxchg8bA, // locked compare and exchange
Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 9ed65cd..8781c7a 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -520,6 +520,13 @@
case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; break;
case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
+ case 0xC7:
+ static const char* x0FxC7_opcodes[] = { "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7" };
+ modrm_opcodes = x0FxC7_opcodes;
+ has_modrm = true;
+ reg_is_opcode = true;
+ store = true;
+ break;
case 0xC8: case 0xC9: case 0xCA: case 0xCB: case 0xCC: case 0xCD: case 0xCE: case 0xCF:
opcode << "bswap";
reg_in_opcode = true;
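Editor's note: for the 0F C7 escape, the ModRM reg field is an opcode extension rather than a register operand (hence reg_is_opcode above), and /1 selects CMPXCHG8B. A small sketch of picking a mnemonic from that field; illustrative only, not the disassembler's actual control flow.

  #include <cstdint>

  // /1 (reg field == 1) selects cmpxchg8b; the other 0F C7 encodings are
  // left as "unknown", matching the table added above.
  const char* DecodeOfC7(uint8_t modrm) {
    static const char* kOps[8] = { "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7",
                                   "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7",
                                   "unknown-0f-c7", "unknown-0f-c7" };
    return kOps[(modrm >> 3) & 0x7];
  }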
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 463e673..429c516 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -100,7 +100,7 @@
}
UniquePtr<ZipEntry> zip_entry(zip_archive->Find(kClassesDex, error_msg));
if (zip_entry.get() == NULL) {
- *error_msg = StringPrintf("Zip archive '%s' doesn\'t contain %s (error msg: %s)", filename,
+ *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
kClassesDex, error_msg->c_str());
return false;
}
@@ -177,7 +177,7 @@
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat \'%s\' failed: %s", location, strerror(errno));
+ *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location, strerror(errno));
return nullptr;
}
if (S_ISDIR(sbuf.st_mode)) {
@@ -194,7 +194,7 @@
if (map->Size() < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
- "DexFile: failed to open dex file \'%s\' that is too short to have a header", location);
+ "DexFile: failed to open dex file '%s' that is too short to have a header", location);
return nullptr;
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 2806f94..3ab8888 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -57,7 +57,7 @@
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
DCHECK(throw_location.GetMethod() == referrer);
self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;",
- "Found type %s; filled-new-array not implemented for anything but \'int\'",
+ "Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(klass).c_str());
}
return nullptr; // Failure
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 8a2c899..4f19964 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -36,11 +36,7 @@
ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
const uint8_t* gc_map = method->GetNativeGcMap();
- uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
- (gc_map[1] << 16) |
- (gc_map[2] << 8) |
- (gc_map[3] << 0));
- verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map);
const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
for (size_t reg = 0; reg < num_regs; ++reg) {
if (TestBitmap(reg, reg_bitmap)) {
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 6b597ae..60c3b1c 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -31,10 +31,6 @@
class SpaceTest : public CommonTest {
public:
- void SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
- int round, size_t growth_limit);
- void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
-
void AddSpace(ContinuousSpace* space) {
// For RosAlloc, revoke the thread local runs before moving onto a
// new alloc space.
@@ -55,6 +51,26 @@
arr->SetLength(length);
EXPECT_EQ(arr->SizeOf(), size);
}
+
+ static MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+ }
+ static MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+ }
+
+ typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin);
+ void InitTestBody(CreateSpaceFn create_space);
+ void ZygoteSpaceTestBody(CreateSpaceFn create_space);
+ void AllocAndFreeTestBody(CreateSpaceFn create_space);
+ void AllocAndFreeListTestBody(CreateSpaceFn create_space);
+
+ void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
+ int round, size_t growth_limit);
+ void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};
static size_t test_rand(size_t* seed) {
@@ -62,128 +78,143 @@
return *seed;
}
-TEST_F(SpaceTest, Init) {
+void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
{
// Init < max == growth
- UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init == max == growth
- UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init > max == growth
- UniquePtr<Space> space(DlMallocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Growth == init < max
- UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Growth < init < max
- UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Init < growth < max
- UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init < max < growth
- UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
}
+TEST_F(SpaceTest, Init_DlMallocSpace) {
+ InitTestBody(SpaceTest::CreateDlMallocSpace);
+}
+TEST_F(SpaceTest, Init_RosAllocSpace) {
+ InitTestBody(SpaceTest::CreateRosAllocSpace);
+}
+
// TODO: This test is not very good, we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
-TEST_F(SpaceTest, ZygoteSpace) {
- size_t dummy = 0;
- MallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
- ASSERT_TRUE(space != NULL);
+void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
+ size_t dummy = 0;
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ ASSERT_TRUE(space != NULL);
- // Make space findable to the heap, will also delete space when runtime is cleaned up
- AddSpace(space);
- Thread* self = Thread::Current();
+ // Make space findable to the heap, will also delete space when runtime is cleaned up
+ AddSpace(space);
+ Thread* self = Thread::Current();
- // Succeeds, fits without adjusting the footprint limit.
- mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
- EXPECT_TRUE(ptr1 != NULL);
- InstallClass(ptr1, 1 * MB);
+ // Succeeds, fits without adjusting the footprint limit.
+ mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
+ EXPECT_TRUE(ptr1 != NULL);
+ InstallClass(ptr1, 1 * MB);
- // Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr2 == NULL);
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
+ EXPECT_TRUE(ptr2 == NULL);
- // Succeeds, adjusts the footprint.
- size_t ptr3_bytes_allocated;
- mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
- EXPECT_TRUE(ptr3 != NULL);
- EXPECT_LE(8U * MB, ptr3_bytes_allocated);
- InstallClass(ptr3, 8 * MB);
+ // Succeeds, adjusts the footprint.
+ size_t ptr3_bytes_allocated;
+ mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
+ EXPECT_TRUE(ptr3 != NULL);
+ EXPECT_LE(8U * MB, ptr3_bytes_allocated);
+ InstallClass(ptr3, 8 * MB);
- // Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr4 == NULL);
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
+ EXPECT_TRUE(ptr4 == NULL);
- // Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr5 == NULL);
+ // Also fails, requires a higher allowed footprint.
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
+ EXPECT_TRUE(ptr5 == NULL);
- // Release some memory.
- size_t free3 = space->AllocationSize(ptr3);
- EXPECT_EQ(free3, ptr3_bytes_allocated);
- EXPECT_EQ(free3, space->Free(self, ptr3));
- EXPECT_LE(8U * MB, free3);
+ // Release some memory.
+ size_t free3 = space->AllocationSize(ptr3);
+ EXPECT_EQ(free3, ptr3_bytes_allocated);
+ EXPECT_EQ(free3, space->Free(self, ptr3));
+ EXPECT_LE(8U * MB, free3);
- // Succeeds, now that memory has been freed.
- mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
- EXPECT_TRUE(ptr6 != NULL);
- InstallClass(ptr6, 9 * MB);
+ // Succeeds, now that memory has been freed.
+ mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
+ EXPECT_TRUE(ptr6 != NULL);
+ InstallClass(ptr6, 9 * MB);
- // Final clean up.
- size_t free1 = space->AllocationSize(ptr1);
- space->Free(self, ptr1);
- EXPECT_LE(1U * MB, free1);
+ // Final clean up.
+ size_t free1 = space->AllocationSize(ptr1);
+ space->Free(self, ptr1);
+ EXPECT_LE(1U * MB, free1);
- // Make sure that the zygote space isn't directly at the start of the space.
- space->Alloc(self, 1U * MB, &dummy);
- space = space->CreateZygoteSpace("alloc space");
+ // Make sure that the zygote space isn't directly at the start of the space.
+ space->Alloc(self, 1U * MB, &dummy);
+ space = space->CreateZygoteSpace("alloc space");
- // Make space findable to the heap, will also delete space when runtime is cleaned up
- AddSpace(space);
+ // Make space findable to the heap, will also delete space when runtime is cleaned up
+ AddSpace(space);
- // Succeeds, fits without adjusting the footprint limit.
- ptr1 = space->Alloc(self, 1 * MB, &dummy);
- EXPECT_TRUE(ptr1 != NULL);
- InstallClass(ptr1, 1 * MB);
+ // Succeeds, fits without adjusting the footprint limit.
+ ptr1 = space->Alloc(self, 1 * MB, &dummy);
+ EXPECT_TRUE(ptr1 != NULL);
+ InstallClass(ptr1, 1 * MB);
- // Fails, requires a higher footprint limit.
- ptr2 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr2 == NULL);
+ // Fails, requires a higher footprint limit.
+ ptr2 = space->Alloc(self, 8 * MB, &dummy);
+ EXPECT_TRUE(ptr2 == NULL);
- // Succeeds, adjusts the footprint.
- ptr3 = space->AllocWithGrowth(self, 2 * MB, &dummy);
- EXPECT_TRUE(ptr3 != NULL);
- InstallClass(ptr3, 2 * MB);
- space->Free(self, ptr3);
+ // Succeeds, adjusts the footprint.
+ ptr3 = space->AllocWithGrowth(self, 2 * MB, &dummy);
+ EXPECT_TRUE(ptr3 != NULL);
+ InstallClass(ptr3, 2 * MB);
+ space->Free(self, ptr3);
- // Final clean up.
- free1 = space->AllocationSize(ptr1);
- space->Free(self, ptr1);
- EXPECT_LE(1U * MB, free1);
+ // Final clean up.
+ free1 = space->AllocationSize(ptr1);
+ space->Free(self, ptr1);
+ EXPECT_LE(1U * MB, free1);
}
-TEST_F(SpaceTest, AllocAndFree) {
+TEST_F(SpaceTest, ZygoteSpace_DlMallocSpace) {
+ ZygoteSpaceTestBody(SpaceTest::CreateDlMallocSpace);
+}
+
+TEST_F(SpaceTest, ZygoteSpace_RosAllocSpace) {
+ ZygoteSpaceTestBody(SpaceTest::CreateRosAllocSpace);
+}
+
+void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
size_t dummy = 0;
- DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
Thread* self = Thread::Current();
@@ -231,6 +262,13 @@
EXPECT_LE(1U * MB, free1);
}
+TEST_F(SpaceTest, AllocAndFree_DlMallocSpace) {
+ AllocAndFreeTestBody(SpaceTest::CreateDlMallocSpace);
+}
+TEST_F(SpaceTest, AllocAndFree_RosAllocSpace) {
+ AllocAndFreeTestBody(SpaceTest::CreateRosAllocSpace);
+}
+
TEST_F(SpaceTest, LargeObjectTest) {
size_t rand_seed = 0;
for (size_t i = 0; i < 2; ++i) {
@@ -292,8 +330,8 @@
}
}
-TEST_F(SpaceTest, AllocAndFreeList) {
- DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -332,7 +370,14 @@
}
}
-void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
+TEST_F(SpaceTest, AllocAndFreeList_DlMallocSpace) {
+ AllocAndFreeListTestBody(SpaceTest::CreateDlMallocSpace);
+}
+TEST_F(SpaceTest, AllocAndFreeList_RosAllocSpace) {
+ AllocAndFreeListTestBody(SpaceTest::CreateRosAllocSpace);
+}
+
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
int round, size_t growth_limit) {
if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
@@ -493,11 +538,11 @@
EXPECT_LE(space->Size(), growth_limit);
}
-void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size) {
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- DlMallocSpace* space(DlMallocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
+ MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, NULL));
ASSERT_TRUE(space != NULL);
// Basic sanity
@@ -518,16 +563,25 @@
}
#define TEST_SizeFootPrintGrowthLimitAndTrim(name, size) \
- TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
- SizeFootPrintGrowthLimitAndTrimDriver(size); \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name##_DlMallocSpace) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(size, SpaceTest::CreateDlMallocSpace); \
} \
- TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
- SizeFootPrintGrowthLimitAndTrimDriver(-size); \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name##_DlMallocSpace) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(-size, SpaceTest::CreateDlMallocSpace); \
+ } \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name##_RosAllocSpace) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(size, SpaceTest::CreateRosAllocSpace); \
+ } \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name##_RosAllocSpace) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(-size, SpaceTest::CreateRosAllocSpace); \
}
// Each size test is its own test so that we get a fresh heap each time
-TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B) {
- SizeFootPrintGrowthLimitAndTrimDriver(12);
+TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B_DlMallocSpace) {
+ SizeFootPrintGrowthLimitAndTrimDriver(12, SpaceTest::CreateDlMallocSpace);
+}
+TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B_RosAllocSpace) {
+ SizeFootPrintGrowthLimitAndTrimDriver(12, SpaceTest::CreateRosAllocSpace);
}
TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16)
TEST_SizeFootPrintGrowthLimitAndTrim(24B, 24)
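Editor's note: the space_test.cc refactoring above turns each test into a shared *TestBody method parameterized by a CreateSpaceFn factory, so the same body runs once against DlMallocSpace and once against RosAllocSpace. A minimal sketch of the pattern with stand-in types (not the gtest fixtures used here):

  #include <cstddef>
  #include <string>

  struct FakeSpace {};  // stand-in for MallocSpace

  typedef FakeSpace* (*CreateSpaceFn)(const std::string& name, size_t initial,
                                      size_t growth_limit, size_t capacity);

  FakeSpace* CreateA(const std::string&, size_t, size_t, size_t) { return new FakeSpace; }
  FakeSpace* CreateB(const std::string&, size_t, size_t, size_t) { return new FakeSpace; }

  void AllocTestBody(CreateSpaceFn create_space) {
    FakeSpace* space = create_space("test", 4, 16, 16);
    // ... exercise allocation/free once per allocator ...
    delete space;
  }

  // Each allocator gets its own named test so failures stay attributable:
  //   TEST_F(SpaceTest, AllocAndFree_DlMallocSpace) { AllocTestBody(CreateA); }
  //   TEST_F(SpaceTest, AllocAndFree_RosAllocSpace) { AllocTestBody(CreateB); }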
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ca066b6..5b9e55f 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -188,7 +188,7 @@
} else {
self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
"Ljava/lang/InternalError;",
- "Found type %s; filled-new-array not implemented for anything but \'int\'",
+ "Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(componentClass).c_str());
}
return false;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fa49faa..715be99 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1994,11 +1994,7 @@
// Portable path use DexGcMap and store in Method.native_gc_map_.
const uint8_t* gc_map = m->GetNativeGcMap();
CHECK(gc_map != NULL) << PrettyMethod(m);
- uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
- (gc_map[1] << 16) |
- (gc_map[2] << 8) |
- (gc_map[3] << 0));
- verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map);
uint32_t dex_pc = GetDexPc();
const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
DCHECK(reg_bitmap != NULL);
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index 2a95ba2..4570ae8 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -38,11 +38,13 @@
// Lightweight wrapper for Dex PC to reference bit maps.
class DexPcToReferenceMap {
public:
- DexPcToReferenceMap(const uint8_t* data, size_t data_length) : data_(data) {
+ explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
CHECK(data_ != NULL);
- // Check the size of the table agrees with the number of entries
- size_t data_size = data_length - 4;
- DCHECK_EQ(EntryWidth() * NumEntries(), data_size);
+ }
+
+ // The total size of the reference bit map including header.
+ size_t RawSize() const {
+ return EntryWidth() * NumEntries() + 4u /* header */;
}
// The number of entries in the table
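Editor's note: the 4-byte big-endian length prefix the verifier used to emit is dropped because the map's size is fully determined by its header; RawSize() recomputes EntryWidth() * NumEntries() plus the 4-byte header on demand. A sketch of the old prefix decode that the runtime call sites above no longer need, taken from the removed code:

  #include <cstddef>
  #include <cstdint>

  // Old layout: [4-byte big-endian length][format/size header][entries...]
  // New layout: [format/size header][entries...]
  size_t OldPrefixedLength(const uint8_t* gc_map) {
    return static_cast<uint32_t>((gc_map[0] << 24) | (gc_map[1] << 16) |
                                 (gc_map[2] << 8)  | (gc_map[3] << 0));
  }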
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1e45c60..5f5d865 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1068,13 +1068,13 @@
bool compile = IsCandidateForCompilation(ref, method_access_flags_);
if (compile) {
/* Generate a register map and add it to the method. */
- const std::vector<uint8_t>* dex_gc_map = GenerateLengthPrefixedGcMap();
+ const std::vector<uint8_t>* dex_gc_map = GenerateGcMap();
if (dex_gc_map == NULL) {
DCHECK_NE(failures_.size(), 0U);
return false; // Not a real failure, but a failure to encode
}
if (kIsDebugBuild) {
- VerifyLengthPrefixedGcMap(*dex_gc_map);
+ VerifyGcMap(*dex_gc_map);
}
verifier::MethodVerifier::SetDexGcMap(ref, dex_gc_map);
}
@@ -4054,7 +4054,7 @@
return pc_to_concrete_method_map.release();
}
-const std::vector<uint8_t>* MethodVerifier::GenerateLengthPrefixedGcMap() {
+const std::vector<uint8_t>* MethodVerifier::GenerateGcMap() {
size_t num_entries, ref_bitmap_bits, pc_bits;
ComputeGcMapSizes(&num_entries, &ref_bitmap_bits, &pc_bits);
// There's a single byte to encode the size of each bitmap
@@ -4092,12 +4092,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Failed to encode GC map (size=" << table_size << ")";
return NULL;
}
- table->reserve(table_size + 4); // table_size plus the length prefix
- // Write table size
- table->push_back((table_size & 0xff000000) >> 24);
- table->push_back((table_size & 0x00ff0000) >> 16);
- table->push_back((table_size & 0x0000ff00) >> 8);
- table->push_back((table_size & 0x000000ff) >> 0);
+ table->reserve(table_size);
// Write table header
table->push_back(format | ((ref_bitmap_bytes >> DexPcToReferenceMap::kRegMapFormatShift) &
~DexPcToReferenceMap::kRegMapFormatMask));
@@ -4115,18 +4110,15 @@
line->WriteReferenceBitMap(*table, ref_bitmap_bytes);
}
}
- DCHECK_EQ(table->size(), table_size + 4); // table_size plus the length prefix
+ DCHECK_EQ(table->size(), table_size);
return table;
}
-void MethodVerifier::VerifyLengthPrefixedGcMap(const std::vector<uint8_t>& data) {
+void MethodVerifier::VerifyGcMap(const std::vector<uint8_t>& data) {
// Check that for every GC point there is a map entry, there aren't entries for non-GC points,
// that the table data is well formed and all references are marked (or not) in the bitmap
- DCHECK_GE(data.size(), 4u);
- size_t table_size = data.size() - 4u;
- DCHECK_EQ(table_size, static_cast<size_t>((data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | (data[3] << 0)));
- DexPcToReferenceMap map(&data[4], table_size);
+ DexPcToReferenceMap map(&data[0]);
+ DCHECK_EQ(data.size(), map.RawSize());
size_t map_index = 0;
for (size_t i = 0; i < code_item_->insns_size_in_code_units_; i++) {
const uint8_t* reg_bitmap = map.FindBitMap(i, false);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index f72898e..892b7a8 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -614,10 +614,10 @@
* encode it in some clever fashion.
* Returns a pointer to a newly-allocated RegisterMap, or NULL on failure.
*/
- const std::vector<uint8_t>* GenerateLengthPrefixedGcMap();
+ const std::vector<uint8_t>* GenerateGcMap();
// Verify that the GC map associated with method_ is well formed
- void VerifyLengthPrefixedGcMap(const std::vector<uint8_t>& data);
+ void VerifyGcMap(const std::vector<uint8_t>& data);
// Compute sizes for GC map data
void ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc);