Merge "ART: Fix NullCheckElimination, BBCombine, and SplitBlock"
diff --git a/compiler/Android.mk b/compiler/Android.mk
index eb9ad47..84176a1 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -180,7 +180,8 @@
driver/compiler_options.h \
image_writer.h \
optimizing/locations.h \
- utils/arm/constants_arm.h
+ utils/arm/constants_arm.h \
+ utils/dex_instruction_utils.h
# $(1): target or host
# $(2): ndebug or debug
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index d311bc7..dbe9850 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -15,7 +15,6 @@
*/
#include "global_value_numbering.h"
-
#include "local_value_numbering.h"
namespace art {
@@ -31,8 +30,6 @@
modifications_allowed_(true),
mode_(mode),
global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
- field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
- field_index_reverse_map_(allocator->Adapter()),
array_location_map_(ArrayLocationComparator(), allocator->Adapter()),
array_location_reverse_map_(allocator->Adapter()),
ref_set_map_(std::less<ValueNameSet>(), allocator->Adapter()),
@@ -145,19 +142,6 @@
return change;
}
-uint16_t GlobalValueNumbering::GetFieldId(const MirFieldInfo& field_info, uint16_t type) {
- FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex(), type };
- auto lb = field_index_map_.lower_bound(key);
- if (lb != field_index_map_.end() && !field_index_map_.key_comp()(key, lb->first)) {
- return lb->second;
- }
- DCHECK_LT(field_index_map_.size(), kNoValue);
- uint16_t id = field_index_map_.size();
- auto it = field_index_map_.PutBefore(lb, key, id);
- field_index_reverse_map_.push_back(&*it);
- return id;
-}
-
uint16_t GlobalValueNumbering::GetArrayLocation(uint16_t base, uint16_t index) {
auto cmp = array_location_map_.key_comp();
ArrayLocation key = { base, index };
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 72d1112..8a93afb 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -39,6 +39,12 @@
cu->mir_graph->GetMaxNestedLoops() > kMaxAllowedNestedLoops;
}
+ // Instance and static field id map is held by MIRGraph to avoid multiple recalculations
+ // when doing LVN.
+ template <typename Container> // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
+ static uint16_t* PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
+ const Container& field_infos);
+
GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
~GlobalValueNumbering();
@@ -114,34 +120,24 @@
return (it != global_value_map_.end() && it->second == value);
}
- // FieldReference represents a unique resolved field.
- struct FieldReference {
- const DexFile* dex_file;
- uint16_t field_idx;
- uint16_t type; // See comments for LocalValueNumbering::kFieldTypeCount.
- };
+ // Get an instance field id.
+ uint16_t GetIFieldId(MIR* mir) {
+ return GetMirGraph()->GetGvnIFieldId(mir);
+ }
- struct FieldReferenceComparator {
- bool operator()(const FieldReference& lhs, const FieldReference& rhs) const {
- if (lhs.field_idx != rhs.field_idx) {
- return lhs.field_idx < rhs.field_idx;
- }
- // If the field_idx and dex_file match, the type must also match.
- DCHECK(lhs.dex_file != rhs.dex_file || lhs.type == rhs.type);
- return lhs.dex_file < rhs.dex_file;
- }
- };
+ // Get a static field id.
+ uint16_t GetSFieldId(MIR* mir) {
+ return GetMirGraph()->GetGvnSFieldId(mir);
+ }
- // Maps field key to field id for resolved fields.
- typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+ // Get an instance field type based on field id.
+ uint16_t GetIFieldType(uint16_t field_id) {
+ return static_cast<uint16_t>(GetMirGraph()->GetIFieldLoweringInfo(field_id).MemAccessType());
+ }
- // Get a field id.
- uint16_t GetFieldId(const MirFieldInfo& field_info, uint16_t type);
-
- // Get a field type based on field id.
- uint16_t GetFieldType(uint16_t field_id) {
- DCHECK_LT(field_id, field_index_reverse_map_.size());
- return field_index_reverse_map_[field_id]->first.type;
+ // Get a static field type based on field id.
+ uint16_t GetSFieldType(uint16_t field_id) {
+ return static_cast<uint16_t>(GetMirGraph()->GetSFieldLoweringInfo(field_id).MemAccessType());
}
struct ArrayLocation {
@@ -239,8 +235,6 @@
Mode mode_;
ValueMap global_value_map_;
- FieldIndexMap field_index_map_;
- ScopedArenaVector<const FieldIndexMap::value_type*> field_index_reverse_map_;
ArrayLocationMap array_location_map_;
ScopedArenaVector<const ArrayLocationMap::value_type*> array_location_reverse_map_;
RefSetIdMap ref_set_map_;
@@ -268,6 +262,32 @@
return last_value_;
}
+template <typename Container> // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
+uint16_t* GlobalValueNumbering::PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
+ const Container& field_infos) {
+ size_t size = field_infos.size();
+ uint16_t* field_ids = reinterpret_cast<uint16_t*>(allocator->Alloc(size * sizeof(uint16_t),
+ kArenaAllocMisc));
+ for (size_t i = 0u; i != size; ++i) {
+ size_t idx = i;
+ const MirFieldInfo& cur_info = field_infos[i];
+ if (cur_info.IsResolved()) {
+ for (size_t j = 0; j != i; ++j) {
+ const MirFieldInfo& prev_info = field_infos[j];
+ if (prev_info.IsResolved() &&
+ prev_info.DeclaringDexFile() == cur_info.DeclaringDexFile() &&
+ prev_info.DeclaringFieldIndex() == cur_info.DeclaringFieldIndex()) {
+ DCHECK_EQ(cur_info.MemAccessType(), prev_info.MemAccessType());
+ idx = j;
+ break;
+ }
+ }
+ }
+ field_ids[i] = idx;
+ }
+ return field_ids;
+}
+
} // namespace art
#endif // ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 35d5b99..a788129 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -17,6 +17,7 @@
#include "compiler_internals.h"
#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
+#include "dex/mir_field_info.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
#include "gtest/gtest.h"
@@ -32,6 +33,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_field_idx;
bool is_volatile;
+ DexMemAccessType type;
};
struct SFieldDef {
@@ -39,6 +41,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_field_idx;
bool is_volatile;
+ DexMemAccessType type;
};
struct BBDef {
@@ -137,12 +140,11 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ = 0u | // Without kFlagIsStatic.
- (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+ field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
}
cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
}
@@ -158,15 +160,14 @@
cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
- MirSFieldLoweringInfo field_info(def->field_idx);
+ MirSFieldLoweringInfo field_info(def->field_idx, def->type);
// Mark even unresolved fields as initialized.
- field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
- MirSFieldLoweringInfo::kFlagClassIsInitialized;
+ field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
// NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ |= (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+ field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
}
cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
@@ -238,12 +239,16 @@
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
- if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ if (IsInstructionIGetOrIPut(def->opcode)) {
ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
mir->meta.ifield_lowering_info = def->field_info;
- } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
mir->meta.sfield_lowering_info = def->field_info;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
} else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
mir->meta.phi_incoming = static_cast<BasicBlockId*>(
allocator_->Alloc(def->num_uses * sizeof(BasicBlockId), kArenaAllocDFInfo));
@@ -288,6 +293,10 @@
cu_.mir_graph->ComputeDominators();
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
+ cu_.mir_graph->temp_.gvn.ifield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
+ cu_.mir_graph->temp_.gvn.sfield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
ASSERT_TRUE(gvn_ == nullptr);
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
GlobalValueNumbering::kModeGvn));
@@ -498,18 +507,18 @@
TEST_F(GlobalValueNumberingTestDiamond, NonAliasingIFields) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Short.
- { 5u, 1u, 5u, false }, // Char.
- { 6u, 0u, 0u, false }, // Unresolved, Short.
- { 7u, 1u, 7u, false }, // Int.
- { 8u, 0u, 0u, false }, // Unresolved, Int.
- { 9u, 1u, 9u, false }, // Int.
- { 10u, 1u, 10u, false }, // Int.
- { 11u, 1u, 11u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessShort },
+ { 5u, 1u, 5u, false, kDexMemAccessChar },
+ { 6u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 7u, 1u, 7u, false, kDexMemAccessWord },
+ { 8u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved.
+ { 9u, 1u, 9u, false, kDexMemAccessWord },
+ { 10u, 1u, 10u, false, kDexMemAccessWord },
+ { 11u, 1u, 11u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -604,15 +613,15 @@
TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsSingleObject) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Short.
- { 5u, 1u, 5u, false }, // Char.
- { 6u, 0u, 0u, false }, // Unresolved, Short.
- { 7u, 1u, 7u, false }, // Int.
- { 8u, 1u, 8u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessShort },
+ { 5u, 1u, 5u, false, kDexMemAccessChar },
+ { 6u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 7u, 1u, 7u, false, kDexMemAccessWord },
+ { 8u, 1u, 8u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -671,15 +680,15 @@
TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsTwoObjects) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Short.
- { 5u, 1u, 5u, false }, // Char.
- { 6u, 0u, 0u, false }, // Unresolved, Short.
- { 7u, 1u, 7u, false }, // Int.
- { 8u, 1u, 8u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessShort },
+ { 5u, 1u, 5u, false, kDexMemAccessChar },
+ { 6u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 7u, 1u, 7u, false, kDexMemAccessWord },
+ { 8u, 1u, 8u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -740,15 +749,15 @@
TEST_F(GlobalValueNumberingTestDiamond, SFields) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Short.
- { 5u, 1u, 5u, false }, // Char.
- { 6u, 0u, 0u, false }, // Unresolved, Short.
- { 7u, 1u, 7u, false }, // Int.
- { 8u, 1u, 8u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessShort },
+ { 5u, 1u, 5u, false, kDexMemAccessChar },
+ { 6u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 7u, 1u, 7u, false, kDexMemAccessWord },
+ { 8u, 1u, 8u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -1078,18 +1087,18 @@
TEST_F(GlobalValueNumberingTestLoop, NonAliasingIFields) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Int.
- { 5u, 1u, 5u, false }, // Short.
- { 6u, 1u, 6u, false }, // Char.
- { 7u, 0u, 0u, false }, // Unresolved, Short.
- { 8u, 1u, 8u, false }, // Int.
- { 9u, 0u, 0u, false }, // Unresolved, Int.
- { 10u, 1u, 10u, false }, // Int.
- { 11u, 1u, 11u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessWord },
+ { 5u, 1u, 5u, false, kDexMemAccessShort },
+ { 6u, 1u, 6u, false, kDexMemAccessChar },
+ { 7u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 8u, 1u, 8u, false, kDexMemAccessWord },
+ { 9u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved.
+ { 10u, 1u, 10u, false, kDexMemAccessWord },
+ { 11u, 1u, 11u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -1201,14 +1210,14 @@
TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsSingleObject) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Int.
- { 4u, 1u, 4u, false }, // Int.
- { 5u, 1u, 5u, false }, // Short.
- { 6u, 1u, 6u, false }, // Char.
- { 7u, 0u, 0u, false }, // Unresolved, Short.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
+ { 4u, 1u, 4u, false, kDexMemAccessWord },
+ { 5u, 1u, 5u, false, kDexMemAccessShort },
+ { 6u, 1u, 6u, false, kDexMemAccessChar },
+ { 7u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -1272,14 +1281,14 @@
TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsTwoObjects) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
- { 3u, 1u, 3u, false }, // Short.
- { 4u, 1u, 4u, false }, // Char.
- { 5u, 0u, 0u, false }, // Unresolved, Short.
- { 6u, 1u, 6u, false }, // Int.
- { 7u, 1u, 7u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
+ { 3u, 1u, 3u, false, kDexMemAccessShort },
+ { 4u, 1u, 4u, false, kDexMemAccessChar },
+ { 5u, 0u, 0u, false, kDexMemAccessShort }, // Unresolved.
+ { 6u, 1u, 6u, false, kDexMemAccessWord },
+ { 7u, 1u, 7u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -1341,7 +1350,7 @@
TEST_F(GlobalValueNumberingTestLoop, IFieldToBaseDependency) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// For the IGET that loads sreg 3u using base 2u, the following IPUT creates a dependency
@@ -1366,9 +1375,9 @@
TEST_F(GlobalValueNumberingTestLoop, SFields) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
- { 2u, 1u, 2u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -1562,8 +1571,8 @@
TEST_F(GlobalValueNumberingTestCatch, IFields) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false },
- { 1u, 1u, 1u, false },
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
@@ -1608,8 +1617,8 @@
TEST_F(GlobalValueNumberingTestCatch, SFields) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false },
- { 1u, 1u, 1u, false },
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_SGET(3, Instruction::SGET, 0u, 0u),
@@ -1731,8 +1740,8 @@
TEST_F(GlobalValueNumberingTest, NullCheckIFields) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Object.
- { 1u, 1u, 1u, false }, // Object.
+ { 0u, 1u, 0u, false, kDexMemAccessObject }, // Object.
+ { 1u, 1u, 1u, false, kDexMemAccessObject }, // Object.
};
static const BBDef bbs[] = {
DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
@@ -1780,8 +1789,8 @@
TEST_F(GlobalValueNumberingTest, NullCheckSFields) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false }, // Object.
- { 1u, 1u, 1u, false }, // Object.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
+ { 1u, 1u, 1u, false, kDexMemAccessObject },
};
static const BBDef bbs[] = {
DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
@@ -1907,12 +1916,12 @@
TEST_F(GlobalValueNumberingTestDiamond, MergeSameValueInDifferentMemoryLocations) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false }, // Int.
- { 1u, 1u, 1u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessWord },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
@@ -1977,7 +1986,7 @@
// LVN's aliasing_array_value_map_'s load_value_map for BBs #9, #4, #5, #7 because of the
// DFS ordering of LVN evaluation.
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Object.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
};
static const BBDef bbs[] = {
DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
@@ -2015,7 +2024,7 @@
TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, IFieldAndPhi) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
};
static const MIRDef mirs[] = {
DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
@@ -2052,10 +2061,10 @@
TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, NullCheck) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
};
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
};
static const MIRDef mirs[] = {
DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
@@ -2143,7 +2152,7 @@
TEST_F(GlobalValueNumberingTestTwoNestedLoops, IFieldAndPhi) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, false }, // Int.
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
};
static const MIRDef mirs[] = {
DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index c1ce2ac..90b91bc 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -56,7 +56,7 @@
public:
static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
uint16_t field_id) {
- uint16_t type = gvn->GetFieldType(field_id);
+ uint16_t type = gvn->GetIFieldType(field_id);
return gvn->LookupValue(kAliasingIFieldStartVersionOp, field_id,
lvn->global_memory_version_, lvn->unresolved_ifield_version_[type]);
}
@@ -75,7 +75,7 @@
static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
uint16_t field_id, uint16_t base) {
// If the base/field_id is non-aliasing in lvn, use the non-aliasing value.
- uint16_t type = gvn->GetFieldType(field_id);
+ uint16_t type = gvn->GetIFieldType(field_id);
if (lvn->IsNonAliasingIField(base, field_id, type)) {
uint16_t loc = gvn->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
auto lb = lvn->non_aliasing_ifield_value_map_.find(loc);
@@ -89,7 +89,7 @@
static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
uint16_t field_id) {
- uint16_t type = gvn->GetFieldType(field_id);
+ uint16_t type = gvn->GetIFieldType(field_id);
return lvn->unresolved_ifield_version_[type] == lvn->merge_new_memory_version_ ||
lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
@@ -711,7 +711,7 @@
if (it != lvn->sfield_value_map_.end()) {
value_name = it->second;
} else {
- uint16_t type = gvn_->GetFieldType(field_id);
+ uint16_t type = gvn_->GetSFieldType(field_id);
value_name = gvn_->LookupValue(kResolvedSFieldOp, field_id,
lvn->unresolved_sfield_version_[type],
lvn->global_memory_version_);
@@ -1150,12 +1150,11 @@
}
uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
- // uint16_t type = opcode - Instruction::AGET;
uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, array);
uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
HandleRangeCheck(mir, array, index);
- uint16_t type = opcode - Instruction::AGET;
+ uint16_t type = AGetMemAccessType(static_cast<Instruction::Code>(opcode));
// Establish value number for loaded register.
uint16_t res;
if (IsNonAliasingArray(array, type)) {
@@ -1182,7 +1181,7 @@
uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
HandleRangeCheck(mir, array, index);
- uint16_t type = opcode - Instruction::APUT;
+ uint16_t type = APutMemAccessType(static_cast<Instruction::Code>(opcode));
uint16_t value = (opcode == Instruction::APUT_WIDE)
? GetOperandValueWide(mir->ssa_rep->uses[0])
: GetOperandValue(mir->ssa_rep->uses[0]);
@@ -1224,8 +1223,8 @@
// Use result s_reg - will be unique.
res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
} else {
- uint16_t type = opcode - Instruction::IGET;
- uint16_t field_id = gvn_->GetFieldId(field_info, type);
+ uint16_t type = IGetMemAccessType(static_cast<Instruction::Code>(opcode));
+ uint16_t field_id = gvn_->GetIFieldId(mir);
if (IsNonAliasingIField(base, field_id, type)) {
uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
@@ -1249,10 +1248,10 @@
}
void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
- uint16_t type = opcode - Instruction::IPUT;
int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
HandleNullCheck(mir, base);
+ uint16_t type = IPutMemAccessType(static_cast<Instruction::Code>(opcode));
const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
if (!field_info.IsResolved()) {
// Unresolved fields always alias with everything of the same type.
@@ -1272,7 +1271,7 @@
// Aliasing fields of the same type may have been overwritten.
auto it = aliasing_ifield_value_map_.begin(), end = aliasing_ifield_value_map_.end();
while (it != end) {
- if (gvn_->GetFieldType(it->first) != type) {
+ if (gvn_->GetIFieldType(it->first) != type) {
++it;
} else {
it = aliasing_ifield_value_map_.erase(it);
@@ -1282,7 +1281,7 @@
// Nothing to do, resolved volatile fields always get a new memory version anyway and
// can't alias with resolved non-volatile fields.
} else {
- uint16_t field_id = gvn_->GetFieldId(field_info, type);
+ uint16_t field_id = gvn_->GetIFieldId(mir);
uint16_t value = (opcode == Instruction::IPUT_WIDE)
? GetOperandValueWide(mir->ssa_rep->uses[0])
: GetOperandValue(mir->ssa_rep->uses[0]);
@@ -1333,8 +1332,8 @@
// Use result s_reg - will be unique.
res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
} else {
- uint16_t type = opcode - Instruction::SGET;
- uint16_t field_id = gvn_->GetFieldId(field_info, type);
+ uint16_t type = SGetMemAccessType(static_cast<Instruction::Code>(opcode));
+ uint16_t field_id = gvn_->GetSFieldId(mir);
auto lb = sfield_value_map_.lower_bound(field_id);
if (lb != sfield_value_map_.end() && lb->first == field_id) {
res = lb->second;
@@ -1362,7 +1361,7 @@
// Class initialization can call arbitrary functions, we need to wipe aliasing values.
HandleInvokeOrClInitOrAcquireOp(mir);
}
- uint16_t type = opcode - Instruction::SPUT;
+ uint16_t type = SPutMemAccessType(static_cast<Instruction::Code>(opcode));
if (!field_info.IsResolved()) {
// Unresolved fields always alias with everything of the same type.
// Use mir->offset as modifier; without elaborate inlining, it will be unique.
@@ -1373,7 +1372,7 @@
// Nothing to do, resolved volatile fields always get a new memory version anyway and
// can't alias with resolved non-volatile fields.
} else {
- uint16_t field_id = gvn_->GetFieldId(field_info, type);
+ uint16_t field_id = gvn_->GetSFieldId(mir);
uint16_t value = (opcode == Instruction::SPUT_WIDE)
? GetOperandValueWide(mir->ssa_rep->uses[0])
: GetOperandValue(mir->ssa_rep->uses[0]);
@@ -1397,7 +1396,7 @@
void LocalValueNumbering::RemoveSFieldsForType(uint16_t type) {
// Erase all static fields of this type from the sfield_value_map_.
for (auto it = sfield_value_map_.begin(), end = sfield_value_map_.end(); it != end; ) {
- if (gvn_->GetFieldType(it->first) == type) {
+ if (gvn_->GetSFieldType(it->first) == type) {
it = sfield_value_map_.erase(it);
} else {
++it;
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 824c323..51aa9d9 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -15,6 +15,7 @@
*/
#include "compiler_internals.h"
+#include "dex/mir_field_info.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
#include "gtest/gtest.h"
@@ -28,6 +29,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_field_idx;
bool is_volatile;
+ DexMemAccessType type;
};
struct SFieldDef {
@@ -35,6 +37,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_field_idx;
bool is_volatile;
+ DexMemAccessType type;
};
struct MIRDef {
@@ -90,12 +93,11 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ = 0u | // Without kFlagIsStatic.
- (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+ field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
}
cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
}
@@ -111,15 +113,14 @@
cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
- MirSFieldLoweringInfo field_info(def->field_idx);
+ MirSFieldLoweringInfo field_info(def->field_idx, def->type);
// Mark even unresolved fields as initialized.
- field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
- MirSFieldLoweringInfo::kFlagClassIsInitialized;
+ field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
// NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by LVN.
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ |= (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+ field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
}
cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
@@ -140,12 +141,16 @@
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
- if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ if (IsInstructionIGetOrIPut(def->opcode)) {
ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
mir->meta.ifield_lowering_info = def->field_info;
- } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
mir->meta.sfield_lowering_info = def->field_info;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
}
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
@@ -177,6 +182,13 @@
}
void PerformLVN() {
+ cu_.mir_graph->temp_.gvn.ifield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
+ cu_.mir_graph->temp_.gvn.sfield_ids_ = GlobalValueNumbering::PrepareGvnFieldIds(
+ allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
+ gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
+ GlobalValueNumbering::kModeLvn));
+ lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
value_names_.resize(mir_count_);
for (size_t i = 0; i != mir_count_; ++i) {
value_names_[i] = lvn_->GetValueNumber(&mirs_[i]);
@@ -196,9 +208,6 @@
value_names_() {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
- gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
- GlobalValueNumbering::kModeLvn));
- lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
}
ArenaPool pool_;
@@ -214,7 +223,7 @@
TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
@@ -237,8 +246,8 @@
TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
- { 2u, 1u, 2u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessObject },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
@@ -262,7 +271,7 @@
TEST_F(LocalValueNumberingTest, UniquePreserve1) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
@@ -284,7 +293,7 @@
TEST_F(LocalValueNumberingTest, UniquePreserve2) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 11u),
@@ -306,7 +315,7 @@
TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
@@ -331,8 +340,8 @@
TEST_F(LocalValueNumberingTest, Volatile) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
- { 2u, 1u, 2u, true },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, true, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET(Instruction::IGET, 0u, 10u, 1u), // Volatile.
@@ -358,9 +367,9 @@
TEST_F(LocalValueNumberingTest, UnresolvedIField) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false }, // Resolved field #1.
- { 2u, 1u, 2u, false }, // Resolved field #2.
- { 3u, 0u, 0u, false }, // Unresolved field.
+ { 1u, 1u, 1u, false, kDexMemAccessWord }, // Resolved field #1.
+ { 2u, 1u, 2u, false, kDexMemAccessWide }, // Resolved field #2.
+ { 3u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved field.
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
@@ -407,9 +416,9 @@
TEST_F(LocalValueNumberingTest, UnresolvedSField) {
static const SFieldDef sfields[] = {
- { 1u, 1u, 1u, false }, // Resolved field #1.
- { 2u, 1u, 2u, false }, // Resolved field #2.
- { 3u, 0u, 0u, false }, // Unresolved field.
+ { 1u, 1u, 1u, false, kDexMemAccessWord }, // Resolved field #1.
+ { 2u, 1u, 2u, false, kDexMemAccessWide }, // Resolved field #2.
+ { 3u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved field.
};
static const MIRDef mirs[] = {
DEF_SGET(Instruction::SGET, 0u, 0u), // Resolved field #1.
@@ -438,11 +447,11 @@
TEST_F(LocalValueNumberingTest, UninitializedSField) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false }, // Resolved field #1.
+ { 1u, 1u, 1u, false, kDexMemAccessWord }, // Resolved field #1.
};
static const SFieldDef sfields[] = {
- { 1u, 1u, 1u, false }, // Resolved field #1.
- { 2u, 1u, 2u, false }, // Resolved field #2; uninitialized.
+ { 1u, 1u, 1u, false, kDexMemAccessWord }, // Resolved field #1.
+ { 2u, 1u, 2u, false, kDexMemAccessWord }, // Resolved field #2; uninitialized.
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 200u),
@@ -487,11 +496,11 @@
TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
- { 2u, 1u, 2u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
};
static const SFieldDef sfields[] = {
- { 3u, 1u, 3u, false },
+ { 3u, 1u, 3u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 201u),
@@ -551,12 +560,12 @@
TEST_F(LocalValueNumberingTest, EscapingRefs) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false }, // Field #1.
- { 2u, 1u, 2u, false }, // Field #2.
- { 3u, 1u, 3u, false }, // Reference field for storing escaping refs.
- { 4u, 1u, 4u, false }, // Wide.
- { 5u, 0u, 0u, false }, // Unresolved field, int.
- { 6u, 0u, 0u, false }, // Unresolved field, wide.
+ { 1u, 1u, 1u, false, kDexMemAccessWord }, // Field #1.
+ { 2u, 1u, 2u, false, kDexMemAccessWord }, // Field #2.
+ { 3u, 1u, 3u, false, kDexMemAccessObject }, // For storing escaping refs.
+ { 4u, 1u, 4u, false, kDexMemAccessWide }, // Wide.
+ { 5u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved field, int.
+ { 6u, 0u, 0u, false, kDexMemAccessWide }, // Unresolved field, wide.
};
static const MIRDef mirs[] = {
DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
@@ -634,11 +643,11 @@
TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false },
- { 2u, 1u, 2u, false },
+ { 1u, 1u, 1u, false, kDexMemAccessWord },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
};
static const SFieldDef sfields[] = {
- { 2u, 1u, 2u, false },
+ { 2u, 1u, 2u, false, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET(Instruction::IGET, 0u, 30u, 0u),
@@ -716,8 +725,8 @@
TEST_F(LocalValueNumberingTest, ClInitOnSget) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, false },
- { 1u, 2u, 1u, false },
+ { 0u, 1u, 0u, false, kDexMemAccessObject },
+ { 1u, 2u, 1u, false, kDexMemAccessObject },
};
static const MIRDef mirs[] = {
DEF_SGET(Instruction::SGET_OBJECT, 0u, 0u),
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 44f69ba..7b53b14 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -21,6 +21,7 @@
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
+#include "dex/mir_field_info.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -1204,6 +1205,8 @@
ScopedArenaAllocator allocator(&cu_->arena_stack);
uint16_t* field_idxs =
reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));
+ DexMemAccessType* field_types = reinterpret_cast<DexMemAccessType*>(
+ allocator.Alloc(max_refs * sizeof(DexMemAccessType), kArenaAllocMisc));
// Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
size_t ifield_pos = 0u;
@@ -1214,38 +1217,41 @@
continue;
}
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- if (mir->dalvikInsn.opcode >= Instruction::IGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
- // Get field index and try to find it among existing indexes. If found, it's usually among
- // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
- // is a linear search, it actually performs much better than map based approach.
- if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
- uint16_t field_idx = mir->dalvikInsn.vC;
- size_t i = ifield_pos;
- while (i != 0u && field_idxs[i - 1] != field_idx) {
- --i;
- }
- if (i != 0u) {
- mir->meta.ifield_lowering_info = i - 1;
- } else {
- mir->meta.ifield_lowering_info = ifield_pos;
- field_idxs[ifield_pos++] = field_idx;
- }
- } else {
- uint16_t field_idx = mir->dalvikInsn.vB;
- size_t i = sfield_pos;
- while (i != max_refs && field_idxs[i] != field_idx) {
- ++i;
- }
- if (i != max_refs) {
- mir->meta.sfield_lowering_info = max_refs - i - 1u;
- } else {
- mir->meta.sfield_lowering_info = max_refs - sfield_pos;
- field_idxs[--sfield_pos] = field_idx;
- }
+ // Get field index and try to find it among existing indexes. If found, it's usually among
+ // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
+ // is a linear search, it actually performs much better than map based approach.
+ if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
+ uint16_t field_idx = mir->dalvikInsn.vC;
+ size_t i = ifield_pos;
+ while (i != 0u && field_idxs[i - 1] != field_idx) {
+ --i;
}
- DCHECK_LE(ifield_pos, sfield_pos);
+ if (i != 0u) {
+ mir->meta.ifield_lowering_info = i - 1;
+ DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
+ } else {
+ mir->meta.ifield_lowering_info = ifield_pos;
+ field_idxs[ifield_pos] = field_idx;
+ field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+ ++ifield_pos;
+ }
+ } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
+ uint16_t field_idx = mir->dalvikInsn.vB;
+ size_t i = sfield_pos;
+ while (i != max_refs && field_idxs[i] != field_idx) {
+ ++i;
+ }
+ if (i != max_refs) {
+ mir->meta.sfield_lowering_info = max_refs - i - 1u;
+ DCHECK_EQ(field_types[i], SGetOrSPutMemAccessType(mir->dalvikInsn.opcode));
+ } else {
+ mir->meta.sfield_lowering_info = max_refs - sfield_pos;
+ --sfield_pos;
+ field_idxs[sfield_pos] = field_idx;
+ field_types[sfield_pos] = SGetOrSPutMemAccessType(mir->dalvikInsn.opcode);
+ }
}
+ DCHECK_LE(ifield_pos, sfield_pos);
}
}
@@ -1254,7 +1260,7 @@
DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
ifield_lowering_infos_.reserve(ifield_pos);
for (size_t pos = 0u; pos != ifield_pos; ++pos) {
- ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos]));
+ ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
}
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
ifield_lowering_infos_.data(), ifield_pos);
@@ -1266,7 +1272,7 @@
sfield_lowering_infos_.reserve(max_refs - sfield_pos);
for (size_t pos = max_refs; pos != sfield_pos;) {
--pos;
- sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos]));
+ sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos], field_types[pos]));
}
MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
sfield_lowering_infos_.data(), max_refs - sfield_pos);
@@ -1329,19 +1335,10 @@
continue;
}
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
- mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
- mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
+ if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
// Decode target method index and invoke type.
- uint16_t target_method_idx;
- uint16_t invoke_type_idx;
- if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
- target_method_idx = mir->dalvikInsn.vB;
- invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
- } else {
- target_method_idx = mir->dalvikInsn.vB;
- invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
- }
+ uint16_t target_method_idx = mir->dalvikInsn.vB;
+ DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
// Find devirtualization target.
// TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 1db3b5b..53afcad 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -35,7 +35,7 @@
DCHECK(field_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- MirIFieldLoweringInfo unresolved(it->field_idx_);
+ MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
}
}
@@ -66,6 +66,7 @@
std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx);
it->flags_ = 0u | // Without kFlagIsStatic.
+ (it->flags_ & (kMemAccessTypeMask << kBitMemAccessTypeBegin)) |
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
(fast_path.second ? kFlagFastPut : 0u);
@@ -79,7 +80,7 @@
DCHECK(field_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- MirSFieldLoweringInfo unresolved(it->field_idx_);
+ MirSFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
// In 64-bit builds, there's padding after storage_index_, don't include it in memcmp.
size_t size = OFFSETOF_MEMBER(MirSFieldLoweringInfo, storage_index_) +
sizeof(it->storage_index_);
@@ -114,6 +115,7 @@
std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
dex_cache.Get(), referrer_class, resolved_field, field_idx, &it->storage_index_);
uint16_t flags = kFlagIsStatic |
+ (it->flags_ & (kMemAccessTypeMask << kBitMemAccessTypeBegin)) |
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
(fast_path.second ? kFlagFastPut : 0u);
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index e97f7a0..ff427f8 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -20,6 +20,7 @@
#include "base/macros.h"
#include "dex_file.h"
#include "offsets.h"
+#include "utils/dex_instruction_utils.h"
namespace art {
@@ -63,18 +64,27 @@
return (flags_ & kFlagIsVolatile) != 0u;
}
+ DexMemAccessType MemAccessType() const {
+ return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
+ }
+
protected:
enum {
kBitIsStatic = 0,
kBitIsVolatile,
- kFieldInfoBitEnd
+ kBitMemAccessTypeBegin,
+ kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3, // 3 bits for raw type.
+ kFieldInfoBitEnd = kBitMemAccessTypeEnd
};
static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+ static constexpr uint16_t kMemAccessTypeMask = 7u;
+ static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
+ "Invalid raw type mask");
- MirFieldInfo(uint16_t field_idx, uint16_t flags)
+ MirFieldInfo(uint16_t field_idx, uint16_t flags, DexMemAccessType type)
: field_idx_(field_idx),
- flags_(flags),
+ flags_(flags | static_cast<uint16_t>(type) << kBitMemAccessTypeBegin),
declaring_field_idx_(0u),
declaring_class_idx_(0u),
declaring_dex_file_(nullptr) {
@@ -107,8 +117,8 @@
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Construct an unresolved instance field lowering info.
- explicit MirIFieldLoweringInfo(uint16_t field_idx)
- : MirFieldInfo(field_idx, kFlagIsVolatile), // Without kFlagIsStatic.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
+ : MirFieldInfo(field_idx, kFlagIsVolatile, type), // Without kFlagIsStatic.
field_offset_(0u) {
}
@@ -155,8 +165,8 @@
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Construct an unresolved static field lowering info.
- explicit MirSFieldLoweringInfo(uint16_t field_idx)
- : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic),
+ explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
+ : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic, type),
field_offset_(0u),
storage_index_(DexFile::kDexNoIndex) {
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d77ad6f..63b1f2d 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -661,13 +661,29 @@
void DoCacheFieldLoweringInfo();
const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
- return ifield_lowering_infos_[mir->meta.ifield_lowering_info];
+ return GetIFieldLoweringInfo(mir->meta.ifield_lowering_info);
+ }
+
+ const MirIFieldLoweringInfo& GetIFieldLoweringInfo(uint32_t lowering_info) const {
+ DCHECK_LT(lowering_info, ifield_lowering_infos_.size());
+ return ifield_lowering_infos_[lowering_info];
+ }
+
+ size_t GetIFieldLoweringInfoCount() const {
+ return ifield_lowering_infos_.size();
}
const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
- return sfield_lowering_infos_[mir->meta.sfield_lowering_info];
+ return GetSFieldLoweringInfo(mir->meta.sfield_lowering_info);
+ }
+
+ const MirSFieldLoweringInfo& GetSFieldLoweringInfo(uint32_t lowering_info) const {
+ DCHECK_LT(lowering_info, sfield_lowering_infos_.size());
+ return sfield_lowering_infos_[lowering_info];
+ }
+
+ size_t GetSFieldLoweringInfoCount() const {
+ return sfield_lowering_infos_.size();
}
void DoCacheMethodLoweringInfo();
@@ -1035,6 +1051,21 @@
bool ApplyGlobalValueNumberingGate();
bool ApplyGlobalValueNumbering(BasicBlock* bb);
void ApplyGlobalValueNumberingEnd();
+
+ uint16_t GetGvnIFieldId(MIR* mir) const {
+ DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
+ DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
+ DCHECK(temp_.gvn.ifield_ids_ != nullptr);
+ return temp_.gvn.ifield_ids_[mir->meta.ifield_lowering_info];
+ }
+
+ uint16_t GetGvnSFieldId(MIR* mir) const {
+ DCHECK(IsInstructionSGetOrSPut(mir->dalvikInsn.opcode));
+ DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
+ DCHECK(temp_.gvn.sfield_ids_ != nullptr);
+ return temp_.gvn.sfield_ids_[mir->meta.sfield_lowering_info];
+ }
+
/*
* Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
* we have to do some work to figure out the sreg type. For some operations it is
@@ -1300,6 +1331,8 @@
// Global value numbering.
struct {
GlobalValueNumbering* gvn;
+ uint16_t* ifield_ids_; // Part of GVN/LVN but cached here for LVN to avoid recalculation.
+ uint16_t* sfield_ids_; // Ditto.
} gvn;
} temp_;
static const int kInvalidEntry = -1;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index bb7ee89..9d52807 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -19,6 +19,7 @@
#include "dataflow_iterator-inl.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
+#include "mir_field_info.h"
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
@@ -217,10 +218,6 @@
static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
"if_ccz_ccodes_size1");
-static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
- return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
-}
-
static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}
@@ -1163,8 +1160,7 @@
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (bb->block_type == kDalvikByteCode) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- if (mir->dalvikInsn.opcode >= Instruction::SGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
if (!field_info.IsReferrersClass()) {
DCHECK_LT(class_to_index_map.size(), 0xffffu);
@@ -1180,8 +1176,7 @@
// Using offset/2 for index into temp_.cice.indexes.
temp_.cice.indexes[mir->offset / 2u] = index;
}
- } else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
- mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
+ } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
DCHECK(method_info.IsStatic());
if (method_info.FastPath() && !method_info.IsReferrersClass()) {
@@ -1265,12 +1260,10 @@
// NOTE: index != 0xffff does not guarantee that this is an SGET/SPUT/INVOKE_STATIC.
// Dex instructions with width 1 can have the same offset/2.
- if (mir->dalvikInsn.opcode >= Instruction::SGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
check_initialization = true;
check_dex_cache = true;
- } else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
- mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
+ } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
check_initialization = true;
// NOTE: INVOKE_STATIC doesn't guarantee that the type will be in the dex cache.
}
@@ -1337,6 +1330,10 @@
DCHECK(temp_scoped_alloc_ == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ temp_.gvn.ifield_ids_ =
+ GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
+ temp_.gvn.sfield_ids_ =
+ GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
DCHECK(temp_.gvn.gvn == nullptr);
temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
@@ -1382,6 +1379,8 @@
delete temp_.gvn.gvn;
temp_.gvn.gvn = nullptr;
+ temp_.gvn.ifield_ids_ = nullptr;
+ temp_.gvn.sfield_ids_ = nullptr;
DCHECK(temp_scoped_alloc_ != nullptr);
temp_scoped_alloc_.reset();
}
@@ -1400,7 +1399,8 @@
cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
0u /* access_flags not used */, nullptr /* verified_method not used */);
- MirIFieldLoweringInfo inlined_field_info(field_idx);
+ DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
+ MirIFieldLoweringInfo inlined_field_info(field_idx, type);
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
DCHECK(inlined_field_info.IsResolved());
@@ -1548,6 +1548,14 @@
}
void MIRGraph::BasicBlockOptimization() {
+ if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
+ temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ temp_.gvn.ifield_ids_ =
+ GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
+ temp_.gvn.sfield_ids_ =
+ GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
+ }
+
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
@@ -1564,6 +1572,11 @@
BasicBlockOpt(bb);
}
}
+
+ // Clean up after LVN.
+ temp_.gvn.ifield_ids_ = nullptr;
+ temp_.gvn.sfield_ids_ = nullptr;
+ temp_scoped_alloc_.reset();
}
} // namespace art
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 8874faf..c794cc6 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -19,6 +19,7 @@
#include "compiler_internals.h"
#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
+#include "dex/mir_field_info.h"
#include "gtest/gtest.h"
namespace art {
@@ -236,15 +237,17 @@
ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
bb->AppendMIR(mir);
- if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
- mir->meta.sfield_lowering_info = def->field_or_method_info;
- } else if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ if (IsInstructionIGetOrIPut(def->opcode)) {
ASSERT_LT(def->field_or_method_info, cu_.mir_graph->ifield_lowering_infos_.size());
mir->meta.ifield_lowering_info = def->field_or_method_info;
- } else if (def->opcode >= Instruction::INVOKE_VIRTUAL &&
- def->opcode < Instruction::INVOKE_INTERFACE_RANGE &&
- def->opcode != Instruction::RETURN_VOID_BARRIER) {
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_or_method_info].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->field_or_method_info;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_or_method_info].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
+ } else if (IsInstructionInvoke(def->opcode)) {
ASSERT_LT(def->field_or_method_info, cu_.mir_graph->method_lowering_infos_.size());
mir->meta.method_lowering_info = def->field_or_method_info;
}
@@ -294,6 +297,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_class_idx;
uint16_t declaring_field_idx;
+ DexMemAccessType type;
};
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
@@ -301,12 +305,12 @@
cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
- MirSFieldLoweringInfo field_info(def->field_idx);
+ MirSFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_class_idx_ = def->declaring_class_idx;
field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic;
+ // We don't care about the volatile flag in these tests.
}
ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
ASSERT_FALSE(field_info.IsClassInitialized());
@@ -343,6 +347,7 @@
uintptr_t declaring_dex_file;
uint16_t declaring_class_idx;
uint16_t declaring_field_idx;
+ DexMemAccessType type;
};
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
@@ -350,11 +355,12 @@
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_class_idx_ = def->declaring_class_idx;
field_info.declaring_field_idx_ = def->declaring_field_idx;
+ // We don't care about the volatile flag in these tests.
}
ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
@@ -393,12 +399,12 @@
TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
- { 3u, 1u, 3u, 3u }, // Same declaring class as sfield[4].
- { 4u, 1u, 3u, 4u }, // Same declaring class as sfield[3].
- { 5u, 0u, 0u, 0u }, // Unresolved.
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
+ { 3u, 1u, 3u, 3u, kDexMemAccessWord }, // Same declaring class as sfield[4].
+ { 4u, 1u, 3u, 4u, kDexMemAccessWord }, // Same declaring class as sfield[3].
+ { 5u, 0u, 0u, 0u, kDexMemAccessWord }, // Unresolved.
};
static const MIRDef mirs[] = {
DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 5u), // Unresolved.
@@ -432,9 +438,9 @@
TEST_F(ClassInitCheckEliminationTest, SingleBlockWithInvokes) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
};
static const MethodDef methods[] = {
{ 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
@@ -473,17 +479,17 @@
TEST_F(ClassInitCheckEliminationTest, Diamond) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
- { 3u, 1u, 3u, 3u },
- { 4u, 1u, 4u, 4u },
- { 5u, 1u, 5u, 5u },
- { 6u, 1u, 6u, 6u },
- { 7u, 1u, 7u, 7u },
- { 8u, 1u, 8u, 8u }, // Same declaring class as sfield[9].
- { 9u, 1u, 8u, 9u }, // Same declaring class as sfield[8].
- { 10u, 0u, 0u, 0u }, // Unresolved.
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
+ { 3u, 1u, 3u, 3u, kDexMemAccessWord },
+ { 4u, 1u, 4u, 4u, kDexMemAccessWord },
+ { 5u, 1u, 5u, 5u, kDexMemAccessWord },
+ { 6u, 1u, 6u, 6u, kDexMemAccessWord },
+ { 7u, 1u, 7u, 7u, kDexMemAccessWord },
+ { 8u, 1u, 8u, 8u, kDexMemAccessWord }, // Same declaring class as sfield[9].
+ { 9u, 1u, 8u, 9u, kDexMemAccessWord }, // Same declaring class as sfield[8].
+ { 10u, 0u, 0u, 0u, kDexMemAccessWord }, // Unresolved.
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -539,11 +545,11 @@
TEST_F(ClassInitCheckEliminationTest, DiamondWithInvokes) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
- { 3u, 1u, 3u, 3u },
- { 4u, 1u, 4u, 4u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
+ { 3u, 1u, 3u, 3u, kDexMemAccessWord },
+ { 4u, 1u, 4u, 4u, kDexMemAccessWord },
};
static const MethodDef methods[] = {
{ 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
@@ -600,9 +606,9 @@
TEST_F(ClassInitCheckEliminationTest, Loop) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
@@ -631,7 +637,7 @@
TEST_F(ClassInitCheckEliminationTest, LoopWithInvokes) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
};
static const MethodDef methods[] = {
{ 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
@@ -671,10 +677,10 @@
TEST_F(ClassInitCheckEliminationTest, Catch) {
static const SFieldDef sfields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
- { 2u, 1u, 2u, 2u },
- { 3u, 1u, 3u, 3u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 2u, 2u, kDexMemAccessWord },
+ { 3u, 1u, 3u, 3u, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u), // Before the exception edge.
@@ -707,9 +713,9 @@
TEST_F(NullCheckEliminationTest, SingleBlock) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 0u, 1u },
- { 2u, 1u, 0u, 2u }, // Object.
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 0u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 0u, 2u, kDexMemAccessObject },
};
static const MIRDef mirs[] = {
DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 0u, 100u, 2u),
@@ -768,9 +774,9 @@
TEST_F(NullCheckEliminationTest, Diamond) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 0u, 1u },
- { 2u, 1u, 0u, 2u }, // int[].
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 0u, 1u, kDexMemAccessWord },
+ { 2u, 1u, 0u, 2u, kDexMemAccessObject }, // int[].
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
@@ -816,8 +822,8 @@
TEST_F(NullCheckEliminationTest, Loop) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u),
@@ -846,8 +852,8 @@
TEST_F(NullCheckEliminationTest, Catch) {
static const IFieldDef ifields[] = {
- { 0u, 1u, 0u, 0u },
- { 1u, 1u, 1u, 1u },
+ { 0u, 1u, 0u, 0u, kDexMemAccessWord },
+ { 1u, 1u, 1u, 1u, kDexMemAccessWord },
};
static const MIRDef mirs[] = {
DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u), // Before the exception edge.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index c00f90b..4dd24cb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -583,6 +583,7 @@
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) {
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ DCHECK_EQ(SPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
if (!SLOW_FIELD_PATH && field_info.FastPut()) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
@@ -701,6 +702,7 @@
void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type) {
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ DCHECK_EQ(SGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
if (!SLOW_FIELD_PATH && field_info.FastGet()) {
@@ -839,6 +841,7 @@
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
RegLocation rl_dest, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
if (!SLOW_FIELD_PATH && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -912,6 +915,7 @@
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
if (!SLOW_FIELD_PATH && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/utils/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
new file mode 100644
index 0000000..09d9419
--- /dev/null
+++ b/compiler/utils/dex_instruction_utils.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
+#define ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
+
+#include "dex_instruction.h"
+
+namespace art {
+
+// Dex invoke type corresponds to the ordering of INVOKE instructions;
+// this order is the same for range and non-range invokes.
+enum DexInvokeType : uint8_t {
+ kDexInvokeVirtual = 0, // invoke-virtual, invoke-virtual-range
+ kDexInvokeSuper, // invoke-super, invoke-super-range
+ kDexInvokeDirect, // invoke-direct, invoke-direct-range
+ kDexInvokeStatic, // invoke-static, invoke-static-range
+ kDexInvokeInterface, // invoke-interface, invoke-interface-range
+ kDexInvokeTypeCount
+};
+
+// Dex instruction memory access types correspond to the ordering of GET/PUT instructions;
+// this order is the same for IGET, IPUT, SGET, SPUT, AGET and APUT.
+enum DexMemAccessType : uint8_t {
+ kDexMemAccessWord = 0, // op 0; int or float, the actual type is not encoded.
+ kDexMemAccessWide, // op_WIDE 1; long or double, the actual type is not encoded.
+ kDexMemAccessObject, // op_OBJECT 2; the actual reference type is not encoded.
+ kDexMemAccessBoolean, // op_BOOLEAN 3
+ kDexMemAccessByte, // op_BYTE 4
+ kDexMemAccessChar, // op_CHAR 5
+ kDexMemAccessShort, // op_SHORT 6
+ kDexMemAccessTypeCount
+};
+
+std::ostream& operator<<(std::ostream& os, const DexMemAccessType& type);
+
+// NOTE: The following functions disregard quickened instructions.
+
+constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
+ return Instruction::INVOKE_VIRTUAL <= opcode && opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
+ opcode != Instruction::RETURN_VOID_BARRIER;
+}
+
+constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
+ return opcode == Instruction::INVOKE_STATIC || opcode == Instruction::INVOKE_STATIC_RANGE;
+}
+
+constexpr bool IsInstructionIfCc(Instruction::Code opcode) {
+ return Instruction::IF_EQ <= opcode && opcode <= Instruction::IF_LE;
+}
+
+constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
+ return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
+}
+
+constexpr bool IsInstructionIGet(Instruction::Code code) {
+ return Instruction::IGET <= code && code <= Instruction::IGET_SHORT;
+}
+
+constexpr bool IsInstructionIPut(Instruction::Code code) {
+ return Instruction::IPUT <= code && code <= Instruction::IPUT_SHORT;
+}
+
+constexpr bool IsInstructionSGet(Instruction::Code code) {
+ return Instruction::SGET <= code && code <= Instruction::SGET_SHORT;
+}
+
+constexpr bool IsInstructionSPut(Instruction::Code code) {
+ return Instruction::SPUT <= code && code <= Instruction::SPUT_SHORT;
+}
+
+constexpr bool IsInstructionAGet(Instruction::Code code) {
+ return Instruction::AGET <= code && code <= Instruction::AGET_SHORT;
+}
+
+constexpr bool IsInstructionAPut(Instruction::Code code) {
+ return Instruction::APUT <= code && code <= Instruction::APUT_SHORT;
+}
+
+constexpr bool IsInstructionIGetOrIPut(Instruction::Code code) {
+ return Instruction::IGET <= code && code <= Instruction::IPUT_SHORT;
+}
+
+constexpr bool IsInstructionSGetOrSPut(Instruction::Code code) {
+ return Instruction::SGET <= code && code <= Instruction::SPUT_SHORT;
+}
+
+constexpr bool IsInstructionAGetOrAPut(Instruction::Code code) {
+ return Instruction::AGET <= code && code <= Instruction::APUT_SHORT;
+}
+
+// TODO: Remove the #if guards below when we fully migrate to C++14.
+
+constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionInvoke(opcode));
+#endif
+ return opcode >= Instruction::INVOKE_VIRTUAL_RANGE;
+}
+
+constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionInvoke(opcode));
+#endif
+ return static_cast<DexInvokeType>(IsInvokeInstructionRange(opcode)
+ ? (opcode - Instruction::INVOKE_VIRTUAL_RANGE)
+ : (opcode - Instruction::INVOKE_VIRTUAL));
+}
+
+constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::IGET);
+}
+
+constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::IPUT);
+}
+
+constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::SGET);
+}
+
+constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::SPUT);
+}
+
+constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::AGET);
+}
+
+constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::APUT);
+}
+
+constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIGetOrIPut(code));
+#endif
+ return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
+}
+
+constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSGetOrSPut(code));
+#endif
+ return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
+}
+
+constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAGetOrAPut(code));
+#endif
+ return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index c310191..cb69817 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -97,7 +97,9 @@
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) { // Avoid recursive aborts.
+ CHECK(!bad_mutexes_held);
+ }
}
// Don't record monitors as they are outside the scope of analysis. They may be inspected off of
// the monitor list.
@@ -112,7 +114,7 @@
return;
}
if (level_ != kMonitorLock) {
- if (kDebugLocking && !gAborting) {
+ if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
self->SetHeldMutex(level_, NULL);
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 37c5f9c..6597235 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -794,7 +794,7 @@
Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::GetHashChain(size_t i, bool* ok) const {
- if (i >= GetHashBucketNum()) {
+ if (i >= GetHashChainNum()) {
*ok = false;
return 0;
}
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index 793d798..ae8e892 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -126,6 +126,30 @@
size_t kValgrindRedZoneBytes,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
+mirror::Object* ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocThreadUnsafe(
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ size_t bytes_allocated;
+ size_t usable_size;
+ void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
+ &bytes_allocated, &usable_size);
+ if (obj_with_rdz == nullptr) {
+ return nullptr;
+ }
+
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+ kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_allocated_out,
+ usable_size_out);
+}
+
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
size_t ValgrindMallocSpace<S,
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index d102f49..707ea69 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -37,6 +37,9 @@
size_t* usable_size) OVERRIDE;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) OVERRIDE;
+ mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 6d4af83..4f5ca3f 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -436,13 +436,27 @@
return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
}
Runtime* runtime = Runtime::Current();
- // For Proxy method we exclude direct method (there is only one direct method - constructor).
- // Direct method is cloned from original java.lang.reflect.Proxy class together with code
- // and as a result it is executed as usual quick compiled method without any stubs.
- // So the frame info should be returned as it is a quick method not a stub.
- if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) {
+
+ if (UNLIKELY(IsAbstract())) {
return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
}
+
+ // For Proxy method we add special handling for the direct method case (there is only one
+ // direct method - constructor). Direct method is cloned from original
+ // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
+ // quick compiled method without any stubs. So the frame info should be returned as it is a
+ // quick method not a stub. However, if instrumentation stubs are installed, the
+ // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
+ // oat code pointer, thus we have to add a special case here.
+ if (UNLIKELY(IsProxyMethod())) {
+ if (IsDirect()) {
+ CHECK(IsConstructor());
+ return GetQuickFrameInfo(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
+ } else {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ }
+
if (UNLIKELY(IsRuntimeMethod())) {
return runtime->GetRuntimeMethodFrameInfo(this);
}
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index a8d4308..72b696b 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -106,9 +106,7 @@
};
struct InlineIGetIPutData {
- // The op_variant below is opcode-Instruction::IGET for IGETs and
- // opcode-Instruction::IPUT for IPUTs. This is because the runtime
- // doesn't know the OpSize enumeration.
+ // The op_variant below is DexMemAccessType but the runtime doesn't know that enumeration.
uint16_t op_variant : 3;
uint16_t method_is_static : 1;
uint16_t object_arg : 4;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index cd47b5e..163c11d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -96,8 +96,8 @@
void Thread::InitTlsEntryPoints() {
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
- uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) +
- sizeof(tlsPtr_.quick_entrypoints));
+ uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
+ sizeof(tlsPtr_.quick_entrypoints));
for (uintptr_t* it = begin; it != end; ++it) {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
diff --git a/test/run-test b/test/run-test
index 843714b..e9dd86a 100755
--- a/test/run-test
+++ b/test/run-test
@@ -586,7 +586,7 @@
echo '#################### info'
cat "${td_info}" | sed 's/^/# /g'
echo '#################### diffs'
- diff --strip-trailing-cr -u "$expected" "$output" | tail -n 500
+ diff --strip-trailing-cr -u "$expected" "$output" | tail -n 2000
echo '####################'
echo ' '
fi