Use HashSet<std::string> instead of unordered_set<>.
Change the default template parameters for HashSet<std::string> to
allow passing StringPiece as a key, avoiding an unnecessary
allocation. Use the HashSet<std::string> instead of
std::unordered_set<std::string>. Rename HashSet<> functions
that mirror std::unordered_multiset<> to lower-case.
Fix CompilerDriver::LoadImageClasses() to avoid using
invalidated iterator.
Test: m test-art-host-gtest
Test: testrunner.py --host
Change-Id: I7f8b82ee0b07befc5a0ee1c420b08a2068ad931e
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 1a7f926..54bff22 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -47,7 +47,7 @@
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
- candidate_fence_targets_.Insert(constructor_fence->InputAt(input_idx));
+ candidate_fence_targets_.insert(constructor_fence->InputAt(input_idx));
}
}
@@ -208,13 +208,13 @@
// there is no benefit to this extra complexity unless we also reordered
// the stores to come later.
candidate_fences_.clear();
- candidate_fence_targets_.Clear();
+ candidate_fence_targets_.clear();
}
// A publishing 'store' is only interesting if the value being stored
// is one of the fence `targets` in `candidate_fences`.
bool IsInterestingPublishTarget(HInstruction* store_input) const {
- return candidate_fence_targets_.Find(store_input) != candidate_fence_targets_.end();
+ return candidate_fence_targets_.find(store_input) != candidate_fence_targets_.end();
}
void MaybeMerge(HConstructorFence* target, HConstructorFence* src) {
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index fa7ad82..42e6498 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1183,7 +1183,7 @@
void ColoringIteration::BuildInterferenceGraph(
const ScopedArenaVector<LiveInterval*>& intervals,
const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
- DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+ DCHECK(interval_node_map_.empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
// jump from endpoint to endpoint, maintaining a set of intervals live at each
@@ -1208,7 +1208,7 @@
if (range != nullptr) {
InterferenceNode* node =
new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
- interval_node_map_.Insert(std::make_pair(sibling, node));
+ interval_node_map_.insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
// Fixed nodes should alias the canonical node for the corresponding register.
@@ -1303,7 +1303,7 @@
// Coalesce siblings.
LiveInterval* next_sibling = interval->GetNextSibling();
if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
- auto it = interval_node_map_.Find(next_sibling);
+ auto it = interval_node_map_.find(next_sibling);
if (it != interval_node_map_.end()) {
InterferenceNode* sibling_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1318,7 +1318,7 @@
if (parent->HasRegister()
&& parent->GetNextSibling() == interval
&& parent->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(parent);
+ auto it = interval_node_map_.find(parent);
if (it != interval_node_map_.end()) {
InterferenceNode* parent_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1341,7 +1341,7 @@
size_t position = predecessor->GetLifetimeEnd() - 1;
LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
if (existing != nullptr) {
- auto it = interval_node_map_.Find(existing);
+ auto it = interval_node_map_.find(existing);
if (it != interval_node_map_.end()) {
InterferenceNode* existing_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1364,7 +1364,7 @@
size_t position = predecessors[i]->GetLifetimeEnd() - 1;
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
@@ -1380,7 +1380,7 @@
= defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
// TODO: Could we consider lifetime holes here?
if (input_interval->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1407,7 +1407,7 @@
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
if (input_interval != nullptr &&
input_interval->HasHighInterval() == interval->HasHighInterval()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 8e98f19..c7683e0 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -262,14 +262,14 @@
std::unique_ptr<SchedulingNode> node(
new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
- nodes_map_.Insert(std::make_pair(instr, std::move(node)));
+ nodes_map_.insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
return result;
}
void Clear() {
- nodes_map_.Clear();
+ nodes_map_.clear();
contains_scheduling_barrier_ = false;
}
@@ -278,7 +278,7 @@
}
SchedulingNode* GetNode(const HInstruction* instr) const {
- auto it = nodes_map_.Find(instr);
+ auto it = nodes_map_.find(instr);
if (it == nodes_map_.end()) {
return nullptr;
} else {
@@ -294,7 +294,7 @@
bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
size_t Size() const {
- return nodes_map_.Size();
+ return nodes_map_.size();
}
// Dump the scheduling graph, in dot file format, appending it to the file
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index 1b43618..878967c 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -72,12 +72,12 @@
// Returns whether two Edge sets are equal (ArenaHashSet doesn't have "Equal" method).
static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
- if (set1->Size() != set2->Size()) {
+ if (set1->size() != set2->size()) {
return false;
}
for (auto e : *set1) {
- if (set2->Find(e) == set2->end()) {
+ if (set2->find(e) == set2->end()) {
return false;
}
}
@@ -472,8 +472,8 @@
continue;
}
- auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
- auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+ auto orig_redir = remap_orig_internal_->find(HEdge(orig_block_id, orig_succ_id));
+ auto copy_redir = remap_copy_internal_->find(HEdge(orig_block_id, orig_succ_id));
// Due to construction all successors of copied block were set to original.
if (copy_redir != remap_copy_internal_->end()) {
@@ -864,9 +864,9 @@
EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
- remap_orig_internal.Clear();
- remap_copy_internal.Clear();
- remap_incoming.Clear();
+ remap_orig_internal.clear();
+ remap_copy_internal.clear();
+ remap_incoming.clear();
// Check whether remapping info corresponds to loop peeling.
CollectRemappingInfoForPeelUnroll(/* to_unroll*/ false,
@@ -1022,16 +1022,16 @@
for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
HEdge e = HEdge(back_edge_block, loop_header);
if (to_unroll) {
- remap_orig_internal->Insert(e);
- remap_copy_internal->Insert(e);
+ remap_orig_internal->insert(e);
+ remap_copy_internal->insert(e);
} else {
- remap_copy_internal->Insert(e);
+ remap_copy_internal->insert(e);
}
}
// Set up remap_incoming edges set.
if (!to_unroll) {
- remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+ remap_incoming->insert(HEdge(loop_info->GetPreHeader(), loop_header));
}
}
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index df2e517..6f3bcda 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -708,8 +708,8 @@
orig_bb_set.SetBit(preheader->GetBlockId());
// Adjust incoming edges.
- remap_incoming.Clear();
- remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+ remap_incoming.clear();
+ remap_incoming.insert(HEdge(preheader->GetSinglePredecessor(), preheader));
HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));