Use HashSet<std::string> instead of unordered_set<>.

Change the default template parameters for HashSet<std::string> to
allow passing StringPiece as a key, avoiding an unnecessary
allocation. Use HashSet<std::string> instead of
std::unordered_set<std::string>. Rename HashSet<> functions
that mirror std::unordered_multiset<> to lowercase.
Fix CompilerDriver::LoadImageClasses() to avoid using an
invalidated iterator.

Test: m test-art-host-gtest
Test: testrunner.py --host
Change-Id: I7f8b82ee0b07befc5a0ee1c420b08a2068ad931e
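
A minimal sketch of the heterogeneous-lookup idea described above, using
C++20 std::unordered_set and std::string_view as stand-ins for ART's
HashSet<> and StringPiece (this is not the actual base/hash_set.h API):
find() can take a view over a C string without first materializing a
std::string key.

  #include <cstddef>
  #include <string>
  #include <string_view>
  #include <unordered_set>

  struct StringHash {
    using is_transparent = void;  // opts the container into heterogeneous lookup
    size_t operator()(std::string_view s) const {
      return std::hash<std::string_view>{}(s);
    }
  };

  struct StringEq {
    using is_transparent = void;
    bool operator()(std::string_view a, std::string_view b) const { return a == b; }
  };

  int main() {
    std::unordered_set<std::string, StringHash, StringEq> classes;
    classes.insert("Ljava/lang/Object;");
    const char* descriptor = "Ljava/lang/Object;";
    // The probe below allocates no temporary std::string.
    bool found = classes.find(std::string_view(descriptor)) != classes.end();
    return found ? 0 : 1;
  }
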
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1d4f020..c37d452 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -136,9 +136,9 @@
-// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
-// driver assumes ownership of the set, so the test should properly release the set.
-std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+// Get the set of image classes given to the compiler-driver in SetUp.
+std::unique_ptr<HashSet<std::string>> CommonCompilerTest::GetImageClasses() {
// Empty set: by default no classes are retained in the image.
- return new std::unordered_set<std::string>();
+ return std::make_unique<HashSet<std::string>>();
}
// Get ProfileCompilationInfo that should be passed to the driver.
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 39c8bd8..46b59a3 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
#include <list>
-#include <unordered_set>
#include <vector>
+#include "base/hash_set.h"
#include "common_runtime_test.h"
#include "compiler.h"
#include "oat_file.h"
@@ -63,9 +63,8 @@
InstructionSet GetInstructionSet() const;
- // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
- // driver assumes ownership of the set, so the test should properly release the set.
- virtual std::unordered_set<std::string>* GetImageClasses();
+ // Get the set of image classes given to the compiler-driver in SetUp.
+ virtual std::unique_ptr<HashSet<std::string>> GetImageClasses();
virtual ProfileCompilationInfo* GetProfileCompilationInfo();
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index aa8277e..d56b135 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "base/utils.h"
#include "compiled_method.h"
#include "linker/linker_patch.h"
@@ -80,65 +81,7 @@
public:
size_t operator()(const ArrayRef<ContentType>& array) const {
- const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
- // TODO: More reasonable assertion.
- // static_assert(IsPowerOfTwo(sizeof(ContentType)),
- // "ContentType is not power of two, don't know whether array layout is as assumed");
- uint32_t len = sizeof(ContentType) * array.size();
- if (kUseMurmur3Hash) {
- static constexpr uint32_t c1 = 0xcc9e2d51;
- static constexpr uint32_t c2 = 0x1b873593;
- static constexpr uint32_t r1 = 15;
- static constexpr uint32_t r2 = 13;
- static constexpr uint32_t m = 5;
- static constexpr uint32_t n = 0xe6546b64;
-
- uint32_t hash = 0;
-
- const int nblocks = len / 4;
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
- int i;
- for (i = 0; i < nblocks; i++) {
- uint32_t k = blocks[i];
- k *= c1;
- k = (k << r1) | (k >> (32 - r1));
- k *= c2;
-
- hash ^= k;
- hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
- }
-
- const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
- uint32_t k1 = 0;
-
- switch (len & 3) {
- case 3:
- k1 ^= tail[2] << 16;
- FALLTHROUGH_INTENDED;
- case 2:
- k1 ^= tail[1] << 8;
- FALLTHROUGH_INTENDED;
- case 1:
- k1 ^= tail[0];
-
- k1 *= c1;
- k1 = (k1 << r1) | (k1 >> (32 - r1));
- k1 *= c2;
- hash ^= k1;
- }
-
- hash ^= len;
- hash ^= (hash >> 16);
- hash *= 0x85ebca6b;
- hash ^= (hash >> 13);
- hash *= 0xc2b2ae35;
- hash ^= (hash >> 16);
-
- return hash;
- } else {
- return HashBytes(data, len);
- }
+ return DataHash()(array);
}
};
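
The hunk above replaces a hand-inlined Murmur3 with a call to the shared
DataHash functor from base/data_hash.h. A standalone sketch of the shape of
that delegation, with a hypothetical FNV-1a byte hasher standing in for
DataHash (whose real implementation is not reproduced here):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct BytesHash {  // hypothetical stand-in for art::DataHash
    size_t operator()(const uint8_t* data, size_t len) const {
      size_t hash = 0x811c9dc5u;  // FNV-1a placeholder, not Murmur3
      for (size_t i = 0; i != len; ++i) {
        hash = (hash ^ data[i]) * 0x01000193u;
      }
      return hash;
    }
  };

  template <typename T>
  size_t HashArray(const std::vector<T>& array) {
    // Every type-specific hasher now reduces to one shared byte hasher.
    return BytesHash()(reinterpret_cast<const uint8_t*>(array.data()),
                       array.size() * sizeof(T));
  }

Centralizing the byte hashing means a future algorithm swap happens in one
place instead of per container.
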
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6cb3936..bd2b107 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -264,7 +264,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ std::unique_ptr<HashSet<std::string>>&& image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info)
@@ -277,7 +277,7 @@
instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
non_relative_linker_patch_count_(0u),
- image_classes_(image_classes),
+ image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
@@ -991,7 +991,7 @@
bool CompilerDriver::IsImageClass(const char* descriptor) const {
if (image_classes_ != nullptr) {
// If we have a set of image classes, use those.
- return image_classes_->find(descriptor) != image_classes_->end();
+ return image_classes_->find(StringPiece(descriptor)) != image_classes_->end();
}
// No set of image classes, assume we include all the classes.
// NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
@@ -1002,7 +1002,7 @@
if (classes_to_compile_ == nullptr) {
return true;
}
- return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
}
bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
@@ -1091,7 +1091,7 @@
class RecordImageClassesVisitor : public ClassVisitor {
public:
- explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+ explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
: image_classes_(image_classes) {}
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1101,7 +1101,7 @@
}
private:
- std::unordered_set<std::string>* const image_classes_;
+ HashSet<std::string>* const image_classes_;
};
// Make a list of descriptors for classes to include in the image
@@ -1124,7 +1124,7 @@
hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
if (klass == nullptr) {
VLOG(compiler) << "Failed to find class " << descriptor;
- image_classes_->erase(it++);
+ it = image_classes_->erase(it);
self->ClearException();
} else {
++it;
@@ -1177,12 +1177,12 @@
RecordImageClassesVisitor visitor(image_classes_.get());
class_linker->VisitClasses(&visitor);
- CHECK_NE(image_classes_->size(), 0U);
+ CHECK(!image_classes_->empty());
}
static void MaybeAddToImageClasses(Thread* self,
ObjPtr<mirror::Class> klass,
- std::unordered_set<std::string>* image_classes)
+ HashSet<std::string>* image_classes)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(self, Thread::Current());
StackHandleScope<1> hs(self);
@@ -1190,11 +1190,10 @@
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
- std::pair<std::unordered_set<std::string>::iterator, bool> result =
- image_classes->insert(descriptor);
- if (!result.second) { // Previously inserted.
- break;
+ if (image_classes->find(StringPiece(descriptor)) != image_classes->end()) {
+ break; // Previously inserted.
}
+ image_classes->insert(descriptor);
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0, num_interfaces = klass->NumDirectInterfaces(); i != num_interfaces; ++i) {
ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass, i);
@@ -1216,7 +1215,7 @@
class ClinitImageUpdate {
public:
static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) {
std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
@@ -1273,7 +1272,7 @@
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
- const char* name = klass->GetDescriptor(&temp);
+ StringPiece name(klass->GetDescriptor(&temp));
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
data_->image_classes_.push_back(hs_.NewHandle(klass));
} else {
@@ -1292,7 +1291,7 @@
};
ClinitImageUpdate(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
: hs_(hs),
@@ -1339,7 +1338,7 @@
VariableSizedHandleScope& hs_;
mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
- std::unordered_set<std::string>* const image_class_descriptors_;
+ HashSet<std::string>* const image_class_descriptors_;
std::vector<Handle<mirror::Class>> image_classes_;
Thread* const self_;
const char* old_cause_;
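
Two patterns in the compiler_driver.cc hunks above deserve a note. First,
IsImageClass() and MaybeAddToImageClasses() now probe with
StringPiece(descriptor), so a descriptor that is already in the set never
costs a std::string allocation. Second, LoadImageClasses() switches from
erase(it++) to it = erase(it): erase(it++) is an idiom for node-based
containers like std::unordered_set, but an open-addressing table such as
HashSet may relocate surviving elements on erase, so the only safe way to
continue iterating is from the iterator erase() returns. A generic sketch of
the fixed loop shape (assuming only that Set::erase(iterator) returns the
next valid iterator, as in the diff above):

  template <typename Set, typename Pred>
  void EraseIf(Set& set, Pred pred) {
    for (auto it = set.begin(); it != set.end(); ) {
      if (pred(*it)) {
        it = set.erase(it);  // resume from what erase() hands back
      } else {
        ++it;
      }
    }
  }
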
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 55f3561..ff70d96 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -20,7 +20,6 @@
#include <atomic>
#include <set>
#include <string>
-#include <unordered_set>
#include <vector>
#include "android-base/strings.h"
@@ -28,6 +27,7 @@
#include "arch/instruction_set.h"
#include "base/array_ref.h"
#include "base/bit_utils.h"
+#include "base/hash_set.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/quasi_atomic.h"
@@ -99,7 +99,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ std::unique_ptr<HashSet<std::string>>&& image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info);
@@ -144,7 +144,7 @@
return compiler_.get();
}
- const std::unordered_set<std::string>* GetImageClasses() const {
+ const HashSet<std::string>* GetImageClasses() const {
return image_classes_.get();
}
@@ -493,12 +493,12 @@
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
- std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+ std::unique_ptr<HashSet<std::string>> image_classes_;
// Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
- std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+ std::unique_ptr<HashSet<std::string>> classes_to_compile_;
std::atomic<uint32_t> number_of_soft_verifier_failures_;
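
The constructor now takes std::unique_ptr<HashSet<std::string>>&& instead of
a raw pointer, so the ownership transfer that used to be a comment-enforced
convention is enforced by the type system. A minimal sketch of the same
handoff, with hypothetical names and std::set standing in for HashSet:

  #include <memory>
  #include <set>
  #include <string>
  #include <utility>

  class Driver {
   public:
    explicit Driver(std::unique_ptr<std::set<std::string>>&& image_classes)
        : image_classes_(std::move(image_classes)) {}  // member takes ownership

    const std::set<std::string>* GetImageClasses() const {
      return image_classes_.get();
    }

   private:
    std::unique_ptr<std::set<std::string>> image_classes_;
  };

  // Call sites must give the set up explicitly:
  //   auto classes = std::make_unique<std::set<std::string>>();
  //   Driver driver(std::move(classes));  // `classes` is null afterwards
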
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 1a7f926..54bff22 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -47,7 +47,7 @@
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
- candidate_fence_targets_.Insert(constructor_fence->InputAt(input_idx));
+ candidate_fence_targets_.insert(constructor_fence->InputAt(input_idx));
}
}
@@ -208,13 +208,13 @@
// there is no benefit to this extra complexity unless we also reordered
// the stores to come later.
candidate_fences_.clear();
- candidate_fence_targets_.Clear();
+ candidate_fence_targets_.clear();
}
// A publishing 'store' is only interesting if the value being stored
// is one of the fence `targets` in `candidate_fences`.
bool IsInterestingPublishTarget(HInstruction* store_input) const {
- return candidate_fence_targets_.Find(store_input) != candidate_fence_targets_.end();
+ return candidate_fence_targets_.find(store_input) != candidate_fence_targets_.end();
}
void MaybeMerge(HConstructorFence* target, HConstructorFence* src) {
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index fa7ad82..42e6498 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1183,7 +1183,7 @@
void ColoringIteration::BuildInterferenceGraph(
const ScopedArenaVector<LiveInterval*>& intervals,
const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
- DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+ DCHECK(interval_node_map_.empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
// jump from endpoint to endpoint, maintaining a set of intervals live at each
@@ -1208,7 +1208,7 @@
if (range != nullptr) {
InterferenceNode* node =
new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
- interval_node_map_.Insert(std::make_pair(sibling, node));
+ interval_node_map_.insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
// Fixed nodes should alias the canonical node for the corresponding register.
@@ -1303,7 +1303,7 @@
// Coalesce siblings.
LiveInterval* next_sibling = interval->GetNextSibling();
if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
- auto it = interval_node_map_.Find(next_sibling);
+ auto it = interval_node_map_.find(next_sibling);
if (it != interval_node_map_.end()) {
InterferenceNode* sibling_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1318,7 +1318,7 @@
if (parent->HasRegister()
&& parent->GetNextSibling() == interval
&& parent->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(parent);
+ auto it = interval_node_map_.find(parent);
if (it != interval_node_map_.end()) {
InterferenceNode* parent_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1341,7 +1341,7 @@
size_t position = predecessor->GetLifetimeEnd() - 1;
LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
if (existing != nullptr) {
- auto it = interval_node_map_.Find(existing);
+ auto it = interval_node_map_.find(existing);
if (it != interval_node_map_.end()) {
InterferenceNode* existing_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1364,7 +1364,7 @@
size_t position = predecessors[i]->GetLifetimeEnd() - 1;
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
@@ -1380,7 +1380,7 @@
= defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
// TODO: Could we consider lifetime holes here?
if (input_interval->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1407,7 +1407,7 @@
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
if (input_interval != nullptr &&
input_interval->HasHighInterval() == interval->HasHighInterval()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 8e98f19..c7683e0 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -262,14 +262,14 @@
std::unique_ptr<SchedulingNode> node(
new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
- nodes_map_.Insert(std::make_pair(instr, std::move(node)));
+ nodes_map_.insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
return result;
}
void Clear() {
- nodes_map_.Clear();
+ nodes_map_.clear();
contains_scheduling_barrier_ = false;
}
@@ -278,7 +278,7 @@
}
SchedulingNode* GetNode(const HInstruction* instr) const {
- auto it = nodes_map_.Find(instr);
+ auto it = nodes_map_.find(instr);
if (it == nodes_map_.end()) {
return nullptr;
} else {
@@ -294,7 +294,7 @@
bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
size_t Size() const {
- return nodes_map_.Size();
+ return nodes_map_.size();
}
// Dump the scheduling graph, in dot file format, appending it to the file
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index 1b43618..878967c 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -72,12 +72,12 @@
// Returns whether two Edge sets are equal (ArenaHashSet doesn't have "Equal" method).
static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
- if (set1->Size() != set2->Size()) {
+ if (set1->size() != set2->size()) {
return false;
}
for (auto e : *set1) {
- if (set2->Find(e) == set2->end()) {
+ if (set2->find(e) == set2->end()) {
return false;
}
}
@@ -472,8 +472,8 @@
continue;
}
- auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
- auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+ auto orig_redir = remap_orig_internal_->find(HEdge(orig_block_id, orig_succ_id));
+ auto copy_redir = remap_copy_internal_->find(HEdge(orig_block_id, orig_succ_id));
// Due to construction all successors of copied block were set to original.
if (copy_redir != remap_copy_internal_->end()) {
@@ -864,9 +864,9 @@
EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
- remap_orig_internal.Clear();
- remap_copy_internal.Clear();
- remap_incoming.Clear();
+ remap_orig_internal.clear();
+ remap_copy_internal.clear();
+ remap_incoming.clear();
// Check whether remapping info corresponds to loop peeling.
CollectRemappingInfoForPeelUnroll(/* to_unroll*/ false,
@@ -1022,16 +1022,16 @@
for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
HEdge e = HEdge(back_edge_block, loop_header);
if (to_unroll) {
- remap_orig_internal->Insert(e);
- remap_copy_internal->Insert(e);
+ remap_orig_internal->insert(e);
+ remap_copy_internal->insert(e);
} else {
- remap_copy_internal->Insert(e);
+ remap_copy_internal->insert(e);
}
}
// Set up remap_incoming edges set.
if (!to_unroll) {
- remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+ remap_incoming->insert(HEdge(loop_info->GetPreHeader(), loop_header));
}
}
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index df2e517..6f3bcda 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -708,8 +708,8 @@
orig_bb_set.SetBit(preheader->GetBlockId());
// Adjust incoming edges.
- remap_incoming.Clear();
- remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+ remap_incoming.clear();
+ remap_incoming.insert(HEdge(preheader->GetSinglePredecessor(), preheader));
HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
index c866504..4e892f2 100644
--- a/compiler/utils/dedupe_set-inl.h
+++ b/compiler/utils/dedupe_set-inl.h
@@ -71,13 +71,13 @@
const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
MutexLock lock(self, lock_);
HashedKey<InKey> hashed_in_key(hash, &in_key);
- auto it = keys_.Find(hashed_in_key);
+ auto it = keys_.find(hashed_in_key);
if (it != keys_.end()) {
DCHECK(it->Key() != nullptr);
return it->Key();
}
const StoreKey* store_key = alloc_.Copy(in_key);
- keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+ keys_.insert(HashedKey<StoreKey> { hash, store_key });
return store_key;
}
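
Add() above computes the hash once in the caller and probes with a
HashedKey wrapper, so neither the find() nor the insert() rehashes the
payload. The same pattern as a standalone sketch, with hypothetical types
(not the dedupe_set-inl.h API):

  #include <cstddef>
  #include <functional>
  #include <string>
  #include <unordered_set>

  struct HashedKey {
    size_t hash;             // computed once by the caller
    const std::string* key;  // non-owning; must outlive the set
    bool operator==(const HashedKey& other) const {
      return hash == other.hash && *key == *other.key;
    }
  };

  struct HashedKeyHash {
    // Reuse the stored hash; never recompute it from the payload.
    size_t operator()(const HashedKey& k) const { return k.hash; }
  };

  // Usage: hash once, then probe and insert with the same value.
  //   std::string s = "payload";
  //   size_t h = std::hash<std::string>{}(s);
  //   std::unordered_set<HashedKey, HashedKeyHash> keys;
  //   if (keys.find(HashedKey{h, &s}) == keys.end()) {
  //     keys.insert(HashedKey{h, &s});
  //   }
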
@@ -90,7 +90,7 @@
// Note: The total_probe_distance will be updated with the current state.
// It may have been higher before a re-hash.
global_stats->total_probe_distance += keys_.TotalProbeDistance();
- global_stats->total_size += keys_.Size();
+ global_stats->total_size += keys_.size();
for (const HashedKey<StoreKey>& key : keys_) {
auto it = stats.find(key.Hash());
if (it == stats.end()) {