Use HashSet<std::string> instead of unordered_set<>.
Change the default parameters for HashSet<std::string> to
allow passing StringPiece as a key, avoiding an unnecessary
allocation. Use the HashSet<std::string> instead of
std::unordered_set<std::string>. Rename HashSet<> functions
that mirror std::unordered_multiset<> to lower-case.
Fix CompilerDriver::LoadImageClasses() to avoid using
invalidated iterator.
Test: m test-art-host-gtest
Test: testrunner.py --host
Change-Id: I7f8b82ee0b07befc5a0ee1c420b08a2068ad931e
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1d4f020..c37d452 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -136,9 +136,9 @@
// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
// driver assumes ownership of the set, so the test should properly release the set.
-std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+std::unique_ptr<HashSet<std::string>> CommonCompilerTest::GetImageClasses() {
// Empty set: by default no classes are retained in the image.
- return new std::unordered_set<std::string>();
+ return std::make_unique<HashSet<std::string>>();
}
// Get ProfileCompilationInfo that should be passed to the driver.
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 39c8bd8..46b59a3 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
#include <list>
-#include <unordered_set>
#include <vector>
+#include "base/hash_set.h"
#include "common_runtime_test.h"
#include "compiler.h"
#include "oat_file.h"
@@ -63,9 +63,8 @@
InstructionSet GetInstructionSet() const;
- // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
- // driver assumes ownership of the set, so the test should properly release the set.
- virtual std::unordered_set<std::string>* GetImageClasses();
+ // Get the set of image classes given to the compiler-driver in SetUp.
+ virtual std::unique_ptr<HashSet<std::string>> GetImageClasses();
virtual ProfileCompilationInfo* GetProfileCompilationInfo();
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index aa8277e..d56b135 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "base/utils.h"
#include "compiled_method.h"
#include "linker/linker_patch.h"
@@ -80,65 +81,7 @@
public:
size_t operator()(const ArrayRef<ContentType>& array) const {
- const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
- // TODO: More reasonable assertion.
- // static_assert(IsPowerOfTwo(sizeof(ContentType)),
- // "ContentType is not power of two, don't know whether array layout is as assumed");
- uint32_t len = sizeof(ContentType) * array.size();
- if (kUseMurmur3Hash) {
- static constexpr uint32_t c1 = 0xcc9e2d51;
- static constexpr uint32_t c2 = 0x1b873593;
- static constexpr uint32_t r1 = 15;
- static constexpr uint32_t r2 = 13;
- static constexpr uint32_t m = 5;
- static constexpr uint32_t n = 0xe6546b64;
-
- uint32_t hash = 0;
-
- const int nblocks = len / 4;
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
- int i;
- for (i = 0; i < nblocks; i++) {
- uint32_t k = blocks[i];
- k *= c1;
- k = (k << r1) | (k >> (32 - r1));
- k *= c2;
-
- hash ^= k;
- hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
- }
-
- const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
- uint32_t k1 = 0;
-
- switch (len & 3) {
- case 3:
- k1 ^= tail[2] << 16;
- FALLTHROUGH_INTENDED;
- case 2:
- k1 ^= tail[1] << 8;
- FALLTHROUGH_INTENDED;
- case 1:
- k1 ^= tail[0];
-
- k1 *= c1;
- k1 = (k1 << r1) | (k1 >> (32 - r1));
- k1 *= c2;
- hash ^= k1;
- }
-
- hash ^= len;
- hash ^= (hash >> 16);
- hash *= 0x85ebca6b;
- hash ^= (hash >> 13);
- hash *= 0xc2b2ae35;
- hash ^= (hash >> 16);
-
- return hash;
- } else {
- return HashBytes(data, len);
- }
+ return DataHash()(array);
}
};
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6cb3936..bd2b107 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -264,7 +264,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ std::unique_ptr<HashSet<std::string>>&& image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info)
@@ -277,7 +277,7 @@
instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
non_relative_linker_patch_count_(0u),
- image_classes_(image_classes),
+ image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
@@ -991,7 +991,7 @@
bool CompilerDriver::IsImageClass(const char* descriptor) const {
if (image_classes_ != nullptr) {
// If we have a set of image classes, use those.
- return image_classes_->find(descriptor) != image_classes_->end();
+ return image_classes_->find(StringPiece(descriptor)) != image_classes_->end();
}
// No set of image classes, assume we include all the classes.
// NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
@@ -1002,7 +1002,7 @@
if (classes_to_compile_ == nullptr) {
return true;
}
- return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
}
bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
@@ -1091,7 +1091,7 @@
class RecordImageClassesVisitor : public ClassVisitor {
public:
- explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+ explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
: image_classes_(image_classes) {}
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1101,7 +1101,7 @@
}
private:
- std::unordered_set<std::string>* const image_classes_;
+ HashSet<std::string>* const image_classes_;
};
// Make a list of descriptors for classes to include in the image
@@ -1124,7 +1124,7 @@
hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
if (klass == nullptr) {
VLOG(compiler) << "Failed to find class " << descriptor;
- image_classes_->erase(it++);
+ it = image_classes_->erase(it);
self->ClearException();
} else {
++it;
@@ -1177,12 +1177,12 @@
RecordImageClassesVisitor visitor(image_classes_.get());
class_linker->VisitClasses(&visitor);
- CHECK_NE(image_classes_->size(), 0U);
+ CHECK(!image_classes_->empty());
}
static void MaybeAddToImageClasses(Thread* self,
ObjPtr<mirror::Class> klass,
- std::unordered_set<std::string>* image_classes)
+ HashSet<std::string>* image_classes)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(self, Thread::Current());
StackHandleScope<1> hs(self);
@@ -1190,11 +1190,10 @@
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
- std::pair<std::unordered_set<std::string>::iterator, bool> result =
- image_classes->insert(descriptor);
- if (!result.second) { // Previously inserted.
- break;
+ if (image_classes->find(StringPiece(descriptor)) != image_classes->end()) {
+ break; // Previously inserted.
}
+ image_classes->insert(descriptor);
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0, num_interfaces = klass->NumDirectInterfaces(); i != num_interfaces; ++i) {
ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass, i);
@@ -1216,7 +1215,7 @@
class ClinitImageUpdate {
public:
static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) {
std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
@@ -1273,7 +1272,7 @@
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
- const char* name = klass->GetDescriptor(&temp);
+ StringPiece name(klass->GetDescriptor(&temp));
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
data_->image_classes_.push_back(hs_.NewHandle(klass));
} else {
@@ -1292,7 +1291,7 @@
};
ClinitImageUpdate(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
: hs_(hs),
@@ -1339,7 +1338,7 @@
VariableSizedHandleScope& hs_;
mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
- std::unordered_set<std::string>* const image_class_descriptors_;
+ HashSet<std::string>* const image_class_descriptors_;
std::vector<Handle<mirror::Class>> image_classes_;
Thread* const self_;
const char* old_cause_;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 55f3561..ff70d96 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -20,7 +20,6 @@
#include <atomic>
#include <set>
#include <string>
-#include <unordered_set>
#include <vector>
#include "android-base/strings.h"
@@ -28,6 +27,7 @@
#include "arch/instruction_set.h"
#include "base/array_ref.h"
#include "base/bit_utils.h"
+#include "base/hash_set.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/quasi_atomic.h"
@@ -99,7 +99,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ std::unique_ptr<HashSet<std::string>>&& image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info);
@@ -144,7 +144,7 @@
return compiler_.get();
}
- const std::unordered_set<std::string>* GetImageClasses() const {
+ const HashSet<std::string>* GetImageClasses() const {
return image_classes_.get();
}
@@ -493,12 +493,12 @@
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
- std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+ std::unique_ptr<HashSet<std::string>> image_classes_;
// Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
- std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+ std::unique_ptr<HashSet<std::string>> classes_to_compile_;
std::atomic<uint32_t> number_of_soft_verifier_failures_;
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 1a7f926..54bff22 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -47,7 +47,7 @@
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
- candidate_fence_targets_.Insert(constructor_fence->InputAt(input_idx));
+ candidate_fence_targets_.insert(constructor_fence->InputAt(input_idx));
}
}
@@ -208,13 +208,13 @@
// there is no benefit to this extra complexity unless we also reordered
// the stores to come later.
candidate_fences_.clear();
- candidate_fence_targets_.Clear();
+ candidate_fence_targets_.clear();
}
// A publishing 'store' is only interesting if the value being stored
// is one of the fence `targets` in `candidate_fences`.
bool IsInterestingPublishTarget(HInstruction* store_input) const {
- return candidate_fence_targets_.Find(store_input) != candidate_fence_targets_.end();
+ return candidate_fence_targets_.find(store_input) != candidate_fence_targets_.end();
}
void MaybeMerge(HConstructorFence* target, HConstructorFence* src) {
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index fa7ad82..42e6498 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1183,7 +1183,7 @@
void ColoringIteration::BuildInterferenceGraph(
const ScopedArenaVector<LiveInterval*>& intervals,
const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
- DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+ DCHECK(interval_node_map_.empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
// jump from endpoint to endpoint, maintaining a set of intervals live at each
@@ -1208,7 +1208,7 @@
if (range != nullptr) {
InterferenceNode* node =
new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
- interval_node_map_.Insert(std::make_pair(sibling, node));
+ interval_node_map_.insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
// Fixed nodes should alias the canonical node for the corresponding register.
@@ -1303,7 +1303,7 @@
// Coalesce siblings.
LiveInterval* next_sibling = interval->GetNextSibling();
if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
- auto it = interval_node_map_.Find(next_sibling);
+ auto it = interval_node_map_.find(next_sibling);
if (it != interval_node_map_.end()) {
InterferenceNode* sibling_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1318,7 +1318,7 @@
if (parent->HasRegister()
&& parent->GetNextSibling() == interval
&& parent->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(parent);
+ auto it = interval_node_map_.find(parent);
if (it != interval_node_map_.end()) {
InterferenceNode* parent_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1341,7 +1341,7 @@
size_t position = predecessor->GetLifetimeEnd() - 1;
LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
if (existing != nullptr) {
- auto it = interval_node_map_.Find(existing);
+ auto it = interval_node_map_.find(existing);
if (it != interval_node_map_.end()) {
InterferenceNode* existing_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1364,7 +1364,7 @@
size_t position = predecessors[i]->GetLifetimeEnd() - 1;
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
@@ -1380,7 +1380,7 @@
= defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
// TODO: Could we consider lifetime holes here?
if (input_interval->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1407,7 +1407,7 @@
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
if (input_interval != nullptr &&
input_interval->HasHighInterval() == interval->HasHighInterval()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 8e98f19..c7683e0 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -262,14 +262,14 @@
std::unique_ptr<SchedulingNode> node(
new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
- nodes_map_.Insert(std::make_pair(instr, std::move(node)));
+ nodes_map_.insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
return result;
}
void Clear() {
- nodes_map_.Clear();
+ nodes_map_.clear();
contains_scheduling_barrier_ = false;
}
@@ -278,7 +278,7 @@
}
SchedulingNode* GetNode(const HInstruction* instr) const {
- auto it = nodes_map_.Find(instr);
+ auto it = nodes_map_.find(instr);
if (it == nodes_map_.end()) {
return nullptr;
} else {
@@ -294,7 +294,7 @@
bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
size_t Size() const {
- return nodes_map_.Size();
+ return nodes_map_.size();
}
// Dump the scheduling graph, in dot file format, appending it to the file
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index 1b43618..878967c 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -72,12 +72,12 @@
// Returns whether two Edge sets are equal (ArenaHashSet doesn't have "Equal" method).
static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
- if (set1->Size() != set2->Size()) {
+ if (set1->size() != set2->size()) {
return false;
}
for (auto e : *set1) {
- if (set2->Find(e) == set2->end()) {
+ if (set2->find(e) == set2->end()) {
return false;
}
}
@@ -472,8 +472,8 @@
continue;
}
- auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
- auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+ auto orig_redir = remap_orig_internal_->find(HEdge(orig_block_id, orig_succ_id));
+ auto copy_redir = remap_copy_internal_->find(HEdge(orig_block_id, orig_succ_id));
// Due to construction all successors of copied block were set to original.
if (copy_redir != remap_copy_internal_->end()) {
@@ -864,9 +864,9 @@
EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
- remap_orig_internal.Clear();
- remap_copy_internal.Clear();
- remap_incoming.Clear();
+ remap_orig_internal.clear();
+ remap_copy_internal.clear();
+ remap_incoming.clear();
// Check whether remapping info corresponds to loop peeling.
CollectRemappingInfoForPeelUnroll(/* to_unroll*/ false,
@@ -1022,16 +1022,16 @@
for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
HEdge e = HEdge(back_edge_block, loop_header);
if (to_unroll) {
- remap_orig_internal->Insert(e);
- remap_copy_internal->Insert(e);
+ remap_orig_internal->insert(e);
+ remap_copy_internal->insert(e);
} else {
- remap_copy_internal->Insert(e);
+ remap_copy_internal->insert(e);
}
}
// Set up remap_incoming edges set.
if (!to_unroll) {
- remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+ remap_incoming->insert(HEdge(loop_info->GetPreHeader(), loop_header));
}
}
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index df2e517..6f3bcda 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -708,8 +708,8 @@
orig_bb_set.SetBit(preheader->GetBlockId());
// Adjust incoming edges.
- remap_incoming.Clear();
- remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+ remap_incoming.clear();
+ remap_incoming.insert(HEdge(preheader->GetSinglePredecessor(), preheader));
HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
index c866504..4e892f2 100644
--- a/compiler/utils/dedupe_set-inl.h
+++ b/compiler/utils/dedupe_set-inl.h
@@ -71,13 +71,13 @@
const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
MutexLock lock(self, lock_);
HashedKey<InKey> hashed_in_key(hash, &in_key);
- auto it = keys_.Find(hashed_in_key);
+ auto it = keys_.find(hashed_in_key);
if (it != keys_.end()) {
DCHECK(it->Key() != nullptr);
return it->Key();
}
const StoreKey* store_key = alloc_.Copy(in_key);
- keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+ keys_.insert(HashedKey<StoreKey> { hash, store_key });
return store_key;
}
@@ -90,7 +90,7 @@
// Note: The total_probe_distance will be updated with the current state.
// It may have been higher before a re-hash.
global_stats->total_probe_distance += keys_.TotalProbeDistance();
- global_stats->total_size += keys_.Size();
+ global_stats->total_size += keys_.size();
for (const HashedKey<StoreKey>& key : keys_) {
auto it = stats.find(key.Hash());
if (it == stats.end()) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 00c893a..6cd947c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -27,7 +27,6 @@
#include <sstream>
#include <string>
#include <type_traits>
-#include <unordered_set>
#include <vector>
#if defined(__linux__) && defined(__arm__)
@@ -954,9 +953,9 @@
compiler_options_->force_determinism_ = force_determinism_;
if (passes_to_run_filename_ != nullptr) {
- passes_to_run_.reset(ReadCommentedInputFromFile<std::vector<std::string>>(
+ passes_to_run_ = ReadCommentedInputFromFile<std::vector<std::string>>(
passes_to_run_filename_,
- nullptr)); // No post-processing.
+ nullptr); // No post-processing.
if (passes_to_run_.get() == nullptr) {
Usage("Failed to read list of passes to run.");
}
@@ -1493,13 +1492,12 @@
// If we don't have a profile, treat it as an empty set of classes. b/77340429
if (image_classes_ == nullptr) {
// May be non-null when --image-classes is passed in, in that case avoid clearing the list.
- image_classes_.reset(new std::unordered_set<std::string>());
+ image_classes_.reset(new HashSet<std::string>());
}
if (profile_compilation_info_ != nullptr) {
// Filter out class path classes since we don't want to include these in the image.
image_classes_.reset(
- new std::unordered_set<std::string>(
- profile_compilation_info_->GetClassDescriptors(dex_files_)));
+ new HashSet<std::string>(profile_compilation_info_->GetClassDescriptors(dex_files_)));
VLOG(compiler) << "Loaded " << image_classes_->size()
<< " image class descriptors from profile";
if (VLOG_IS_ON(compiler)) {
@@ -1850,7 +1848,7 @@
compiler_kind_,
instruction_set_,
instruction_set_features_.get(),
- image_classes_.release(),
+ std::move(image_classes_),
thread_count_,
swap_fd_,
profile_compilation_info_.get()));
@@ -2390,20 +2388,20 @@
return false;
}
} else if (IsBootImage()) {
- image_classes_.reset(new std::unordered_set<std::string>);
+ image_classes_.reset(new HashSet<std::string>);
}
return true;
}
- static std::unique_ptr<std::unordered_set<std::string>> ReadClasses(const char* zip_filename,
- const char* classes_filename,
- const char* tag) {
- std::unique_ptr<std::unordered_set<std::string>> classes;
+ static std::unique_ptr<HashSet<std::string>> ReadClasses(const char* zip_filename,
+ const char* classes_filename,
+ const char* tag) {
+ std::unique_ptr<HashSet<std::string>> classes;
std::string error_msg;
if (zip_filename != nullptr) {
- classes.reset(ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg));
+ classes = ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg);
} else {
- classes.reset(ReadImageClassesFromFile(classes_filename));
+ classes = ReadImageClassesFromFile(classes_filename);
}
if (classes == nullptr) {
LOG(ERROR) << "Failed to create list of " << tag << " classes from '"
@@ -2414,9 +2412,9 @@
bool PrepareDirtyObjects() {
if (dirty_image_objects_filename_ != nullptr) {
- dirty_image_objects_.reset(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ dirty_image_objects_ = ReadCommentedInputFromFile<HashSet<std::string>>(
dirty_image_objects_filename_,
- nullptr));
+ nullptr);
if (dirty_image_objects_ == nullptr) {
LOG(ERROR) << "Failed to create list of dirty objects from '"
<< dirty_image_objects_filename_ << "'";
@@ -2678,29 +2676,28 @@
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::unordered_set<std::string>* ReadImageClassesFromFile(
+ static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromFile(
const char* image_classes_filename) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromFile<std::unordered_set<std::string>>(image_classes_filename,
- &process);
+ return ReadCommentedInputFromFile<HashSet<std::string>>(image_classes_filename, &process);
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::unordered_set<std::string>* ReadImageClassesFromZip(
+ static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromZip(
const char* zip_filename,
const char* image_classes_filename,
std::string* error_msg) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromZip<std::unordered_set<std::string>>(zip_filename,
- image_classes_filename,
- &process,
- error_msg);
+ return ReadCommentedInputFromZip<HashSet<std::string>>(zip_filename,
+ image_classes_filename,
+ &process,
+ error_msg);
}
// Read lines from the given file, dropping comments and empty lines. Post-process each line with
// the given function.
template <typename T>
- static T* ReadCommentedInputFromFile(
+ static std::unique_ptr<T> ReadCommentedInputFromFile(
const char* input_filename, std::function<std::string(const char*)>* process) {
std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
if (input_file.get() == nullptr) {
@@ -2710,13 +2707,13 @@
std::unique_ptr<T> result(
ReadCommentedInputStream<T>(*input_file, process));
input_file->close();
- return result.release();
+ return result;
}
// Read lines from the given file from the given zip file, dropping comments and empty lines.
// Post-process each line with the given function.
template <typename T>
- static T* ReadCommentedInputFromZip(
+ static std::unique_ptr<T> ReadCommentedInputFromZip(
const char* zip_filename,
const char* input_filename,
std::function<std::string(const char*)>* process,
@@ -2748,7 +2745,7 @@
// Read lines from the given stream, dropping comments and empty lines. Post-process each line
// with the given function.
template <typename T>
- static T* ReadCommentedInputStream(
+ static std::unique_ptr<T> ReadCommentedInputStream(
std::istream& in_stream,
std::function<std::string(const char*)>* process) {
std::unique_ptr<T> output(new T());
@@ -2765,7 +2762,7 @@
output->insert(output->end(), dot);
}
}
- return output.release();
+ return output;
}
void LogCompletionTime() {
@@ -2854,10 +2851,8 @@
ImageHeader::StorageMode image_storage_mode_;
const char* passes_to_run_filename_;
const char* dirty_image_objects_filename_;
- std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
- std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
- std::unique_ptr<std::unordered_set<std::string>> dirty_image_objects_;
+ std::unique_ptr<HashSet<std::string>> image_classes_;
+ std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
bool multi_image_;
bool is_host_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index f0daf69..12ecd3a 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -27,6 +27,7 @@
#include "art_method-inl.h"
#include "base/file_utils.h"
+#include "base/hash_set.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "class_linker-inl.h"
@@ -93,8 +94,8 @@
options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
}
- std::unordered_set<std::string>* GetImageClasses() OVERRIDE {
- return new std::unordered_set<std::string>(image_classes_);
+ std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+ return std::make_unique<HashSet<std::string>>(image_classes_);
}
ArtMethod* FindCopiedMethod(ArtMethod* origin, ObjPtr<mirror::Class> klass)
@@ -110,7 +111,7 @@
}
private:
- std::unordered_set<std::string> image_classes_;
+ HashSet<std::string> image_classes_;
};
inline CompilationHelper::~CompilationHelper() {
@@ -426,7 +427,7 @@
}
ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
- std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
+ HashSet<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
compiler_driver_.reset();
@@ -496,7 +497,7 @@
ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
EXPECT_TRUE(klass != nullptr) << descriptor;
uint8_t* raw_klass = reinterpret_cast<uint8_t*>(klass.Ptr());
- if (image_classes.find(descriptor) == image_classes.end()) {
+ if (image_classes.find(StringPiece(descriptor)) == image_classes.end()) {
EXPECT_TRUE(raw_klass >= image_end || raw_klass < image_begin) << descriptor;
} else {
// Image classes should be located inside the image.
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index da69b83..8840dfa 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2869,7 +2869,7 @@
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
- const std::unordered_set<std::string>* dirty_image_objects)
+ const HashSet<std::string>* dirty_image_objects)
: compiler_driver_(compiler_driver),
global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
image_objects_offset_begin_(0),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index c282a2a..2fcf5fd 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -31,6 +31,7 @@
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
+#include "base/hash_set.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "base/mem_map.h"
@@ -80,7 +81,7 @@
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
- const std::unordered_set<std::string>* dirty_image_objects);
+ const HashSet<std::string>* dirty_image_objects);
bool PrepareImageAddressSpace(TimingLogger* timings);
@@ -644,7 +645,7 @@
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
// Set of objects known to be dirty in the image. Can be nullptr if there are none.
- const std::unordered_set<std::string>* dirty_image_objects_;
+ const HashSet<std::string>* dirty_image_objects_;
class ComputeLazyFieldsForClassesVisitor;
class FixupClassVisitor;
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index 4b142a8..e7d5ed9 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -22,7 +22,7 @@
#include <memory> // For unique_ptr
#include <unordered_map>
-#include "base/utils.h"
+#include "base/data_hash.h"
#include "dex_writer.h"
namespace art {
diff --git a/libartbase/base/arena_containers.h b/libartbase/base/arena_containers.h
index bd57fb1..41b3bb9 100644
--- a/libartbase/base/arena_containers.h
+++ b/libartbase/base/arena_containers.h
@@ -70,15 +70,15 @@
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ArenaHashMap = HashMap<Key,
Value,
EmptyFn,
diff --git a/libartbase/base/data_hash.h b/libartbase/base/data_hash.h
new file mode 100644
index 0000000..5ad7779
--- /dev/null
+++ b/libartbase/base/data_hash.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_DATA_HASH_H_
+#define ART_LIBARTBASE_BASE_DATA_HASH_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+}
+
+class DataHash {
+ private:
+ static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+ template <class Container>
+ size_t operator()(const Container& array) const {
+ // Containers that provide the data() function use contiguous storage.
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+ uint32_t len = sizeof(typename Container::value_type) * array.size();
+ if (kUseMurmur3Hash) {
+ static constexpr uint32_t c1 = 0xcc9e2d51;
+ static constexpr uint32_t c2 = 0x1b873593;
+ static constexpr uint32_t r1 = 15;
+ static constexpr uint32_t r2 = 13;
+ static constexpr uint32_t m = 5;
+ static constexpr uint32_t n = 0xe6546b64;
+
+ uint32_t hash = 0;
+
+ const int nblocks = len / 4;
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ const unaligned_uint32_t *blocks = reinterpret_cast<const unaligned_uint32_t*>(data);
+ int i;
+ for (i = 0; i < nblocks; i++) {
+ uint32_t k = blocks[i];
+ k *= c1;
+ k = (k << r1) | (k >> (32 - r1));
+ k *= c2;
+
+ hash ^= k;
+ hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+ }
+
+ const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ k1 ^= tail[1] << 8;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ k1 ^= tail[0];
+
+ k1 *= c1;
+ k1 = (k1 << r1) | (k1 >> (32 - r1));
+ k1 *= c2;
+ hash ^= k1;
+ }
+
+ hash ^= len;
+ hash ^= (hash >> 16);
+ hash *= 0x85ebca6b;
+ hash ^= (hash >> 13);
+ hash *= 0xc2b2ae35;
+ hash ^= (hash >> 16);
+
+ return hash;
+ } else {
+ return HashBytes(data, len);
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_DATA_HASH_H_
diff --git a/libartbase/base/hash_map.h b/libartbase/base/hash_map.h
index 0d7198c..a3bb5b5 100644
--- a/libartbase/base/hash_map.h
+++ b/libartbase/base/hash_map.h
@@ -48,9 +48,12 @@
Fn fn_;
};
-template <class Key, class Value, class EmptyFn,
- class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
- class Alloc = std::allocator<std::pair<Key, Value>>>
+template <class Key,
+ class Value,
+ class EmptyFn,
+ class HashFn = DefaultHashFn<Key>,
+ class Pred = DefaultPred<Key>,
+ class Alloc = std::allocator<std::pair<Key, Value>>>
class HashMap : public HashSet<std::pair<Key, Value>,
EmptyFn,
HashMapWrapper<HashFn>,
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 2f810ea..2b1a5eb 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -22,16 +22,94 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <string>
#include <type_traits>
#include <utility>
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "bit_utils.h"
#include "macros.h"
namespace art {
+template <class Elem, class HashSetType>
+class HashSetIterator : std::iterator<std::forward_iterator_tag, Elem> {
+ public:
+ HashSetIterator(const HashSetIterator&) = default;
+ HashSetIterator(HashSetIterator&&) = default;
+ HashSetIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {}
+
+ // Conversion from iterator to const_iterator.
+ template <class OtherElem,
+ class OtherHashSetType,
+ typename = typename std::enable_if<
+ std::is_same<Elem, const OtherElem>::value &&
+ std::is_same<HashSetType, const OtherHashSetType>::value>::type>
+ HashSetIterator(const HashSetIterator<OtherElem, OtherHashSetType>& other)
+ : index_(other.index_), hash_set_(other.hash_set_) {}
+
+ HashSetIterator& operator=(const HashSetIterator&) = default;
+ HashSetIterator& operator=(HashSetIterator&&) = default;
+
+ bool operator==(const HashSetIterator& other) const {
+ return hash_set_ == other.hash_set_ && this->index_ == other.index_;
+ }
+
+ bool operator!=(const HashSetIterator& other) const {
+ return !(*this == other);
+ }
+
+ HashSetIterator operator++() { // Value after modification.
+ this->index_ = hash_set_->NextNonEmptySlot(index_);
+ return *this;
+ }
+
+ HashSetIterator operator++(int) {
+ HashSetIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ Elem& operator*() const {
+ DCHECK(!hash_set_->IsFreeSlot(this->index_));
+ return hash_set_->ElementForIndex(this->index_);
+ }
+
+ Elem* operator->() const {
+ return &**this;
+ }
+
+ private:
+ size_t index_;
+ HashSetType* hash_set_;
+
+ template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+ friend bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs);
+ template <class T, class EmptyFn, class HashFn, class Pred, class Alloc> friend class HashSet;
+ template <class OtherElem, class OtherHashSetType> friend class HashSetIterator;
+};
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ static_assert(
+ std::is_convertible<HashSetIterator<Elem1, HashSetType1>,
+ HashSetIterator<Elem2, HashSetType2>>::value ||
+ std::is_convertible<HashSetIterator<Elem2, HashSetType2>,
+ HashSetIterator<Elem1, HashSetType1>>::value, "Bad iterator types.");
+ DCHECK_EQ(lhs.hash_set_, rhs.hash_set_);
+ return lhs.index_ == rhs.index_;
+}
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator!=(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ return !(lhs == rhs);
+}
+
// Returns true if an item is empty.
template <class T>
class DefaultEmptyFn {
@@ -55,70 +133,35 @@
}
};
-// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
-// boxed. Uses linear probing to resolve collisions.
+template <class T>
+using DefaultHashFn = typename std::conditional<std::is_same<T, std::string>::value,
+ DataHash,
+ std::hash<T>>::type;
+
+struct DefaultStringEquals {
+ // Allow comparison with anything that can be compared to std::string, for example StringPiece.
+ template <typename T>
+ bool operator()(const std::string& lhs, const T& rhs) const {
+ return lhs == rhs;
+ }
+};
+
+template <class T>
+using DefaultPred = typename std::conditional<std::is_same<T, std::string>::value,
+ DefaultStringEquals,
+ std::equal_to<T>>::type;
+
+// Low memory version of a hash set, uses less memory than std::unordered_multiset since elements
+// aren't boxed. Uses linear probing to resolve collisions.
// EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item).
// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
// and more complicated.
-template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
- class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+template <class T,
+ class EmptyFn = DefaultEmptyFn<T>,
+ class HashFn = DefaultHashFn<T>,
+ class Pred = DefaultPred<T>,
+ class Alloc = std::allocator<T>>
class HashSet {
- template <class Elem, class HashSetType>
- class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
- public:
- BaseIterator(const BaseIterator&) = default;
- BaseIterator(BaseIterator&&) = default;
- BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
- }
- BaseIterator& operator=(const BaseIterator&) = default;
- BaseIterator& operator=(BaseIterator&&) = default;
-
- bool operator==(const BaseIterator& other) const {
- return hash_set_ == other.hash_set_ && this->index_ == other.index_;
- }
-
- bool operator!=(const BaseIterator& other) const {
- return !(*this == other);
- }
-
- BaseIterator operator++() { // Value after modification.
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return *this;
- }
-
- BaseIterator operator++(int) {
- BaseIterator temp = *this;
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return temp;
- }
-
- Elem& operator*() const {
- DCHECK(!hash_set_->IsFreeSlot(this->index_));
- return hash_set_->ElementForIndex(this->index_);
- }
-
- Elem* operator->() const {
- return &**this;
- }
-
- // TODO: Operator -- --(int) (and use std::bidirectional_iterator_tag)
-
- private:
- size_t index_;
- HashSetType* hash_set_;
-
- size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
- const size_t num_buckets = hash_set->NumBuckets();
- DCHECK_LT(index, num_buckets);
- do {
- ++index;
- } while (index < num_buckets && hash_set->IsFreeSlot(index));
- return index;
- }
-
- friend class HashSet;
- };
-
public:
using value_type = T;
using allocator_type = Alloc;
@@ -126,8 +169,8 @@
using const_reference = const T&;
using pointer = T*;
using const_pointer = const T*;
- using iterator = BaseIterator<T, HashSet>;
- using const_iterator = BaseIterator<const T, const HashSet>;
+ using iterator = HashSetIterator<T, HashSet>;
+ using const_iterator = HashSetIterator<const T, const HashSet>;
using size_type = size_t;
using difference_type = ptrdiff_t;
@@ -136,7 +179,7 @@
static constexpr size_t kMinBuckets = 1000;
// If we don't own the data, this will create a new array which owns the data.
- void Clear() {
+ void clear() {
DeallocateStorage();
num_elements_ = 0;
elements_until_expand_ = 0;
@@ -300,13 +343,12 @@
return const_iterator(this, NumBuckets());
}
- bool Empty() const {
- return Size() == 0;
+ size_t size() const {
+ return num_elements_;
}
- // Return true if the hash set has ownership of the underlying data.
- bool OwnsData() const {
- return owns_data_;
+ bool empty() const {
+ return size() == 0;
}
// Erase algorithm:
@@ -317,7 +359,7 @@
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
// element to its actual location/index.
- iterator Erase(iterator it) {
+ iterator erase(iterator it) {
// empty_index is the index that will become empty.
size_t empty_index = it.index_;
DCHECK(!IsFreeSlot(empty_index));
@@ -368,12 +410,12 @@
// Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
// object in the heap for performance solution.
template <typename K>
- iterator Find(const K& key) {
+ iterator find(const K& key) {
return FindWithHash(key, hashfn_(key));
}
template <typename K>
- const_iterator Find(const K& key) const {
+ const_iterator find(const K& key) const {
return FindWithHash(key, hashfn_(key));
}
@@ -387,14 +429,26 @@
return const_iterator(this, FindIndex(key, hash));
}
+ // Insert an element with hint, allows duplicates.
+ // Note: The hint is not very useful for a HashSet<> unless there are many hash conflicts
+ // and in that case the use of HashSet<> itself should be reconsidered.
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
+ return insert(element);
+ }
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
+ return insert(std::move(element));
+ }
+
// Insert an element, allows duplicates.
- template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void Insert(U&& element) {
- InsertWithHash(std::forward<U>(element), hashfn_(element));
+ iterator insert(const T& element) {
+ return InsertWithHash(element, hashfn_(element));
+ }
+ iterator insert(T&& element) {
+ return InsertWithHash(std::move(element), hashfn_(element));
}
template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void InsertWithHash(U&& element, size_t hash) {
+ iterator InsertWithHash(U&& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
Expand();
@@ -403,10 +457,7 @@
const size_t index = FirstAvailableSlot(IndexForHash(hash));
data_[index] = std::forward<U>(element);
++num_elements_;
- }
-
- size_t Size() const {
- return num_elements_;
+ return iterator(this, index);
}
void swap(HashSet& other) {
@@ -430,12 +481,12 @@
}
void ShrinkToMaximumLoad() {
- Resize(Size() / max_load_factor_);
+ Resize(size() / max_load_factor_);
}
// Reserve enough room to insert until Size() == num_elements without requiring to grow the hash
// set. No-op if the hash set is already large enough to do this.
- void Reserve(size_t num_elements) {
+ void reserve(size_t num_elements) {
size_t num_buckets = num_elements / max_load_factor_;
// Deal with rounding errors. Add one for rounding.
while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
@@ -466,7 +517,7 @@
// Calculate the current load factor and return it.
double CalculateLoadFactor() const {
- return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+ return static_cast<double>(size()) / static_cast<double>(NumBuckets());
}
// Make sure that everything reinserts in the right spot. Returns the number of errors.
@@ -510,7 +561,7 @@
// maximum load factor.
const double load_factor = CalculateLoadFactor();
if (load_factor > max_load_factor_) {
- Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ Resize(size() / ((min_load_factor_ + max_load_factor_) * 0.5));
}
}
@@ -605,7 +656,7 @@
// Expand the set based on the load factors.
void Expand() {
- size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
+ size_t min_index = static_cast<size_t>(size() / min_load_factor_);
// Resize based on the minimum load factor.
Resize(min_index);
}
@@ -615,7 +666,7 @@
if (new_size < kMinBuckets) {
new_size = kMinBuckets;
}
- DCHECK_GE(new_size, Size());
+ DCHECK_GE(new_size, size());
T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
// Reinsert all of the old elements.
@@ -649,6 +700,15 @@
return index;
}
+ size_t NextNonEmptySlot(size_t index) const {
+ const size_t num_buckets = NumBuckets();
+ DCHECK_LT(index, num_buckets);
+ do {
+ ++index;
+ } while (index < num_buckets && IsFreeSlot(index));
+ return index;
+ }
+
// Return new offset.
template <typename Elem>
static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
@@ -679,6 +739,9 @@
double min_load_factor_;
double max_load_factor_;
+ template <class Elem, class HashSetType>
+ friend class HashSetIterator;
+
ART_FRIEND_TEST(InternTableTest, CrossHash);
};
diff --git a/libartbase/base/hash_set_test.cc b/libartbase/base/hash_set_test.cc
index ff745b4..782a68b 100644
--- a/libartbase/base/hash_set_test.cc
+++ b/libartbase/base/hash_set_test.cc
@@ -24,6 +24,8 @@
#include <vector>
#include <gtest/gtest.h>
+
+#include "base/stringpiece.h"
#include "hash_map.h"
namespace art {
@@ -66,16 +68,16 @@
TEST_F(HashSetTest, TestSmoke) {
HashSet<std::string, IsEmptyFnString> hash_set;
const std::string test_string = "hello world 1234";
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- hash_set.Insert(test_string);
- auto it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ hash_set.insert(test_string);
+ auto it = hash_set.find(test_string);
ASSERT_EQ(*it, test_string);
- auto after_it = hash_set.Erase(it);
+ auto after_it = hash_set.erase(it);
ASSERT_TRUE(after_it == hash_set.end());
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ it = hash_set.find(test_string);
ASSERT_TRUE(it == hash_set.end());
}
@@ -86,26 +88,26 @@
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
- auto it = hash_set.Find(strings[i]);
+ hash_set.insert(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
- ASSERT_EQ(strings.size(), hash_set.Size());
+ ASSERT_EQ(strings.size(), hash_set.size());
// Try to erase the odd strings.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
- hash_set.Erase(it);
+ hash_set.erase(it);
}
// Test removed.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it == hash_set.end());
}
for (size_t i = 0; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
@@ -119,7 +121,7 @@
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
// Make sure we visit each string exactly once.
std::map<std::string, size_t> found_count;
@@ -133,7 +135,7 @@
// Remove all the elements with iterator erase.
for (auto it = hash_set.begin(); it != hash_set.end();) {
++found_count[*it];
- it = hash_set.Erase(it);
+ it = hash_set.erase(it);
ASSERT_EQ(hash_set.Verify(), 0U);
}
for (size_t i = 0; i < count; ++i) {
@@ -147,14 +149,14 @@
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
std::swap(hash_seta, hash_setb);
- hash_seta.Insert("TEST");
- hash_setb.Insert("TEST2");
+ hash_seta.insert("TEST");
+ hash_setb.insert("TEST2");
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
}
@@ -163,7 +165,7 @@
std::vector<std::string> strings = {"a", "b", "c", "d", "e", "f", "g"};
for (size_t i = 0; i < strings.size(); ++i) {
// Insert some strings into the beginning of our hash set to establish an initial size
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
hash_set.ShrinkToMaximumLoad();
@@ -174,12 +176,12 @@
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
random_strings.push_back(RandomString(10));
- hash_set.Insert(random_strings[i]);
+ hash_set.insert(random_strings[i]);
}
// Erase all the extra strings which guarantees that our load factor will be really bad.
for (size_t i = 0; i < count; ++i) {
- hash_set.Erase(hash_set.Find(random_strings[i]));
+ hash_set.erase(hash_set.find(random_strings[i]));
}
const double bad_load = hash_set.CalculateLoadFactor();
@@ -191,7 +193,7 @@
// Make sure all the initial elements we had are still there
for (const std::string& initial_string : strings) {
- EXPECT_NE(hash_set.end(), hash_set.Find(initial_string))
+ EXPECT_NE(hash_set.end(), hash_set.find(initial_string))
<< "expected to find " << initial_string;
}
}
@@ -201,7 +203,7 @@
static constexpr size_t kStringCount = 1000;
static constexpr double kEpsilon = 0.01;
for (size_t i = 0; i < kStringCount; ++i) {
- hash_set.Insert(RandomString(i % 10 + 1));
+ hash_set.insert(RandomString(i % 10 + 1));
}
// Check that changing the load factor resizes the table to be within the target range.
EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
@@ -228,29 +230,29 @@
SetSeed(seed);
LOG(INFO) << "Starting stress test with seed " << seed;
for (size_t i = 0; i < operations; ++i) {
- ASSERT_EQ(hash_set.Size(), std_set.size());
+ ASSERT_EQ(hash_set.size(), std_set.size());
size_t delta = std::abs(static_cast<ssize_t>(target_size) -
- static_cast<ssize_t>(hash_set.Size()));
+ static_cast<ssize_t>(hash_set.size()));
size_t n = PRand();
if (n % target_size == 0) {
- hash_set.Clear();
+ hash_set.clear();
std_set.clear();
- ASSERT_TRUE(hash_set.Empty());
+ ASSERT_TRUE(hash_set.empty());
ASSERT_TRUE(std_set.empty());
} else if (n % target_size < delta) {
// Skew towards adding elements until we are at the desired size.
const std::string& s = strings[PRand() % string_count];
- hash_set.Insert(s);
+ hash_set.insert(s);
std_set.insert(s);
- ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+ ASSERT_EQ(*hash_set.find(s), *std_set.find(s));
} else {
const std::string& s = strings[PRand() % string_count];
- auto it1 = hash_set.Find(s);
+ auto it1 = hash_set.find(s);
auto it2 = std_set.find(s);
ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
if (it1 != hash_set.end()) {
ASSERT_EQ(*it1, *it2);
- hash_set.Erase(it1);
+ hash_set.erase(it1);
std_set.erase(it2);
}
}
@@ -268,13 +270,13 @@
TEST_F(HashSetTest, TestHashMap) {
HashMap<std::string, int, IsEmptyStringPair> hash_map;
- hash_map.Insert(std::make_pair(std::string("abcd"), 123));
- hash_map.Insert(std::make_pair(std::string("abcd"), 124));
- hash_map.Insert(std::make_pair(std::string("bags"), 444));
- auto it = hash_map.Find(std::string("abcd"));
+ hash_map.insert(std::make_pair(std::string("abcd"), 123));
+ hash_map.insert(std::make_pair(std::string("abcd"), 124));
+ hash_map.insert(std::make_pair(std::string("bags"), 444));
+ auto it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 123);
- hash_map.Erase(it);
- it = hash_map.Find(std::string("abcd"));
+ hash_map.erase(it);
+ it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 124);
}
@@ -325,33 +327,50 @@
TEST_F(HashSetTest, TestLookupByAlternateKeyType) {
HashSet<std::vector<int>, IsEmptyFnVectorInt, VectorIntHashEquals, VectorIntHashEquals> hash_set;
- hash_set.Insert(std::vector<int>({1, 2, 3, 4}));
- hash_set.Insert(std::vector<int>({4, 2}));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::vector<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::vector<int>({1, 2, 3, 4})));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
+ hash_set.insert(std::vector<int>({1, 2, 3, 4}));
+ hash_set.insert(std::vector<int>({4, 2}));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::vector<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::vector<int>({1, 2, 3, 4})));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::forward_list<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::forward_list<int>({1, 2, 3, 4})));
}
TEST_F(HashSetTest, TestReserve) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
for (size_t size : sizes) {
- hash_set.Reserve(size);
+ hash_set.reserve(size);
const size_t buckets_before = hash_set.NumBuckets();
// Check that we expanded enough.
CHECK_GE(hash_set.ElementsUntilExpand(), size);
// Try inserting elements until we are at our reserve size and ensure the hash set did not
// expand.
- while (hash_set.Size() < size) {
- hash_set.Insert(std::to_string(hash_set.Size()));
+ while (hash_set.size() < size) {
+ hash_set.insert(std::to_string(hash_set.size()));
}
CHECK_EQ(hash_set.NumBuckets(), buckets_before);
}
// Check the behaviour for shrinking, it does not necessarily resize down.
constexpr size_t size = 100;
- hash_set.Reserve(size);
+ hash_set.reserve(size);
CHECK_GE(hash_set.ElementsUntilExpand(), size);
}
+TEST_F(HashSetTest, IteratorConversion) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator it = hash_set.insert(test_string);
+ HashSet<std::string>::const_iterator cit = it;
+ ASSERT_TRUE(it == cit);
+ ASSERT_EQ(*it, *cit);
+}
+
+TEST_F(HashSetTest, StringSearchStringPiece) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string);
+ HashSet<std::string>::iterator it = hash_set.find(StringPiece(test_string));
+ ASSERT_TRUE(it == insert_pos);
+}
+
} // namespace art
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index 6c78bad..80144d2 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -66,15 +66,15 @@
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ScopedArenaHashMap = HashMap<Key,
Value,
EmptyFn,
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 73c1c22..6e3b78e 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -244,20 +244,6 @@
}
}
-// Hash bytes using a relatively fast hash.
-static inline size_t HashBytes(const uint8_t* data, size_t len) {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
-}
-
} // namespace art
#endif // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 78db8b9..d435945 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -1746,8 +1746,8 @@
ErrorStringPrintf("Item %d offset is 0", i);
return false;
}
- DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
- offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
+ DCHECK(offset_to_type_map_.find(aligned_offset) == offset_to_type_map_.end());
+ offset_to_type_map_.insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
}
aligned_offset = ptr_ - begin_;
@@ -1951,7 +1951,7 @@
bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
DCHECK_NE(offset, 0u);
- auto it = offset_to_type_map_.Find(offset);
+ auto it = offset_to_type_map_.find(offset);
if (UNLIKELY(it == offset_to_type_map_.end())) {
ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
return false;
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 748e24e..2f249d5 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -57,7 +57,7 @@
// The name of the profile entry in the dex metadata file.
// DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
-const char* ProfileCompilationInfo::kDexMetadataProfileEntry = "primary.prof";
+const char ProfileCompilationInfo::kDexMetadataProfileEntry[] = "primary.prof";
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -1181,8 +1181,8 @@
// Allow archives without the profile entry. In this case, create an empty profile.
// This gives more flexible when ure-using archives that may miss the entry.
// (e.g. dex metadata files)
- LOG(WARNING) << std::string("Could not find entry ") + kDexMetadataProfileEntry +
- " in the zip archive. Creating an empty profile.";
+ LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
+ << " in the zip archive. Creating an empty profile.";
source->reset(ProfileSource::Create(nullptr));
return kProfileLoadSuccess;
}
@@ -2021,9 +2021,9 @@
return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
}
-std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
+HashSet<std::string> ProfileCompilationInfo::GetClassDescriptors(
const std::vector<const DexFile*>& dex_files) {
- std::unordered_set<std::string> ret;
+ HashSet<std::string> ret;
for (const DexFile* dex_file : dex_files) {
const DexFileData* data = FindDexData(dex_file);
if (data != nullptr) {
@@ -2032,7 +2032,7 @@
// Something went bad. The profile is probably corrupted. Abort and return an emtpy set.
LOG(WARNING) << "Corrupted profile: invalid type index "
<< type_idx.index_ << " in dex " << dex_file->GetLocation();
- return std::unordered_set<std::string>();
+ return HashSet<std::string>();
}
const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
ret.insert(dex_file->GetTypeDescriptor(type_id));
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index e28c5f1..3596f3e 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -24,6 +24,7 @@
#include "base/arena_object.h"
#include "base/atomic.h"
#include "base/bit_memory_region.h"
+#include "base/hash_set.h"
#include "base/malloc_arena_pool.h"
#include "base/mem_map.h"
#include "base/safe_map.h"
@@ -73,7 +74,7 @@
static const uint8_t kProfileMagic[];
static const uint8_t kProfileVersion[];
- static const char* kDexMetadataProfileEntry;
+ static const char kDexMetadataProfileEntry[];
static constexpr uint8_t kIndividualInlineCacheSize = 5;
@@ -426,7 +427,7 @@
ArenaAllocator* GetAllocator() { return &allocator_; }
// Return all of the class descriptors in the profile for a set of dex files.
- std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
+ HashSet<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
// Return true if the fd points to a profile file.
bool IsProfileFile(int fd);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c374e03..1172bb1 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1242,12 +1242,12 @@
ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
- auto it = new_class_set->Find(ClassTable::TableSlot(klass));
+ auto it = new_class_set->find(ClassTable::TableSlot(klass));
DCHECK(it != new_class_set->end());
DCHECK_EQ(it->Read(), klass);
ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- auto it2 = new_class_set->Find(ClassTable::TableSlot(super_class));
+ auto it2 = new_class_set->find(ClassTable::TableSlot(super_class));
DCHECK(it2 != new_class_set->end());
DCHECK_EQ(it2->Read(), super_class);
}
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index e313ec5..a233357 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -37,7 +37,7 @@
ReaderMutexLock mu(Thread::Current(), lock_);
TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read() == klass;
}
@@ -49,7 +49,7 @@
ReaderMutexLock mu(Thread::Current(), lock_);
TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read();
}
@@ -119,14 +119,14 @@
ReaderMutexLock mu(Thread::Current(), lock_);
size_t sum = 0;
for (size_t i = 0; i < classes_.size() - 1; ++i) {
- sum += classes_[i].Size();
+ sum += classes_[i].size();
}
return sum;
}
size_t ClassTable::NumReferencedNonZygoteClasses() const {
ReaderMutexLock mu(Thread::Current(), lock_);
- return classes_.back().Size();
+ return classes_.back().size();
}
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
@@ -145,12 +145,12 @@
TableSlot slot(klass);
WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read();
}
}
- classes_.back().Insert(slot);
+ classes_.back().insert(slot);
return klass;
}
@@ -163,12 +163,12 @@
void ClassTable::CopyWithoutLocks(const ClassTable& source_table) {
if (kIsDebugBuild) {
for (ClassSet& class_set : classes_) {
- CHECK(class_set.Empty());
+ CHECK(class_set.empty());
}
}
for (const ClassSet& class_set : source_table.classes_) {
for (const TableSlot& slot : class_set) {
- classes_.back().Insert(slot);
+ classes_.back().insert(slot);
}
}
}
@@ -187,9 +187,9 @@
DescriptorHashPair pair(descriptor, ComputeModifiedUtf8Hash(descriptor));
WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(pair);
+ auto it = class_set.find(pair);
if (it != class_set.end()) {
- class_set.Erase(it);
+ class_set.erase(it);
return true;
}
}
@@ -268,7 +268,7 @@
// default in case classes were pruned.
for (const ClassSet& class_set : classes_) {
for (const TableSlot& root : class_set) {
- combined.Insert(root);
+ combined.insert(root);
}
}
const size_t ret = combined.WriteToMemory(ptr);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2db8815..c8aaa21 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -366,7 +366,7 @@
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
- if (set.Empty()) {
+ if (set.empty()) {
// Avoid inserting empty sets.
return read_count;
}
@@ -392,7 +392,7 @@
table_to_write = &combined;
for (UnorderedSet& table : tables_) {
for (GcRoot<mirror::String>& string : table) {
- combined.Insert(string);
+ combined.insert(string);
}
}
} else {
@@ -403,9 +403,9 @@
void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
for (UnorderedSet& table : tables_) {
- auto it = table.Find(GcRoot<mirror::String>(s));
+ auto it = table.find(GcRoot<mirror::String>(s));
if (it != table.end()) {
- table.Erase(it);
+ table.erase(it);
return;
}
}
@@ -415,7 +415,7 @@
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (UnorderedSet& table : tables_) {
- auto it = table.Find(GcRoot<mirror::String>(s));
+ auto it = table.find(GcRoot<mirror::String>(s));
if (it != table.end()) {
return it->Read();
}
@@ -426,7 +426,7 @@
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (UnorderedSet& table : tables_) {
- auto it = table.Find(string);
+ auto it = table.find(string);
if (it != table.end()) {
return it->Read();
}
@@ -442,7 +442,7 @@
// Always insert the last table, the image tables are before and we avoid inserting into these
// to prevent dirty pages.
DCHECK(!tables_.empty());
- tables_.back().Insert(GcRoot<mirror::String>(s));
+ tables_.back().insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
@@ -467,7 +467,7 @@
mirror::Object* object = it->Read<kWithoutReadBarrier>();
mirror::Object* new_object = visitor->IsMarked(object);
if (new_object == nullptr) {
- it = set->Erase(it);
+ it = set->erase(it);
} else {
*it = GcRoot<mirror::String>(new_object->AsString());
++it;
@@ -480,7 +480,7 @@
tables_.end(),
0U,
[](size_t sum, const UnorderedSet& set) {
- return sum + set.Size();
+ return sum + set.size();
});
}