Perf tweaks to compiling and oat writing.
Make hot quick compiler routines inlinable.
Remove computation/use of SSA strings.
Use vector insert when writing to the end of a vector in the output stream, to
avoid a memset followed by a memcpy.
When generating boot.oat/art, these changes improve performance by around 2.5%.
Change-Id: I3d0bdb01333efe8f0eda4bdf97225e0b307f934d
diff --git a/src/compiler/dex/arena_bit_vector.h b/src/compiler/dex/arena_bit_vector.h
index a66147b..a950e82 100644
--- a/src/compiler/dex/arena_bit_vector.h
+++ b/src/compiler/dex/arena_bit_vector.h
@@ -99,13 +99,19 @@
void Copy(ArenaBitVector* src);
void Intersect(const ArenaBitVector* src2);
void Union(const ArenaBitVector* src);
- bool Equal(const ArenaBitVector* src);
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const ArenaBitVector* src) {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
+ }
int NumSetBits();
uint32_t GetStorageSize() const { return storage_size_; }
bool IsExpandable() const { return expandable_; }
uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
uint32_t* GetRawStorage() { return storage_; }
+ const uint32_t* GetRawStorage() const { return storage_; }
private:
ArenaAllocator* const arena_;