Modernise code to use the override specifier
Generated by clang-tidy, with the IgnoreDestructors option enabled.
Test: m checkbuild
Bug: 116509795
Change-Id: I5dafa10c2cf605165581b8cf7dd2633ed101ed65
diff --git a/build/Android.bp b/build/Android.bp
index 3ee7f92..7b807d5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -100,6 +100,10 @@
// We use it to implement OFFSETOF_MEMBER - see macros.h.
"-Wno-invalid-offsetof",
+ // Enable inconsistent-missing-override warning. This warning is disabled by default in
+ // Android.
+ "-Winconsistent-missing-override",
+
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 0289e9c..dde39d4 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -176,7 +176,7 @@
public:
InternalCodeAllocator() : size_(0) { }
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
size_ = size;
memory_.reset(new uint8_t[size]);
return memory_.get();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4670b3f..fedad0c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2934,7 +2934,7 @@
size_t ComputeHashCode() const override { return 0; }
// The null constant representation is a 0-bit pattern.
- virtual bool IsZeroBitPattern() const { return true; }
+ bool IsZeroBitPattern() const override { return true; }
DECLARE_INSTRUCTION(NullConstant);
@@ -6316,7 +6316,7 @@
bool CanBeMoved() const override { return true; }
- bool InstructionDataEquals(const HInstruction* other) const;
+ bool InstructionDataEquals(const HInstruction* other) const override;
size_t ComputeHashCode() const override { return type_index_.index_; }
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 16131e1..f0e7e55 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -94,7 +94,7 @@
void AllocateRegisters() override;
- bool Validate(bool log_fatal_on_failure);
+ bool Validate(bool log_fatal_on_failure) override;
private:
// Collect all intervals and prepare for register allocation.
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 875593b..4c7a3bb 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -55,7 +55,7 @@
: codegen_(down_cast<CodeGeneratorARMType*>(codegen)) {}
// Default visitor for instructions not handled specifically below.
- void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) {
+ void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) override {
last_visited_latency_ = kArmIntegerOpLatency;
}
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 7f6549d..ba5a743 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -58,7 +58,7 @@
class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
public:
// Default visitor for instructions not handled specifically below.
- void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) {
+ void VisitInstruction(HInstruction* ATTRIBUTE_UNUSED) override {
last_visited_latency_ = kArm64IntegerOpLatency;
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 674bf12..0b1b6d2 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -188,7 +188,7 @@
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) override;
// Create a new label that can be used with Jump/Bind calls.
std::unique_ptr<JNIMacroLabel> CreateLabel() override;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index fdecab8..9e01a70 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -81,7 +81,7 @@
const uint8_t* CodeBufferBaseAddress() const override;
// Copy instructions out of assembly buffer into the given region of memory.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 69189a4..a24071d 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -287,7 +287,7 @@
size_t CodeSize() const override { return Assembler::CodeSize(); }
size_t CodePosition() override;
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
virtual ~MipsAssembler() {
for (auto& branch : branches_) {
@@ -1372,7 +1372,7 @@
void FinalizeCode() override;
// Emit branches and finalize all instructions.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
// must be used instead of MipsLabel::GetPosition()).
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 2f991e9..b331cee 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -440,7 +440,7 @@
}
size_t CodeSize() const override { return Assembler::CodeSize(); }
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
// Emit Machine Instructions.
void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
@@ -1437,7 +1437,7 @@
void FinalizeCode() override;
// Emit branches and finalize all instructions.
- void FinalizeInstructions(const MemoryRegion& region);
+ void FinalizeInstructions(const MemoryRegion& region) override;
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
// must be used instead of Mips64Label::GetPosition()).
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 465ebbe..4c2fd8f 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -91,7 +91,7 @@
void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
void CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset64 thr_offs,
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 7209fbf..a0f1093 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -75,7 +75,7 @@
class ImageTest : public CommonCompilerDriverTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
ReserveImageSpace();
CommonCompilerTest::SetUp();
}
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index 81d55fc..215bf88 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -65,7 +65,7 @@
return c;
}
- int sync() {
+ int sync() override {
return out_sbuf_->pubsync();
}
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index aaa1ee6..39fd8c8 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -160,7 +160,7 @@
explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
~Mutex();
- virtual bool IsMutex() const { return true; }
+ bool IsMutex() const override { return true; }
// Block until mutex is free then acquire exclusive access.
void ExclusiveLock(Thread* self) ACQUIRE();
@@ -200,7 +200,7 @@
return recursion_count_;
}
- virtual void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// For negative capabilities in clang annotations.
const Mutex& operator!() const { return *this; }
@@ -249,7 +249,7 @@
explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
~ReaderWriterMutex();
- virtual bool IsReaderWriterMutex() const { return true; }
+ bool IsReaderWriterMutex() const override { return true; }
// Block until ReaderWriterMutex is free then acquire exclusive access.
void ExclusiveLock(Thread* self) ACQUIRE();
@@ -321,7 +321,7 @@
// one or more readers.
pid_t GetExclusiveOwnerTid() const;
- virtual void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// For negative capabilities in clang annotations.
const ReaderWriterMutex& operator!() const { return *this; }
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 70f35c8..bfae8a1 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -28,7 +28,7 @@
public:
void SetUp() override;
- virtual void PreRuntimeCreate();
+ void PreRuntimeCreate() override;
void PostRuntimeCreate() override;
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index ff9597c..75cfdba 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -227,7 +227,7 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
@@ -301,7 +301,7 @@
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
// Revoke all the thread-local buffers.
- void RevokeAllThreadLocalBuffers();
+ void RevokeAllThreadLocalBuffers() override;
// Whether or not we count how many of each type of object were scanned.
static constexpr bool kCountScannedTypes = false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 6fab371..f23d416 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -158,7 +158,7 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ override REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
@@ -192,14 +192,14 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
- void ProcessMarkStack()
+ void ProcessMarkStack() override
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
- void RevokeAllThreadLocalBuffers();
+ void RevokeAllThreadLocalBuffers() override;
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 6d9fd04..3e4961a 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -87,12 +87,12 @@
}
// Override capacity so that we only return the possibly limited capacity
- size_t Capacity() const {
+ size_t Capacity() const override {
return growth_end_ - begin_;
}
// The total amount of memory reserved for the space.
- size_t NonGrowthLimitCapacity() const {
+ size_t NonGrowthLimitCapacity() const override {
return GetMemMap()->Size();
}
@@ -107,18 +107,18 @@
// Reset the space to empty.
void Clear() override REQUIRES(!block_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
- size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
- size_t RevokeAllThreadLocalBuffers()
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
+ size_t RevokeAllThreadLocalBuffers() override
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
void AssertAllThreadLocalBuffersAreRevoked()
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
+ uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
- uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
+ uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
bool IsEmpty() const {
return Begin() == End();
@@ -128,7 +128,7 @@
return true;
}
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return byte_obj >= Begin() && byte_obj < End();
}
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index e91602f..930f557 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -102,7 +102,7 @@
#ifndef NDEBUG
// Override only in the debug build.
- void CheckMoreCoreForPrecondition();
+ void CheckMoreCoreForPrecondition() override;
#endif
void* GetMspace() const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index e27810d..c020dc1 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -35,7 +35,7 @@
// An image space is a space backed with a memory mapped image.
class ImageSpace : public MemMapSpace {
public:
- SpaceType GetType() const {
+ SpaceType GetType() const override {
return kSpaceTypeImageSpace;
}
@@ -107,7 +107,7 @@
return live_bitmap_.get();
}
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
// Sweeping image spaces is a NOP.
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 86ecd85..4d1cbc0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -97,7 +97,7 @@
return End() - Begin();
}
// Return true if we contain the specified address.
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
@@ -153,14 +153,14 @@
// of malloc.
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
REQUIRES(!lock_);
- size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
+ size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
- bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+ bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
@@ -193,7 +193,7 @@
override REQUIRES(!lock_);
size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
- void Dump(std::ostream& os) const REQUIRES(!lock_);
+ void Dump(std::ostream& os) const override REQUIRES(!lock_);
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9a90dfd..5000656 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -40,7 +40,7 @@
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
- SpaceType GetType() const {
+ SpaceType GetType() const override {
return kSpaceTypeMallocSpace;
}
@@ -49,14 +49,14 @@
size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) = 0;
// Allocate num_bytes without allowing the underlying space to grow.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
+ mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) override = 0;
// Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
- virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
- virtual size_t Free(Thread* self, mirror::Object* ptr)
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override = 0;
+ size_t Free(Thread* self, mirror::Object* ptr) override
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns the maximum bytes that could be allocated for the given
@@ -98,12 +98,12 @@
}
// Override capacity so that we only return the possibly limited capacity
- size_t Capacity() const {
+ size_t Capacity() const override {
return growth_limit_;
}
// The total amount of memory reserved for the alloc space.
- size_t NonGrowthLimitCapacity() const {
+ size_t NonGrowthLimitCapacity() const override {
return GetMemMap()->Size();
}
@@ -111,7 +111,7 @@
// shrinking is supported.
void ClampGrowthLimit();
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
void SetGrowthLimit(size_t growth_limit);
@@ -129,8 +129,8 @@
// aggressive in releasing unused pages. Invalidates the space its called on.
ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
- virtual uint64_t GetBytesAllocated() = 0;
- virtual uint64_t GetObjectsAllocated() = 0;
+ uint64_t GetBytesAllocated() override = 0;
+ uint64_t GetObjectsAllocated() override = 0;
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
@@ -170,7 +170,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(lock_);
- virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return &SweepCallback;
}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index d8b54e2..0bbc76a 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -134,15 +134,15 @@
// growth limit.
void ClampGrowthLimit(size_t new_capacity) REQUIRES(!region_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
// Dump region containing object `obj`. Precondition: `obj` is in the region space.
void DumpRegionForObject(std::ostream& os, mirror::Object* obj) REQUIRES(!region_lock_);
void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);
- size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!region_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
- size_t RevokeAllThreadLocalBuffers()
+ size_t RevokeAllThreadLocalBuffers() override
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
void AssertAllThreadLocalBuffersAreRevoked()
@@ -165,10 +165,10 @@
template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
- uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
+ uint64_t GetBytesAllocated() override REQUIRES(!region_lock_) {
return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
}
- uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
+ uint64_t GetObjectsAllocated() override REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
}
uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
@@ -194,7 +194,7 @@
return true;
}
- bool Contains(const mirror::Object* obj) const {
+ bool Contains(const mirror::Object* obj) const override {
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return byte_obj >= Begin() && byte_obj < Limit();
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9e95c16..00f5ab2 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -130,8 +130,8 @@
uint64_t GetBytesAllocated() override;
uint64_t GetObjectsAllocated() override;
- size_t RevokeThreadLocalBuffers(Thread* thread);
- size_t RevokeAllThreadLocalBuffers();
+ size_t RevokeThreadLocalBuffers(Thread* thread) override;
+ size_t RevokeAllThreadLocalBuffers() override;
void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 3a42f98..05ff55b 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -419,7 +419,7 @@
bool IsContinuousMemMapAllocSpace() const override {
return true;
}
- ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
+ ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() override {
return this;
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 03e2ec8..09db40e 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -36,7 +36,7 @@
accounting::ContinuousSpaceBitmap* mark_bitmap)
REQUIRES_SHARED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const override;
SpaceType GetType() const override {
return kSpaceTypeZygoteSpace;
@@ -63,11 +63,11 @@
return 0U;
}
- uint64_t GetBytesAllocated() {
+ uint64_t GetBytesAllocated() override {
return Size();
}
- uint64_t GetObjectsAllocated() {
+ uint64_t GetObjectsAllocated() override {
return objects_allocated_.load();
}
@@ -81,7 +81,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return &SweepCallback;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f4222ae..bdbb697 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4235,7 +4235,7 @@
void Thread::ClearAllInterpreterCaches() {
static struct ClearInterpreterCacheClosure : Closure {
- virtual void Run(Thread* thread) {
+ void Run(Thread* thread) override {
thread->GetInterpreterCache()->Clear(thread);
}
} closure;
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 55d2f44..1d600d7 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -94,7 +94,7 @@
void ProcessDexFile(const DexFile& dex_file) override;
void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) override;
- void Dump(std::ostream& os, uint64_t total_size) const;
+ void Dump(std::ostream& os, uint64_t total_size) const override;
private:
// Total string ids loaded from dex code.