Fix cpplint whitespace/blank_line issues in runtime/ sources (remove stray blank lines, collapse empty constructor/destructor bodies to {}, add blank lines before access specifiers, and annotate closing namespace braces).
Change-Id: Ice937e95e23dd622c17054551d4ae4cebd0ef8a2
diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h
index 117e837..6711722 100644
--- a/runtime/atomic_integer.h
+++ b/runtime/atomic_integer.h
@@ -70,10 +70,11 @@
bool success = android_atomic_cas(expected_value, new_value, &value_) == 0;
return success;
}
+
private:
volatile int32_t value_;
};
-}
+} // namespace art
#endif // ART_RUNTIME_ATOMIC_INTEGER_H_
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 250d468..a644998 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -60,4 +60,4 @@
CHECK(!count_) << "Attempted to destroy barrier with non zero count";
}
-}
+} // namespace art
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index d26ae9e..298ae56 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -32,9 +32,7 @@
: barrier_(barrier),
count1_(count1),
count2_(count2),
- count3_(count3) {
-
- }
+ count3_(count3) {}
void Run(Thread* self) {
LOG(INFO) << "Before barrier 1 " << *self;
@@ -50,6 +48,7 @@
virtual void Finalize() {
delete this;
}
+
private:
Barrier* const barrier_;
AtomicInteger* const count1_;
@@ -100,9 +99,7 @@
CheckPassTask(Barrier* barrier, AtomicInteger* count, size_t subtasks)
: barrier_(barrier),
count_(count),
- subtasks_(subtasks) {
-
- }
+ subtasks_(subtasks) {}
void Run(Thread* self) {
for (size_t i = 0; i < subtasks_; ++i) {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index bbca603..d572cf9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -212,7 +212,6 @@
DCHECK_GT(cumulative_perc_.size(), 0ull);
size_t idx, upper_idx = 0, lower_idx = 0;
for (idx = 0; idx < cumulative_perc_.size(); idx++) {
-
if (per <= cumulative_perc_[idx]) {
upper_idx = idx;
break;
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index dfb556b..33a1e65 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -30,7 +30,6 @@
// Designed to be simple and used with timing logger in art.
template <class Value> class Histogram {
-
const double kAdjust;
const Value kBucketWidth;
const size_t kInitialBucketCount;
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 816cbea..0f00a04 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -50,9 +50,7 @@
} // namespace base
class CumulativeLogger {
-
public:
-
explicit CumulativeLogger(const std::string& name);
void prepare_stats();
~CumulativeLogger();
@@ -68,7 +66,6 @@
void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_);
private:
-
void AddPair(const std::string &label, uint64_t delta_time)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5a31c87..b502c9a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -155,7 +155,6 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object);
}
-
} gDebugInstrumentationListener;
// JDWP is allowed unless the Zygote forbids it.
@@ -761,7 +760,6 @@
JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>& counts)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
std::vector<mirror::Class*> classes;
counts.clear();
for (size_t i = 0; i < class_ids.size(); ++i) {
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index e915d77..1975e48 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -120,7 +120,6 @@
}
private:
-
ClassDataItemIterator& GetIterator() const {
CHECK(it_.get() != NULL);
return *it_.get();
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 7622604..5edea95 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,7 +40,6 @@
SpaceSetMap* set = *it;
set->Visit(visitor);
}
-
}
} // namespace accounting
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index f4b725c..1710579 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -106,7 +106,6 @@
explicit HeapBitmap(Heap* heap) : heap_(heap) {}
private:
-
const Heap* const heap_;
void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 19f1128..6edc067 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -64,9 +64,7 @@
}
// Clean up any resources associated with the bitmap.
-SpaceBitmap::~SpaceBitmap() {
-
-}
+SpaceBitmap::~SpaceBitmap() {}
void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 5a1bfe3..bf4c1ed 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -174,6 +174,7 @@
const size_t index = OffsetToIndex(offset);
return &bitmap_begin_[index];
}
+
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index a22faac..1684664 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -79,7 +79,6 @@
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
protected:
-
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index fde2b41..bdda9fa 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -78,7 +78,6 @@
void Dump(std::ostream& os) const;
private:
-
// Tries to initialize an ImageSpace from the given image path,
// returning NULL on error.
//
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index f7d776f..6aedd9c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -49,9 +49,7 @@
LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
: LargeObjectSpace(name),
- lock_("large object map space lock", kAllocSpaceLock) {
-
-}
+ lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
return new LargeObjectMapSpace(name);
@@ -147,9 +145,7 @@
AddFreeChunk(begin_, end_ - begin_, NULL);
}
-FreeListSpace::~FreeListSpace() {
-
-}
+FreeListSpace::~FreeListSpace() {}
void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
Chunk* chunk = ChunkFromAddr(address);
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index db845db..20a4867 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -60,7 +60,6 @@
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
protected:
-
explicit LargeObjectSpace(const std::string& name);
// Approximate number of bytes which have been allocated into the space.
@@ -165,6 +164,7 @@
DCHECK(m_previous == NULL ||
(m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
}
+
private:
size_t m_size;
Chunk* m_previous;
diff --git a/runtime/image_test.cc b/runtime/image_test.cc
index 9ab1d74..ee50118 100644
--- a/runtime/image_test.cc
+++ b/runtime/image_test.cc
@@ -31,7 +31,6 @@
namespace art {
class ImageTest : public CommonTest {
-
protected:
virtual void SetUp() {
ReserveImageSpace();
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2fb272c..45314c2 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,7 +366,6 @@
{
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get(), arg0.get());
-
}
result->SetL(soa.Decode<Object*>(jresult));
ScopedThreadStateChange tsc(self, kNative);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 8ef146c..e141496 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -361,7 +361,6 @@
static JdwpError VM_CapabilitiesNew(JdwpState*, Request& request, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
// The first few capabilities are the same as those reported by the older call.
VM_Capabilities(NULL, request, reply);
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index d909058..bbebece 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -497,13 +497,9 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
};
-class MANAGED Method : public AbstractMethod {
+class MANAGED Method : public AbstractMethod {};
-};
-
-class MANAGED Constructor : public AbstractMethod {
-
-};
+class MANAGED Constructor : public AbstractMethod {};
class MANAGED AbstractMethodClass : public Class {
private:
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2d2130c..e490d97 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -320,13 +320,11 @@
Class* java_lang_Class = GetClass();
Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass();
return this == java_lang_reflect_Field;
-
}
bool Class::IsMethodClass() const {
return (this == AbstractMethod::GetMethodClass()) ||
- (this == AbstractMethod::GetConstructorClass());
-
+ (this == AbstractMethod::GetConstructorClass());
}
void Class::SetClassLoader(ClassLoader* new_class_loader) {
diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h
index d92ff19..aaf93f7 100644
--- a/runtime/oat/runtime/argument_visitor.h
+++ b/runtime/oat/runtime/argument_visitor.h
@@ -199,7 +199,6 @@
uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
return (low_half & 0xffffffffULL) | (high_half << 32);
-
}
void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -244,6 +243,6 @@
bool is_split_long_or_double_;
};
-}
+} // namespace art
#endif // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index bb8341e..6562633 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -122,7 +122,6 @@
}
bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) {
-
char* absolute_path = realpath(elf_filename.c_str(), NULL);
if (absolute_path == NULL) {
return false;
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
index cbdefe8..d703db2 100644
--- a/runtime/runtime_support_llvm.cc
+++ b/runtime/runtime_support_llvm.cc
@@ -50,7 +50,6 @@
using namespace art;
extern "C" {
-
class ShadowFrameCopyVisitor : public StackVisitor {
public:
explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
@@ -844,5 +843,4 @@
void art_portable_constructor_barrier() {
LOG(FATAL) << "Implemented by IRBuilder.";
}
-
} // extern "C"
diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h
index 566f7bc..43ea953 100644
--- a/runtime/runtime_support_llvm.h
+++ b/runtime/runtime_support_llvm.h
@@ -18,13 +18,10 @@
#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
extern "C" {
-
//----------------------------------------------------------------------------
// Runtime Support Function Lookup Callback
//----------------------------------------------------------------------------
-
void* art_portable_find_runtime_support_func(void* context, const char* name);
-
} // extern "C"
#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
diff --git a/runtime/stack.h b/runtime/stack.h
index 0e2c4c5..99ba898 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -554,7 +554,6 @@
static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
-
instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const;
void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -567,6 +566,7 @@
size_t num_frames_;
// Depth of the frame we're currently at.
size_t cur_depth_;
+
protected:
Context* const context_;
};
@@ -638,6 +638,7 @@
spill_shifts--; // wind back one as we want the last match
return spill_shifts;
}
+
private:
const uint16_t* table_;
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index dd55195..a1fb862 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2104,9 +2104,7 @@
class RootCallbackVisitor {
public:
- RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {
-
- }
+ RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
visitor_(obj, arg_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 784a7ca..067ef2d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -180,10 +180,7 @@
WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size),
- task_(NULL) {
-
-}
+ : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
void WorkStealingWorker::Run() {
Thread* self = Thread::Current();
@@ -254,9 +251,7 @@
}
}
-WorkStealingWorker::~WorkStealingWorker() {
-
-}
+WorkStealingWorker::~WorkStealingWorker() {}
WorkStealingThreadPool::WorkStealingThreadPool(size_t num_threads)
: ThreadPool(0),
@@ -288,8 +283,6 @@
return NULL;
}
-WorkStealingThreadPool::~WorkStealingThreadPool() {
-
-}
+WorkStealingThreadPool::~WorkStealingThreadPool() {}
} // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b9f185d..7b626fb 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -124,9 +124,7 @@
class WorkStealingTask : public Task {
public:
- WorkStealingTask() : ref_count_(0) {
-
- }
+ WorkStealingTask() : ref_count_(0) {}
size_t GetRefCount() const {
return ref_count_;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 9b66318..98178bc 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -105,9 +105,7 @@
TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth)
: thread_pool_(thread_pool),
count_(count),
- depth_(depth) {
-
- }
+ depth_(depth) {}
void Run(Thread* self) {
if (depth_ > 1) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 5bd6a8d..bd9c140 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -78,6 +78,7 @@
mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
explicit Trace(File* trace_file, int buffer_size, int flags);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5a70f2a..ff7f594 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3749,7 +3749,6 @@
}
MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
-
// It is risky to rely on reg_types for sharpening in cases of soft
// verification, we might end up sharpening to a wrong implementation. Just abort.
if (!failure_messages_.empty()) {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c66e7cb..5b806c4 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -309,6 +309,7 @@
// Destroy the singleton instance.
static void Destroy();
+
private:
ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -338,6 +339,7 @@
// Destroy the singleton instance.
static void Destroy();
+
private:
UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -875,6 +877,7 @@
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -909,6 +912,7 @@
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index f37edff..d2c9dd6 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -414,7 +414,6 @@
EXPECT_EQ(expected, unresolved_merged.Dump());
}
-
TEST_F(RegTypeReferenceTest, JavalangString) {
// Add a class to the cache then look for the same class and make sure it is a
// Hit the second time. Then check for the same effect when using
@@ -433,8 +432,8 @@
const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
-
}
+
TEST_F(RegTypeReferenceTest, JavalangObject) {
// Add a class to the cache then look for the same class and make sure it is a
// Hit the second time. Then I am checking for the same effect when using
@@ -474,7 +473,6 @@
TEST_F(RegTypeTest, ConstPrecision) {
-
// Tests creating primitive types types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3a2145b9..d2abaac 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -254,7 +254,6 @@
SetRegisterTypeWide(vdst, type_l, type_h); // also sets the high
result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
-
}
}