ART: Refactor for bugprone-argument-comment
Handles the runtime/ directory.
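
For context, bugprone-argument-comment flags argument comments that do
not match the name of the parameter they annotate, and its fix-its use
the /*param=*/ spelling, which the check can then verify against the
declaration. A minimal before/after sketch, using the signature of one
of the call sites touched below (paraphrased here, not part of this
patch):

  void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

  // Before: a plain comment; a mismatched or stale name goes unnoticed.
  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);

  // After: the trailing '=' marks this as an argument comment that the
  // check validates against the parameter name.
  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);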
Bug: 116054210
Test: WITH_TIDY=1 mmma art
Change-Id: Ibc0d5086809d647f0ce4df5452eb84442d27ecf0
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2c5465e..c1a03ab 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -174,7 +174,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg12 = nullptr; // Cannot use register 12 (IP) to pass arguments.
qpoints->pReadBarrierMarkReg13 = nullptr; // Cannot use register 13 (SP) to pass arguments.
qpoints->pReadBarrierMarkReg14 = nullptr; // Cannot use register 14 (LR) to pass arguments.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 4c43b7e..e681d63 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -190,7 +190,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMarkReg16 = nullptr; // IP0 is used as a temp by the asm stub.
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
}
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 05172db..cbf5681 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -184,7 +184,7 @@
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
// Alloc
- ResetQuickAllocEntryPoints(qpoints, /*is_active*/ false);
+ ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
// Cast
qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
@@ -445,7 +445,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
// Cannot use the following registers to pass arguments:
// 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
// Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 2acfe14..741d41a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -191,7 +191,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
// Cannot use the following registers to pass arguments:
// 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
// Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index e8df90e..de19317 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1899,7 +1899,7 @@
LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
ImtConflictTable* empty_conflict_table =
- Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
+ Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index ffb0c94..3db4ede 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -98,7 +98,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (ESP) to pass arguments.
// x86 has only 8 core registers.
qpoints->pReadBarrierMarkReg08 = nullptr;
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6bae69c..db011ba 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -120,7 +120,7 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+ UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (RSP) to pass arguments.
// x86-64 has only 16 core registers.
qpoints->pReadBarrierMarkReg16 = nullptr;
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 6cbd9e4..e20e7f3 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -47,7 +47,7 @@
ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
DCHECK(GetDeclaringClass()->IsProxyClass());
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupClass(
- Thread::Current(), descriptor, /* class_loader */ nullptr);
+ Thread::Current(), descriptor, /* class_loader= */ nullptr);
DCHECK(klass != nullptr);
return klass;
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 68ccfee..4a19b10 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -324,12 +324,12 @@
if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(
- self, this, nullptr, args, result, /*stay_in_interpreter*/ true);
+ self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
} else {
mirror::Object* receiver =
reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
art::interpreter::EnterInterpreterFromInvoke(
- self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
+ self, this, receiver, args + 1, result, /*stay_in_interpreter=*/ true);
}
} else {
DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 851c23f..50b42d4 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,7 @@
size = RoundUp(size, kPageSize);
std::string error_msg;
MemMap map = MemMap::MapAnonymous(name,
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
low_4gb,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index c11e3d1..9952283 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1250,9 +1250,9 @@
#undef UPDATE_CURRENT_LOCK_LEVEL
// List of mutexes that we may hold when accessing a weak ref.
- AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
- AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
- AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
InitConditions();
}
diff --git a/runtime/cha.cc b/runtime/cha.cc
index b600df6..de4aebe 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -363,7 +363,7 @@
// non-single-implementation already.
VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
method_in_super->GetMethodIndex(),
- nullptr /* excluded_method */);
+ /* excluded_method= */ nullptr);
return;
}
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2536b23..0dc62d3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -315,7 +315,7 @@
// Check if the invoke type matches the class type.
ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
- if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) {
+ if (CheckInvokeClassMismatch</* kThrow= */ false>(dex_cache, type, method_idx, class_loader)) {
return nullptr;
}
// Check access.
@@ -366,7 +366,7 @@
// Check if the invoke type matches the class type.
ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
- if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) {
+ if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 7549c04..c18abab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -496,7 +496,7 @@
// Space (LOS) -- see the comment about the dirty card scanning logic in
// art::gc::collector::ConcurrentCopying::MarkingPhase.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
- AllocClass</* kMovable */ false>(
+ AllocClass</* kMovable= */ false>(
self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
java_lang_String->SetStringClass();
mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self);
@@ -1039,8 +1039,8 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!AddImageSpace(image_space,
ScopedNullHandle<mirror::ClassLoader>(),
- /*dex_elements*/nullptr,
- /*dex_location*/nullptr,
+ /*dex_elements=*/nullptr,
+ /*dex_location=*/nullptr,
/*out*/&dex_files,
error_msg)) {
return false;
@@ -1127,7 +1127,10 @@
}
return true; // Continue with the next Element.
};
- bool error = VisitClassLoaderDexElements(soa, handle, add_element_names, /* error */ false);
+ bool error = VisitClassLoaderDexElements(soa,
+ handle,
+ add_element_names,
+ /* defaultReturn= */ false);
if (error) {
// An error occurred during DexPathList Element visiting.
return false;
@@ -1259,16 +1262,16 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
if (space.HasAddress(obj)) {
if (obj->IsDexCache()) {
- obj->VisitReferences</* kVisitNativeRoots */ true,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ obj->VisitReferences</*kVisitNativeRoots=*/ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
} else {
// Don't visit native roots for non-dex-cache as they can't contain
// native references to strings. This is verified during compilation
// by ImageWriter::VerifyNativeGCRootInvariants.
- obj->VisitReferences</* kVisitNativeRoots */ false,
- kVerifyNone,
- kWithoutReadBarrier>(visitor, visitor);
+ obj->VisitReferences</*kVisitNativeRoots=*/ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
}
}
});
@@ -2241,7 +2244,7 @@
for (const ClassLoaderData& data : class_loaders_) {
// CHA unloading analysis is not needed. No negative consequences are expected because
// all the classloaders are deleted at the same time.
- DeleteClassLoader(self, data, false /*cleanup_cha*/);
+ DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
}
class_loaders_.clear();
}
@@ -2345,7 +2348,7 @@
// in the `klass_` field of one of its instances allocated in the Large-Object
// Space (LOS) -- see the comment about the dirty card scanning logic in
// art::gc::collector::ConcurrentCopying::MarkingPhase.
- return AllocClass</* kMovable */ false>(
+ return AllocClass</* kMovable= */ false>(
self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
}
@@ -3441,7 +3444,7 @@
CHECK(dex_cache != nullptr) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
- RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
+ RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr);
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -5012,7 +5015,7 @@
ArtField* art_field = ResolveField(field.GetIndex(),
dex_cache,
class_loader,
- /* is_static */ true);
+ /* is_static= */ true);
if (Runtime::Current()->IsActiveTransaction()) {
value_it.ReadValueToField<true>(art_field);
} else {
@@ -6412,8 +6415,8 @@
unimplemented_method,
conflict_method,
klass,
- /*create_conflict_tables*/true,
- /*ignore_copied_methods*/false,
+ /*create_conflict_tables=*/true,
+ /*ignore_copied_methods=*/false,
&new_conflict,
&imt_data[0]);
}
@@ -6901,8 +6904,8 @@
unimplemented_method,
imt_conflict_method,
klass.Get(),
- /*create_conflict_table*/false,
- /*ignore_copied_methods*/true,
+ /*create_conflict_tables=*/false,
+ /*ignore_copied_methods=*/true,
/*out*/new_conflict,
/*out*/imt);
}
@@ -8120,7 +8123,7 @@
// Check if the invoke type matches the class type.
if (kResolveMode == ResolveMode::kCheckICCEAndIAE &&
- CheckInvokeClassMismatch</* kThrow */ true>(
+ CheckInvokeClassMismatch</* kThrow= */ true>(
dex_cache.Get(), type, [klass]() { return klass; })) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
@@ -9088,7 +9091,7 @@
}
for (ClassLoaderData& data : to_delete) {
// CHA unloading analysis and SingleImplementaion cleanups are required.
- DeleteClassLoader(self, data, true /*cleanup_cha*/);
+ DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
}
}
@@ -9234,11 +9237,11 @@
InvokeType type);
// Instantiate ClassLinker::AllocClass.
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ true>(
Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size);
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ false>(
Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b6f1f86..a48dfaf 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1043,12 +1043,12 @@
public:
// This slot must become a default conflict method.
static MethodTranslation CreateConflictingMethod() {
- return MethodTranslation(Type::kConflict, /*translation*/nullptr);
+ return MethodTranslation(Type::kConflict, /*translation=*/nullptr);
}
// This slot must become an abstract method.
static MethodTranslation CreateAbstractMethod() {
- return MethodTranslation(Type::kAbstract, /*translation*/nullptr);
+ return MethodTranslation(Type::kAbstract, /*translation=*/nullptr);
}
// Use the given method as the current value for this vtable slot during translation.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab7182a..27ac90b 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1034,8 +1034,8 @@
// Force initialization to turn the class erroneous.
bool initialized = class_linker_->EnsureInitialized(soa.Self(),
klass,
- /* can_init_fields */ true,
- /* can_init_parents */ true);
+ /* can_init_fields= */ true,
+ /* can_init_parents= */ true);
EXPECT_FALSE(initialized);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1320,15 +1320,15 @@
ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx,
clinit,
soa.Self(),
- /* can_run_clinit */ true,
- /* verify_access */ false);
+ /* can_run_clinit= */ true,
+ /* verify_access= */ false);
EXPECT_TRUE(uninit != nullptr);
EXPECT_FALSE(uninit->IsInitialized());
ObjPtr<mirror::Class> init = ResolveVerifyAndClinit(type_idx,
getS0,
soa.Self(),
- /* can_run_clinit */ true,
- /* verify_access */ false);
+ /* can_run_clinit= */ true,
+ /* verify_access= */ false);
EXPECT_TRUE(init != nullptr);
EXPECT_TRUE(init->IsInitialized());
}
@@ -1530,7 +1530,7 @@
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Check that inserting with a UTF16 name works.
- class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
+ class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader= */ nullptr);
}
}
@@ -1699,14 +1699,14 @@
jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
- VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
}
TEST_F(ClassLinkerClassLoaderTest, CreateDelegateLastClassLoader) {
jobject class_loader_a = LoadDexInDelegateLastClassLoader("ForClassLoaderA", nullptr);
VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
- VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
}
TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) {
@@ -1753,7 +1753,7 @@
VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a);
// Sanity check that we don't find an undefined class.
- VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find*/ false);
+ VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false);
}
} // namespace art
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 5c8d685..dd10f3c 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -223,7 +223,7 @@
if (!dex_file_loader.Open(location.c_str(),
location.c_str(),
Runtime::Current()->IsVerificationEnabled(),
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg,
&info.opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
@@ -298,12 +298,12 @@
}
std::string ClassLoaderContext::EncodeContextForDex2oat(const std::string& base_dir) const {
- return EncodeContext(base_dir, /*for_dex2oat*/ true, /*stored_context*/ nullptr);
+ return EncodeContext(base_dir, /*for_dex2oat=*/ true, /*stored_context=*/ nullptr);
}
std::string ClassLoaderContext::EncodeContextForOatFile(const std::string& base_dir,
ClassLoaderContext* stored_context) const {
- return EncodeContext(base_dir, /*for_dex2oat*/ false, stored_context);
+ return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context);
}
std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
@@ -663,7 +663,7 @@
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
- std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false));
+ std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files=*/ false));
if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) {
return result;
} else {
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 5e3f48c..ea624f1 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -125,7 +125,7 @@
std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) {
std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext());
- if (!context->Parse(context_spec, /*parse_checksums*/ true)) {
+ if (!context->Parse(context_spec, /*parse_checksums=*/ true)) {
return nullptr;
}
return context;
@@ -263,7 +263,7 @@
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
"DLC[" + dex_name + "]");
- ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+ ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
VerifyContextSize(context.get(), 2);
@@ -314,7 +314,7 @@
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
"DLC[" + dex_name + "]");
- ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+ ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
std::vector<std::unique_ptr<const DexFile>> all_dex_files0 = OpenTestDexFiles("MultiDex");
std::vector<std::unique_ptr<const DexFile>> myclass_dex_files = OpenTestDexFiles("MyClass");
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 78ad568..945d659 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -160,7 +160,7 @@
VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
class_loader,
helper,
- /* default */ nullptr);
+ /* default= */ nullptr);
}
} // namespace art
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1460562..774f19e 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -78,8 +78,8 @@
const ArtDexFileLoader dex_file_loader;
CHECK(dex_file_loader.Open(input_jar.c_str(),
input_jar.c_str(),
- /*verify*/ true,
- /*verify_checksum*/ true,
+ /*verify=*/ true,
+ /*verify_checksum=*/ true,
&error_msg,
&dex_files)) << error_msg;
EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 7199d5e..7a08cb3 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -105,10 +105,10 @@
}
void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) {
- ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr,
+ ThrowException("Ljava/lang/AbstractMethodError;", /* referrer= */ nullptr,
StringPrintf("abstract method \"%s\"",
dex_file.PrettyMethod(method_idx,
- /* with_signature */ true).c_str()).c_str());
+ /* with_signature= */ true).c_str()).c_str());
}
// ArithmeticException
@@ -324,7 +324,7 @@
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
DCHECK(method != nullptr);
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- /*referrer*/nullptr,
+ /*referrer=*/nullptr,
StringPrintf("Conflicting default method implementations %s",
ArtMethod::PrettyMethod(method).c_str()).c_str());
}
@@ -633,7 +633,7 @@
ArtField* field =
Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
break;
}
case Instruction::IGET_QUICK:
@@ -647,9 +647,9 @@
ArtField* field = nullptr;
CHECK_NE(field_idx, DexFile::kDexNoIndex16);
field = Runtime::Current()->GetClassLinker()->ResolveField(
- field_idx, method, /* is_static */ false);
+ field_idx, method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
break;
}
case Instruction::IPUT:
@@ -660,9 +660,9 @@
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField(
- instr.VRegC_22c(), method, /* is_static */ false);
+ instr.VRegC_22c(), method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
break;
}
case Instruction::IPUT_QUICK:
@@ -676,9 +676,9 @@
ArtField* field = nullptr;
CHECK_NE(field_idx, DexFile::kDexNoIndex16);
field = Runtime::Current()->GetClassLinker()->ResolveField(
- field_idx, method, /* is_static */ false);
+ field_idx, method, /* is_static= */ false);
Thread::Current()->ClearException(); // Resolution may fail, ignore.
- ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
break;
}
case Instruction::AGET:
diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc
index cb334b5..2939b00 100644
--- a/runtime/debug_print.cc
+++ b/runtime/debug_print.cc
@@ -37,7 +37,7 @@
std::ostringstream oss;
gc::Heap* heap = Runtime::Current()->GetHeap();
gc::space::ContinuousSpace* cs =
- heap->FindContinuousSpaceFromObject(klass, /* fail_ok */ true);
+ heap->FindContinuousSpaceFromObject(klass, /* fail_ok= */ true);
if (cs != nullptr) {
if (cs->IsImageSpace()) {
gc::space::ImageSpace* ispace = cs->AsImageSpace();
@@ -50,7 +50,7 @@
}
} else {
gc::space::DiscontinuousSpace* ds =
- heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true);
+ heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok= */ true);
if (ds != nullptr) {
oss << "discontinuous;" << ds->GetName();
} else {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9b5b84a..099cadc 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -944,7 +944,7 @@
JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>* counts) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
VariableSizedHandleScope hs(Thread::Current());
std::vector<Handle<mirror::Class>> classes;
counts->clear();
@@ -965,7 +965,7 @@
std::vector<JDWP::ObjectId>* instances) {
gc::Heap* heap = Runtime::Current()->GetHeap();
// We only want reachable instances, so do a GC.
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
JDWP::JdwpError error;
ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -975,7 +975,7 @@
std::vector<Handle<mirror::Object>> raw_instances;
Runtime::Current()->GetHeap()->GetInstances(hs,
hs.NewHandle(c),
- /* use_is_assignable_from */ false,
+ /* use_is_assignable_from= */ false,
max_count,
raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -987,7 +987,7 @@
JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>* referring_objects) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+ heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
JDWP::JdwpError error;
ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
if (o == nullptr) {
@@ -3075,7 +3075,7 @@
Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
std::unique_ptr<Context> context(Context::Create());
CatchLocationFinder clf(self, h_exception, context.get());
- clf.WalkStack(/* include_transitions */ false);
+ clf.WalkStack(/* include_transitions= */ false);
JDWP::EventLocation exception_throw_location;
SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
JDWP::EventLocation exception_catch_location;
@@ -3734,7 +3734,7 @@
bool timed_out;
ThreadList* const thread_list = Runtime::Current()->GetThreadList();
suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kForDebugger,
&timed_out);
}
@@ -4745,7 +4745,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
- AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
+ AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
}
@@ -4757,7 +4757,7 @@
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
- AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
+ AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
}
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index b50a430..fb63c82 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -1251,7 +1251,7 @@
// WellKnownClasses may not be initialized yet, so `klass` may be null.
if (klass != nullptr) {
// Lookup using the boot class path loader should yield the annotation class.
- CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader= */ nullptr));
}
}
}
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 2cbf557..fbcee39 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -87,7 +87,7 @@
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
GetMultiDexSrc1().c_str(),
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_msg,
&multi1)) << error_msg;
@@ -96,7 +96,7 @@
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
GetMultiDexSrc2().c_str(),
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_msg,
&multi2)) << error_msg;
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 429ecd3..13f5fcb 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -206,7 +206,7 @@
reinterpret_cast<uint8_t*>(start),
end - start,
PROT_NONE,
- /* low_4gb*/ false,
+ /* low_4gb=*/ false,
&error_msg));
ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index ce742fe..4e5fe5f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -86,7 +86,7 @@
bool low_4gb,
std::string* error_msg) {
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
- new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
+ new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only= */ false));
if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
return nullptr;
}
@@ -1163,7 +1163,7 @@
vaddr_size,
PROT_NONE,
low_4gb,
- /* reuse */ false,
+ /* reuse= */ false,
reservation,
error_msg);
if (!local_reservation.IsValid()) {
@@ -1237,10 +1237,10 @@
flags,
file->Fd(),
program_header->p_offset,
- /* low4_gb */ false,
+ /* low_4gb= */ false,
file->GetPath().c_str(),
- /* reuse */ true, // implies MAP_FIXED
- /* reservation */ nullptr,
+ /* reuse= */ true, // implies MAP_FIXED
+ /* reservation= */ nullptr,
error_msg);
if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
@@ -1262,9 +1262,9 @@
p_vaddr + program_header->p_filesz,
program_header->p_memsz - program_header->p_filesz,
prot,
- /* low_4gb */ false,
- /* reuse */ true,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ true,
+ /* reservation= */ nullptr,
error_msg);
if (!segment.IsValid()) {
*error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
@@ -1763,7 +1763,7 @@
PROT_READ,
MAP_PRIVATE,
file->Fd(),
- /* start */ 0,
+ /* start= */ 0,
low_4gb,
file->GetPath().c_str(),
error_msg);
@@ -1886,7 +1886,7 @@
}
bool ElfFile::Strip(File* file, std::string* error_msg) {
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb=*/false, error_msg));
if (elf_file.get() == nullptr) {
return false;
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 35bfa91..120a0e9 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -191,7 +191,7 @@
return nullptr;
}
// CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
- return klass->Alloc</*kInstrumented*/true>(
+ return klass->Alloc</*kInstrumented=*/true>(
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
@@ -216,7 +216,7 @@
// Pass in false since the object cannot be finalizable.
// CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
// instrumented.
- return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
+ return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
}
// Pass in false since the object cannot be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
@@ -287,11 +287,11 @@
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
- return mirror::Array::Alloc</*kInstrumented*/true>(self,
- klass,
- component_count,
- klass->GetComponentSizeShift(),
- heap->GetCurrentAllocator());
+ return mirror::Array::Alloc</*kInstrumented=*/true>(self,
+ klass,
+ component_count,
+ klass->GetComponentSizeShift(),
+ heap->GetCurrentAllocator());
}
return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
klass->GetComponentSizeShift(), allocator_type);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5421f69..12136bf 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -244,7 +244,7 @@
result.outer_method = outer_caller_and_pc.first;
uintptr_t caller_pc = outer_caller_and_pc.second;
result.caller =
- DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+ DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 8e784c1..ce12fde 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -31,7 +31,7 @@
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
// Alloc
- ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
+ ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true);
// Resolution and initialization
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index c782c9c..2431bce 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -74,9 +74,9 @@
JValue return_value;
return_value.SetJ(0); // we never deoptimize from compiled code with an invoke result.
self->PushDeoptimizationContext(return_value,
- false /* is_reference */,
+ /* is_reference= */ false,
self->GetException(),
- true /* from_code */,
+ /* from_code= */ true,
DeoptimizationMethodType::kDefault);
artDeoptimizeImpl(self, kind, true);
}
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c4d85a3..e939982 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -140,7 +140,7 @@
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_klass = hs.NewHandle(klass);
bool success = class_linker->EnsureInitialized(
- self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+ self, h_klass, /* can_init_fields= */ true, /* can_init_parents= */ true);
if (UNLIKELY(!success)) {
return nullptr;
}
@@ -157,8 +157,8 @@
ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
caller,
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
}
@@ -175,8 +175,8 @@
ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
caller,
self,
- /* can_run_clinit */ false,
- /* verify_access */ true);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ true);
// Do not StoreTypeInBss(); access check entrypoint is never used together with .bss.
return result.Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d38e3ed..56232c5 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -392,7 +392,7 @@
constexpr ReadBarrierOption kReadBarrierOption =
kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
obj,
MemberOffset(offset),
ref_addr);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ba7fb6b..2e447ec 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -67,7 +67,7 @@
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
- ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U);
+ ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
self->QuickDeliverException();
}
@@ -75,7 +75,7 @@
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
+ ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 84631c3..1472490 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -763,7 +763,7 @@
uint16_t num_regs = accessor.RegistersSize();
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
+ CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -820,7 +820,7 @@
result,
shorty[0] == 'L' || shorty[0] == '[', /* class or array */
force_frame_pop ? nullptr : self->GetException(),
- false /* from_code */,
+ /* from_code= */ false,
DeoptimizationMethodType::kDefault);
// Set special exception to cause deoptimization.
@@ -912,7 +912,7 @@
uint32_t shorty_len = 0;
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
BuildQuickArgumentVisitor local_ref_visitor(
- sp, /* is_static */ false, shorty, shorty_len, &soa, &args);
+ sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
local_ref_visitor.VisitArguments();
DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
@@ -975,7 +975,7 @@
const char* shorty,
uint32_t shorty_len,
size_t arg_pos)
- : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len),
+ : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
cur_pos_(0u),
arg_pos_(arg_pos),
ref_arg_(nullptr) {
@@ -1061,7 +1061,7 @@
<< proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
uint32_t shorty_len = 0;
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
- GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len);
+ GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
ref_args_visitor.VisitArguments();
std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
return ref_args;
@@ -2709,7 +2709,7 @@
conflict_method,
interface_method,
method,
- /*force_new_conflict_method*/false);
+ /*force_new_conflict_method=*/false);
if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
@@ -2784,7 +2784,7 @@
const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
const size_t first_arg = 0;
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
ScopedStackedShadowFramePusher
frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -2877,7 +2877,7 @@
const size_t first_arg = 0;
const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
ScopedStackedShadowFramePusher
frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 10af10d..313b2b4 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,10 @@
void Init() {
std::string error_msg;
mem_map_ = MemMap::MapAnonymous(name_.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bb2beaa..80c4c76 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,10 @@
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7cddec6..9a5bde8 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,10 @@
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("card table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity + 256,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 40dc6e1..b4026fc 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -462,7 +462,7 @@
for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
if (obj_ptr->AsMirrorPtr() != nullptr) {
all_null = false;
- visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false);
}
}
count += references.size();
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 8bdf6da..b369a66 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,10 @@
static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
std::string error_msg;
mem_map_ = MemMap::MapAnonymous("read barrier table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
<< "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 9dea2f8..fba62c3 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -75,7 +75,7 @@
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
- collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
+ collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false);
DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
}
}
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2946486..76d5d9d 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,10 @@
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
bitmap_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0dbafde..8cc0c4e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,10 @@
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3095f9f..8fd235f 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -76,8 +76,8 @@
// we can avoid an expensive CAS.
// For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
// set.
- success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
} else {
success = !bitmap->AtomicTestAndSet(ref);
}
@@ -113,8 +113,8 @@
}
// This may or may not succeed, which is ok because the object may already be gray.
bool success =
- ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
- /* rb_state */ ReadBarrier::GrayState());
+ ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+ /* rb_state= */ ReadBarrier::GrayState());
if (success) {
MutexLock mu(self, immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
@@ -186,7 +186,7 @@
region_space_->Unprotect();
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
UNREACHABLE();
}
} else {
@@ -209,8 +209,8 @@
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
} else {
- ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
- from_ref);
+ ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
}
// Only set the mark bit for baker barrier.
if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 46cc79c..2ae4676 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,10 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"concurrent copying sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -488,7 +488,7 @@
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
// Only change live bytes for full CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
+ cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -601,7 +601,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
- obj->VisitReferences</*kVisitNativeRoots*/true,
+ obj->VisitReferences</*kVisitNativeRoots=*/true,
kDefaultVerifyFlags,
kWithoutReadBarrier>(visitor, visitor);
}
@@ -669,8 +669,8 @@
// Objects on clean cards should never have references to newly allocated regions. Note
// that aged cards are also not clean.
if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
- VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
- obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
internal_visitor, internal_visitor);
}
};
@@ -742,7 +742,7 @@
TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
Thread* const self = Thread::Current();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
VisitorType visitor(self);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -769,11 +769,11 @@
: card;
},
/* card modified visitor */ VoidFunctor());
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardAged);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardAged);
}
}
}
@@ -781,7 +781,7 @@
void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
- using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+ using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
Thread* const self = Thread::Current();
VisitorType visitor(self);
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -791,11 +791,11 @@
// Don't need to scan aged cards since we did these before the pause. Note that scanning cards
// also handles the mod-union table cards.
- card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
- space->Begin(),
- space->End(),
- visitor,
- gc::accounting::CardTable::kCardDirty);
+ card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ gc::accounting::CardTable::kCardDirty);
if (table != nullptr) {
// Add the cards to the mod-union table so that we can clear cards to save RAM.
table->ProcessCards();
@@ -1376,7 +1376,7 @@
space::RegionSpace* region_space = RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
VerifyNoFromSpaceRefsFieldVisitor visitor(this);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
if (kUseBakerReadBarrier) {
@@ -1558,8 +1558,8 @@
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
- count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
- /* checkpoint_callback */ nullptr);
+ count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+ /* checkpoint_callback= */ nullptr);
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1734,7 +1734,7 @@
CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
AssertToSpaceInvariantFieldVisitor visitor(this);
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor,
visitor);
}
@@ -1769,7 +1769,7 @@
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -1833,7 +1833,7 @@
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
// Only sweep objects on the live stack.
- SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+ SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
} else {
{
TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
@@ -2060,7 +2060,7 @@
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Sweep(/* swap_bitmaps */ false);
+ Sweep(/* swap_bitmaps= */ false);
SwapBitmaps();
heap_->UnBindBitmaps();
@@ -2171,7 +2171,7 @@
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref
<< " referenced from object " << obj << " at offset " << offset;
}
@@ -2264,12 +2264,12 @@
LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
LOG(FATAL) << "Invalid reference " << ref;
}
} else {
// Check to-space invariant in non-moving space.
- AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
+ AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
}
}
}
@@ -2440,7 +2440,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
+ collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
}
private:
@@ -2462,7 +2462,7 @@
DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
// Disable the read barrier for a performance reason.
- to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
@@ -2476,10 +2476,10 @@
DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
thread_running_gc_,
ref,
- /*holder*/ obj,
+ /*holder=*/ obj,
offset);
if (to_ref == ref) {
return;
@@ -2553,7 +2553,7 @@
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
// kGrayImmuneObject is true because this is used for the thread flip.
- MarkRoot</*kGrayImmuneObject*/true>(self, root);
+ MarkRoot</*kGrayImmuneObject=*/true>(self, root);
}
}
}
@@ -2702,7 +2702,7 @@
if (UNLIKELY(klass == nullptr)) {
// Remove memory protection from the region space and log debugging information.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
}
// There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
// Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
@@ -2716,7 +2716,7 @@
size_t bytes_allocated = 0U;
size_t dummy;
bool fall_back_to_non_moving = false;
- mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (LIKELY(to_ref != nullptr)) {
@@ -2790,7 +2790,7 @@
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
@@ -3017,7 +3017,7 @@
// AtomicSetReadBarrierState since it will fault if the address is not
// valid.
region_space_->Unprotect();
- heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
}
// Not marked nor on the allocation stack. Try to mark it.
// This may or may not succeed, which is ok.
@@ -3131,7 +3131,7 @@
} while (!field->CasWeakRelaxed(from_ref, to_ref));
} else {
// TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
- field->Assign</* kIsVolatile */ true>(to_ref);
+ field->Assign</* kIsVolatile= */ true>(to_ref);
}
}
return true;
@@ -3151,7 +3151,7 @@
// We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+ /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
@@ -3169,7 +3169,8 @@
ScopedTrace tr(__FUNCTION__);
const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
mirror::Object* ret =
- Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
+ Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+ from_ref);
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
}
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3b59618..3c20e51 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -57,7 +57,7 @@
if (image_oat_file != nullptr) {
intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
reinterpret_cast<uintptr_t>(image_oat_file->End()),
- /*image*/false));
+ /*image=*/false));
}
}
intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 3f85c71..0e5fac1 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -32,7 +32,7 @@
class DummyOatFile : public OatFile {
public:
- DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+ DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
begin_ = begin;
end_ = end;
}
@@ -45,7 +45,7 @@
std::unique_ptr<DummyOatFile>&& oat_file,
MemMap&& oat_map)
: ImageSpace("DummyImageSpace",
- /*image_location*/"",
+ /*image_location=*/"",
std::move(map),
std::move(live_bitmap),
map.End()),
@@ -87,7 +87,7 @@
image_begin,
image_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << error_str;
@@ -100,7 +100,7 @@
oat_begin,
oat_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
+ /*low_4gb=*/true,
&error_str);
if (!oat_map.IsValid()) {
LOG(ERROR) << error_str;
@@ -110,23 +110,23 @@
// Create image header.
ImageSection sections[ImageHeader::kSectionCount];
new (map.Begin()) ImageHeader(
- /*image_begin*/PointerToLowMemUInt32(map.Begin()),
- /*image_size*/map.Size(),
+ /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
+ /*image_size=*/map.Size(),
sections,
- /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
- /*oat_checksum*/0u,
+ /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
+ /*oat_checksum=*/0u,
// The oat file data in the header is always right after the image space.
- /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
- /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
- /*boot_image_begin*/0u,
- /*boot_image_size*/0u,
- /*boot_oat_begin*/0u,
- /*boot_oat_size*/0u,
- /*pointer_size*/sizeof(void*),
+ /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
+ /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+ /*boot_image_begin=*/0u,
+ /*boot_image_size=*/0u,
+ /*boot_oat_begin=*/0u,
+ /*boot_oat_size=*/0u,
+ /*pointer_size=*/sizeof(void*),
ImageHeader::kStorageModeUncompressed,
- /*storage_size*/0u);
+ /*data_size=*/0u);
return new DummyImageSpace(std::move(map),
std::move(live_bitmap),
std::move(oat_file),
@@ -138,10 +138,10 @@
static uint8_t* GetContinuousMemoryRegion(size_t size) {
std::string error_str;
MemMap map = MemMap::MapAnonymous("reserve",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_str);
if (!map.IsValid()) {
LOG(ERROR) << "Failed to allocate memory region " << error_str;
@@ -163,7 +163,7 @@
space::kGcRetentionPolicyNeverCollect,
begin,
end,
- /*limit*/end) {}
+ /*limit=*/end) {}
space::SpaceType GetType() const override {
return space::kSpaceTypeMallocSpace;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5f44a72..399f9ff 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,10 @@
std::string error_msg;
sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
"mark sweep sweep array free buffer",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(sweep_array_free_buffer_mem_map_.IsValid())
<< "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -283,9 +283,9 @@
// cards (during the call to Heap::ProcessCard) are not reordered
// *after* marking actually starts?
heap_->ProcessCards(GetTimings(),
- /* use_rem_sets */ false,
- /* process_alloc_space_cards */ true,
- /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
+ /* use_rem_sets= */ false,
+ /* process_alloc_space_cards= */ true,
+ /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
MarkReachableObjects();
@@ -446,7 +446,7 @@
!large_object_space->Contains(obj)))) {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c58b59d..19b1fc7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -728,7 +728,7 @@
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
MarkObjectVisitor visitor(this);
// Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index af9000b..e253dfb 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -129,10 +129,10 @@
if (!self->IsExceptionPending()) {
// AllocObject will pick up the new allocator type, and instrumented as true is the safe
// default.
- return AllocObject</*kInstrumented*/true>(self,
- klass,
- byte_count,
- pre_fence_visitor);
+ return AllocObject</*kInstrumented=*/true>(self,
+ klass,
+ byte_count,
+ pre_fence_visitor);
}
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 78e8422..a31cbe7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -433,8 +433,8 @@
request_begin,
capacity_,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
- /* reuse */ false,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
heap_reservation.IsValid() ? &heap_reservation : nullptr,
&error_str);
}
@@ -463,7 +463,7 @@
initial_size,
size,
size,
- /* can_move_objects */ false);
+ /* can_move_objects= */ false);
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
<< non_moving_space_mem_map_begin;
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
@@ -505,11 +505,11 @@
// Create bump pointer spaces instead of a backup space.
main_mem_map_2.Reset();
bump_pointer_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(bump_pointer_space_ != nullptr);
AddSpace(bump_pointer_space_);
temp_space_ = space::BumpPointerSpace::Create(
- "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
CHECK(temp_space_ != nullptr);
AddSpace(temp_space_);
} else if (main_mem_map_2.IsValid()) {
@@ -519,7 +519,7 @@
growth_limit_,
capacity_,
name,
- /* can_move_objects */ true));
+ /* can_move_objects= */ true));
CHECK(main_space_backup_.get() != nullptr);
// Add the space so its accounted for in the heap_begin and heap_end.
AddSpace(main_space_backup_.get());
@@ -634,13 +634,13 @@
}
if (MayUseCollector(kCollectorTypeCC)) {
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
- /*young_gen*/false,
+ /*young_gen=*/false,
"",
measure_gc_performance);
if (kEnableGenerationalConcurrentCopyingCollection) {
young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
this,
- /*young_gen*/true,
+ /*young_gen=*/true,
"young",
measure_gc_performance);
}
@@ -671,7 +671,7 @@
bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
- MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -696,7 +696,7 @@
request_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb*/ true,
+ /* low_4gb=*/ true,
out_error_str);
if (map.IsValid() || request_begin == nullptr) {
return map;
@@ -1323,7 +1323,7 @@
// Invoke CC full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
kGcCauseCollectorTransition,
- /*clear_soft_references*/false);
+ /*clear_soft_references=*/false);
} else {
VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
}
@@ -1783,7 +1783,7 @@
break;
}
// Try to transition the heap if the allocation failure was due to the space being full.
- if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
+ if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
// full. Attempt to disable compaction and turn the main space into a non moving space.
DisableMovingGc();
@@ -3870,7 +3870,7 @@
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
@@ -3916,7 +3916,7 @@
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+ << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
@@ -4012,7 +4012,7 @@
{
static constexpr size_t kMaxFrames = 16u;
FixedSizeBacktrace<kMaxFrames> backtrace;
- backtrace.Collect(/* skip_frames */ 2);
+ backtrace.Collect(/* skip_count= */ 2);
uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
@@ -4023,7 +4023,7 @@
if (new_backtrace) {
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(obj);
- CollectGarbage(/* clear_soft_references */ false);
+ CollectGarbage(/* clear_soft_references= */ false);
unique_backtrace_count_.fetch_add(1);
} else {
seen_backtrace_count_.fetch_add(1);
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 05a04f2..a133a10 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -37,7 +37,7 @@
gc::Heap::kPreferredAllocSpaceBegin,
16 * KB,
PROT_READ,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_msg);
ASSERT_TRUE(reserved_.IsValid()) << error_msg;
CommonRuntimeTest::SetUp();
@@ -77,7 +77,7 @@
}
}
}
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
}
TEST_F(HeapTest, HeapBitmapCapacityTest) {
@@ -91,7 +91,7 @@
}
TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c212bad..d4af117 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,16 +60,16 @@
static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
- reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>(
+ reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
slow_path_offset, enabled ? 1 : 0);
}
void ReferenceProcessor::EnableSlowPath() {
- SetSlowPathFlag(/* enabled */ true);
+ SetSlowPathFlag(/* enabled= */ true);
}
void ReferenceProcessor::DisableSlowPath(Thread* self) {
- SetSlowPathFlag(/* enabled */ false);
+ SetSlowPathFlag(/* enabled= */ false);
condition_.Broadcast(self);
}
@@ -238,13 +238,13 @@
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
// do_atomic_update needs to be true because this happens outside of the reference processing
// phase.
- if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
if (UNLIKELY(collector->IsTransactionActive())) {
// In transaction mode, keep the referent alive and avoid any reference processing to avoid the
// issue of rolling back reference processing. do_atomic_update needs to be true because this
// happens outside of the reference processing phase.
if (!referent->IsNull()) {
- collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+ collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
}
return;
}
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index e25e279..5c11e50 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -136,7 +136,7 @@
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -158,7 +158,7 @@
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
// do_atomic_update is false because this happens during the reference processing phase where
// Reference.clear() would block.
- if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
@@ -187,7 +187,7 @@
if (referent_addr->AsMirrorPtr() != nullptr) {
// do_atomic_update is false because mutators can't access the referent due to the weak ref
// access blocking.
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
+ visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
}
ref = ref->GetPendingNext();
} while (LIKELY(ref != head));
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 80af700..497a0c2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -32,7 +32,7 @@
requested_begin,
capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 36d2161..73582a0 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -54,7 +54,7 @@
end,
limit,
growth_limit,
- /* create_bitmaps */ true,
+ /* create_bitmaps= */ true,
can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index b783cfe..a7f82f6 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,10 @@
size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
num_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (UNLIKELY(!mem_map.IsValid())) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -353,7 +353,7 @@
requested_begin,
size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +372,10 @@
std::string error_msg;
allocation_info_map_ =
MemMap::MapAnonymous("large object free list space allocation info map",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
alloc_info_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 445560a..be75efe 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -109,7 +109,7 @@
requested_begin,
*capacity,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index bda1f1c..8cb079d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -409,7 +409,7 @@
} else {
DCHECK(reg->IsLargeTail());
}
- reg->Clear(/*zero_and_release_pages*/true);
+ reg->Clear(/*zero_and_release_pages=*/true);
if (kForEvac) {
--num_evac_regions_;
} else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index eba6fac..31bbfb8 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,7 @@
requested_begin,
capacity + kRegionSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
if (mem_map.IsValid() || requested_begin == nullptr) {
break;
@@ -393,7 +393,7 @@
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
- r->Clear(/*zero_and_release_pages*/false);
+ r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
@@ -656,7 +656,7 @@
if (!r->IsFree()) {
--num_non_free_regions_;
}
- r->Clear(/*zero_and_release_pages*/true);
+ r->Clear(/*zero_and_release_pages=*/true);
}
SetNonFreeRegionLimit(0);
DCHECK_EQ(num_non_free_regions_, 0u);
@@ -735,7 +735,7 @@
RevokeThreadLocalBuffersLocked(self);
// Retain sufficient free regions for full evacuation.
- Region* r = AllocateRegion(/*for_evac*/ false);
+ Region* r = AllocateRegion(/*for_evac=*/ false);
if (r != nullptr) {
r->is_a_tlab_ = true;
r->thread_ = self;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 5af1dd3..cc371b8 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -206,12 +206,12 @@
// Go through all of the blocks and visit the continuous objects.
template <typename Visitor>
ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
- WalkInternal<false /* kToSpaceOnly */>(visitor);
+ WalkInternal</* kToSpaceOnly= */ false>(visitor);
}
template <typename Visitor>
ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true /* kToSpaceOnly */>(visitor);
+ WalkInternal</* kToSpaceOnly= */ true>(visitor);
}
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index f16ed2d..4fe8027 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -145,7 +145,7 @@
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -166,7 +166,7 @@
cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -190,7 +190,7 @@
cswh.Set(GcRoot<mirror::Object>(s.Get()));
// Trigger a GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expect the holder to have been called.
ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -205,7 +205,7 @@
Runtime::Current()->RemoveSystemWeakHolder(&cswh);
// Trigger another GC.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
// Expectation: no change in the numbers.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 0281eee..47c54bd 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -87,7 +87,7 @@
bool fatal) const {
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
- MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index d091e7f..f61c700 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -199,7 +199,7 @@
inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
: BaseHandleScope(self->GetTopHandleScope()),
self_(self) {
- current_scope_ = new LocalScopeType(/*link*/ nullptr);
+ current_scope_ = new LocalScopeType(/*link=*/ nullptr);
self_->PushHandleScope(this);
}
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 580224e..c16e7f3 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -242,9 +242,9 @@
AccessMethod access_method)
REQUIRES_SHARED(Locks::mutator_lock_) {
bool is_caller_trusted =
- detail::IsCallerTrusted(/* caller */ nullptr, caller_class_loader, caller_dex_cache);
+ detail::IsCallerTrusted(/* caller= */ nullptr, caller_class_loader, caller_dex_cache);
return GetMemberAction(member,
- /* thread */ nullptr,
+ /* thread= */ nullptr,
[is_caller_trusted] (Thread*) { return is_caller_trusted; },
access_method);
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d205225..6db4790 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,10 @@
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!table_mem_map_.IsValid() && error_msg->empty()) {
*error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +223,10 @@
const size_t table_bytes = new_size * sizeof(IrtEntry);
MemMap new_map = MemMap::MapAnonymous("indirect ref table",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
table_bytes,
PROT_READ | PROT_WRITE,
- /* is_low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!new_map.IsValid()) {
return false;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 5c7b0ae..d533054 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1495,8 +1495,8 @@
DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
self->PushDeoptimizationContext(return_value,
return_shorty == 'L' || return_shorty == '[',
- nullptr /* no pending exception */,
- false /* from_code */,
+ /* exception= */ nullptr,
+ /* from_code= */ false,
deopt_method_type);
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 31cfeb6..d973689 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -509,9 +509,9 @@
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodEntered,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ true);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, MethodExitObjectEvent) {
@@ -529,9 +529,9 @@
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodExited,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ true);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, MethodExitPrimEvent) {
@@ -548,9 +548,9 @@
ASSERT_TRUE(method->IsDirect());
ASSERT_TRUE(method->GetDeclaringClass() == klass);
TestEvent(instrumentation::Instrumentation::kMethodExited,
- /*event_method*/ method,
- /*event_field*/ nullptr,
- /*with_object*/ false);
+ /*event_method=*/ method,
+ /*event_field=*/ nullptr,
+ /*with_object=*/ false);
}
TEST_F(InstrumentationTest, MethodUnwindEvent) {
@@ -582,9 +582,9 @@
ASSERT_TRUE(field != nullptr);
TestEvent(instrumentation::Instrumentation::kFieldWritten,
- /*event_method*/ nullptr,
- /*event_field*/ field,
- /*with_object*/ true);
+ /*event_method=*/ nullptr,
+ /*event_field=*/ field,
+ /*with_object=*/ true);
}
TEST_F(InstrumentationTest, FieldWritePrimEvent) {
@@ -600,9 +600,9 @@
ASSERT_TRUE(field != nullptr);
TestEvent(instrumentation::Instrumentation::kFieldWritten,
- /*event_method*/ nullptr,
- /*event_field*/ field,
- /*with_object*/ false);
+ /*event_method=*/ nullptr,
+ /*event_field=*/ field,
+ /*with_object=*/ false);
}
TEST_F(InstrumentationTest, ExceptionHandledEvent) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8a31985..b37a278 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -587,8 +587,8 @@
accessor,
*shadow_frame,
value,
- /* stay_in_interpreter */ true,
- /* from_deoptimize */ true);
+ /* stay_in_interpreter= */ true,
+ /* from_deoptimize= */ true);
}
ShadowFrame* old_frame = shadow_frame;
shadow_frame = shadow_frame->GetLink();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index cb91953..2cee813 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -714,12 +714,12 @@
if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
static const bool kIsRange = false;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
} else {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
static const bool kIsRange = true;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
}
}
@@ -731,12 +731,12 @@
if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
static const bool kIsRange = false;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
} else {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
static const bool kIsRange = true;
return DoMethodHandleInvokeCommon<kIsRange>(
- self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
}
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 26bfba9..7055e8a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -290,7 +290,7 @@
if (jit != nullptr) {
jit->InvokeVirtualOrInterface(
receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
- jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
+ jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/false);
}
// No need to check since we've been quickened.
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index d9f76ee..4757b57 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -167,7 +167,7 @@
#define HOTNESS_UPDATE() \
do { \
if (jit != nullptr) { \
- jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/ true); \
+ jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/ true); \
} \
} while (false)
@@ -1754,7 +1754,7 @@
case Instruction::INVOKE_POLYMORPHIC: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokePolymorphic<false /* is_range */>(
+ bool success = DoInvokePolymorphic</* is_range= */ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
@@ -1762,7 +1762,7 @@
case Instruction::INVOKE_POLYMORPHIC_RANGE: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokePolymorphic<true /* is_range */>(
+ bool success = DoInvokePolymorphic</* is_range= */ true>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
@@ -1770,7 +1770,7 @@
case Instruction::INVOKE_CUSTOM: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokeCustom<false /* is_range */>(
+ bool success = DoInvokeCustom</* is_range= */ false>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
@@ -1778,7 +1778,7 @@
case Instruction::INVOKE_CUSTOM_RANGE: {
PREAMBLE();
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- bool success = DoInvokeCustom<true /* is_range */>(
+ bool success = DoInvokeCustom</* is_range= */ true>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index c9a8adc..4b6f430 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -220,7 +220,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom<false /* is_range */>(
+ return DoInvokeCustom</* is_range= */ false>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -231,7 +231,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic<false /* is_range */>(
+ return DoInvokePolymorphic</* is_range= */ false>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -297,7 +297,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
+ return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
}
extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
@@ -307,7 +307,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvokePolymorphic<true /* is_range */>(
+ return DoInvokePolymorphic</* is_range= */ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -375,8 +375,8 @@
ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
shadow_frame->GetMethod(),
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (UNLIKELY(c == nullptr)) {
return true;
}
@@ -463,8 +463,8 @@
ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
shadow_frame->GetMethod(),
self,
- /* can_run_clinit */ false,
- /* verify_access */ false);
+ /* can_run_clinit= */ false,
+ /* verify_access= */ false);
if (LIKELY(c != nullptr)) {
if (UNLIKELY(c->IsStringClass())) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -682,8 +682,8 @@
if (kIsPrimitive) {
if (kIsRead) {
PrimType value = UNLIKELY(is_volatile)
- ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset)
- : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset);
+ ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
+ : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
if (sizeof(PrimType) == sizeof(uint64_t)) {
shadow_frame->SetVRegLong(vRegA, value); // Set two consecutive registers.
} else {
@@ -694,9 +694,9 @@
? shadow_frame->GetVRegLong(vRegA)
: shadow_frame->GetVReg(vRegA);
if (UNLIKELY(is_volatile)) {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value);
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
} else {
- obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value);
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
}
}
} else { // Object.
@@ -708,9 +708,9 @@
} else { // Write.
ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
if (UNLIKELY(is_volatile)) {
- obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value);
+ obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
} else {
- obj->SetFieldObject</*kTransactionActive*/ false>(offset, value);
+ obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
}
}
}
@@ -729,7 +729,7 @@
shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
ArtMethod* referrer = shadow_frame->GetMethod();
uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, referrer, self, sizeof(PrimType));
if (UNLIKELY(field == nullptr)) {
DCHECK(self->IsExceptionPending());
@@ -770,7 +770,7 @@
: tls_value;
if (kIsDebugBuild) {
uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
DCHECK_EQ(offset, field->GetOffset().SizeValue());
}
@@ -779,7 +779,7 @@
: MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
if (LIKELY(obj != nullptr)) {
MterpFieldAccess<PrimType, kAccessType>(
- inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile */ false);
+ inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
return true;
}
}
@@ -798,7 +798,7 @@
if (LIKELY(field != nullptr)) {
bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
if (LIKELY(initialized)) {
- DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
field_idx, referrer, self, sizeof(PrimType))));
ObjPtr<mirror::Object> obj = kIsStatic
? field->GetDeclaringClass().Ptr()
@@ -930,7 +930,7 @@
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
- jit->AddSamples(self, method, count, /*with_backedges*/ true);
+ jit->AddSamples(self, method, count, /*with_backedges=*/ true);
}
return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}
@@ -955,7 +955,7 @@
osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
if (offset <= 0) {
// Keep updating hotness in case a compilation request was dropped. Eventually it will retry.
- jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
+ jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
}
did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 38ecc5a..07afba4 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -865,10 +865,10 @@
// checking version, however, does.
if (Runtime::Current()->IsActiveTransaction()) {
dst->AssignableCheckingMemcpy<true>(
- dst_pos, src, src_pos, length, true /* throw_exception */);
+ dst_pos, src, src_pos, length, /* throw_exception= */ true);
} else {
dst->AssignableCheckingMemcpy<false>(
- dst_pos, src, src_pos, length, true /* throw_exception */);
+ dst_pos, src, src_pos, length, /* throw_exception= */ true);
}
}
} else if (src_type->IsPrimitiveByte()) {
@@ -1478,9 +1478,9 @@
reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
ReadBarrier::Barrier<
mirror::Object,
- /* kIsVolatile */ false,
+ /* kIsVolatile= */ false,
kWithReadBarrier,
- /* kAlwaysUpdateField */ true>(
+ /* kAlwaysUpdateField= */ true>(
obj,
MemberOffset(offset),
field_addr);
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index bd2705d..3fafc31 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -695,7 +695,7 @@
{ ld2, ld2 }
};
- TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+ TestCeilFloor(/* ceil= */ true, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, Floor) {
@@ -722,7 +722,7 @@
{ ld2, ld2 }
};
- TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+ TestCeilFloor(/* ceil= */ false, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c1f69b8..ef893ee 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -599,12 +599,12 @@
void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
} else if (kind_ == kCompileOsr) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
} else {
DCHECK(kind_ == kAllocateProfile);
- if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+ if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
}
}
@@ -673,7 +673,7 @@
if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
if ((new_count >= WarmMethodThreshold()) &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
- bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
+ bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
}
@@ -741,7 +741,7 @@
if (np_method->IsCompilable()) {
if (!np_method->IsNative()) {
// The compiler requires a ProfilingInfo object for non-native methods.
- ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+ ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
}
JitCompileTask compile_task(method, JitCompileTask::kCompile);
// Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
@@ -761,7 +761,7 @@
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, profiling_info->GetSavedEntryPoint());
} else {
- AddSamples(thread, method, 1, /* with_backedges */false);
+ AddSamples(thread, method, 1, /* with_backedges= */false);
}
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 63cb6a4..8600b41 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -221,7 +221,7 @@
unique_fd mem_fd;
// Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
if (mem_fd.get() < 0) {
VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
<< strerror(errno);
@@ -281,8 +281,8 @@
kProtRW,
base_flags,
mem_fd,
- /* start */ 0,
- /* low_4gb */ true,
+ /* start= */ 0,
+ /* low_4gb= */ true,
"data-code-cache",
&error_str);
} else {
@@ -303,12 +303,12 @@
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
"data-code-cache",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
data_capacity + exec_capacity,
kProtRW,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
+ /* reservation= */ nullptr,
&error_str);
}
@@ -347,8 +347,8 @@
kProtR,
base_flags,
mem_fd,
- /* start */ data_capacity,
- /* low_4GB */ false,
+ /* start= */ data_capacity,
+ /* low_4GB= */ false,
"jit-code-cache-rw",
&error_str);
if (!non_exec_pages.IsValid()) {
@@ -1008,7 +1008,7 @@
// Simply discard the compiled code. Clear the counter so that it may be recompiled later.
// Hopefully the class hierarchy will be more stable when compilation is retried.
single_impl_still_valid = false;
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
break;
}
}
@@ -1156,7 +1156,7 @@
// method. The compiled code for the method (if there is any) must not be in any threads call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
- RemoveMethodLocked(method, /* release_memory */ true);
+ RemoveMethodLocked(method, /* release_memory= */ true);
}
// This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -1314,7 +1314,7 @@
// its stack frame, it is not the method owning return_pc_. We just pass null to
// LookupMethodHeader: the method is only checked against in debug builds.
OatQuickMethodHeader* method_header =
- code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
+ code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
if (method_header != nullptr) {
const void* code = method_header->GetCode();
CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1438,7 +1438,7 @@
<< PrettySize(CodeCacheSize())
<< ", data=" << PrettySize(DataCacheSize());
- DoCollection(self, /* collect_profiling_info */ do_full_collection);
+ DoCollection(self, /* collect_profiling_info= */ do_full_collection);
VLOG(jit) << "After code cache collection, code="
<< PrettySize(CodeCacheSize())
@@ -1551,7 +1551,7 @@
info->SetSavedEntryPoint(nullptr);
// We are going to move this method back to interpreter. Clear the counter now to
// give it a chance to be hot again.
- ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
+ ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
}
}
} else if (kIsDebugBuild) {
@@ -1933,7 +1933,7 @@
VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not hit the
// threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
return false;
}
@@ -2009,7 +2009,7 @@
// and clear the counter to get the method Jitted again.
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
- ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
+ ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
} else {
MutexLock mu(Thread::Current(), lock_);
auto it = osr_code_map_.find(method);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 9043f26..e3248ea 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -129,7 +129,7 @@
}
total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
}
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ true);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
// When we save without waiting for JIT notifications we use a simple
@@ -183,7 +183,7 @@
uint16_t number_of_new_methods = 0;
uint64_t start_work = NanoTime();
- bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
+ bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods);
// Update the notification counter based on result. Note that there might be contention on this
// but we don't care about to be 100% precise.
if (!profile_saved_to_disk) {
@@ -501,7 +501,7 @@
// We only need to do this once, not once per dex location.
// TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms.
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ false);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);
for (const auto& it : tracked_locations) {
if (!force_save && ShuttingDown(Thread::Current())) {
@@ -521,7 +521,7 @@
}
{
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+ if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
LOG(WARNING) << "Could not forcefully load profile " << filename;
continue;
}
@@ -607,9 +607,9 @@
Runtime* runtime = Runtime::Current();
bool attached = runtime->AttachCurrentThread("Profile Saver",
- /*as_daemon*/true,
+ /*as_daemon=*/true,
runtime->GetSystemThreadGroup(),
- /*create_peer*/true);
+ /*create_peer=*/true);
if (!attached) {
CHECK(runtime->IsShuttingDown(Thread::Current()));
return nullptr;
@@ -751,7 +751,7 @@
// Force save everything before destroying the thread since we want profiler_pthread_ to remain
// valid.
- instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
// Wait for the saver thread to stop.
CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -838,7 +838,7 @@
// but we only use this in testing when we know this won't happen.
// Refactor the way we handle the instance so that we don't end up in this situation.
if (saver != nullptr) {
- saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
}
}
@@ -846,7 +846,7 @@
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(profile, /*clear_if_invalid*/false)) {
+ if (!info.Load(profile, /*clear_if_invalid=*/false)) {
return false;
}
ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a3dae83..f6139bb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -125,7 +125,7 @@
}
bool IsInUseByCompiler() const {
- return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+ return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
(current_inline_uses_ > 0);
}
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 6f61f5e..48f9981 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -286,7 +286,7 @@
// to get reasonable stacks and environment, rather than relying on
// tombstoned.
JNIEnv* env;
- Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thread_args */ nullptr);
+ Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thr_args= */ nullptr);
std::string tmp = android::base::StringPrintf(
"a thread (tid %" PRId64 " is making JNI calls without being attached",
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 5200607..52509fd 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -82,7 +82,7 @@
static constexpr bool kWarnJniAbort = false;
static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
- return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames */ 1));
+ return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames= */ 1));
}
template<typename T>
@@ -106,9 +106,9 @@
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -133,9 +133,9 @@
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -157,9 +157,9 @@
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
Thread* self = Thread::Current();
- ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
- /*check_suspended*/ true,
- /*abort_on_error*/ false);
+ ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+ /*check_suspended=*/ true,
+ /*abort_on_error=*/ false);
if (cur_method == nullptr) {
// Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 4ad4c14..57346b7 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -962,11 +962,11 @@
// Make sure we can actually use it.
jstring s = env_->NewStringUTF("poop");
if (mirror::kUseStringCompression) {
- ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible= */ true),
env_->GetIntField(s, fid2));
// Create incompressible string
jstring s_16 = env_->NewStringUTF("\u0444\u0444");
- ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false),
+ ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible= */ false),
env_->GetIntField(s_16, fid2));
} else {
ASSERT_EQ(4, env_->GetIntField(s, fid2));
@@ -1485,7 +1485,7 @@
ASSERT_NE(weak_global, nullptr);
env_->DeleteLocalRef(local_ref);
// GC should clear the weak global.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
jobject new_global_ref = env_->NewGlobalRef(weak_global);
EXPECT_EQ(new_global_ref, nullptr);
jobject new_local_ref = env_->NewLocalRef(weak_global);
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 570fc48..86ad32e 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -745,7 +745,7 @@
callee_type,
self,
shadow_frame,
- method_handle /* receiver */,
+ /* receiver= */ method_handle,
operands,
result);
} else {
@@ -1103,7 +1103,7 @@
if (IsInvokeVarHandle(handle_kind)) {
return DoVarHandleInvokeTranslation(self,
shadow_frame,
- /*invokeExact*/ false,
+ /*invokeExact=*/ false,
method_handle,
callsite_type,
operands,
@@ -1155,7 +1155,7 @@
} else if (IsInvokeVarHandle(handle_kind)) {
return DoVarHandleInvokeTranslation(self,
shadow_frame,
- /*invokeExact*/ true,
+ /*invokeExact=*/ true,
method_handle,
callsite_type,
operands,
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 31bc5e4..50b1b90 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -1073,8 +1073,8 @@
T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size);
T new_value = visitor(old_value, address);
if (old_value != new_value) {
- dest->SetFieldPtrWithSize</* kTransactionActive */ false,
- /* kCheckTransaction */ true,
+ dest->SetFieldPtrWithSize</* kTransactionActive= */ false,
+ /* kCheckTransaction= */ true,
kVerifyNone>(member_offset, new_value, pointer_size);
}
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 26dba02..6a378f0 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -83,7 +83,7 @@
Thread* self = Thread::Current();
if (name == nullptr) {
// Note: ThrowNullPointerException() requires a message which we deliberately want to omit.
- self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg */ nullptr);
+ self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg= */ nullptr);
} else {
self->ThrowNewException("Ljava/lang/ClassNotFoundException;", name->ToModifiedUtf8().c_str());
}
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e9e7ca8..36c5ae2 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -108,7 +108,7 @@
EXPECT_NE(klass1->NumStaticFields(), 0u);
for (ArtField& field : klass2->GetSFields()) {
EXPECT_FALSE(
- klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false>(
+ klass1->ResolvedFieldAccessTest</*throw_on_failure=*/ false>(
klass2.Get(),
&field,
klass1->GetDexCache(),
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index fbe002a..8ae79a8 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -80,11 +80,11 @@
}
inline mirror::Object* Object::MonitorEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this, /*trylock*/false);
+ return Monitor::MonitorEnter(self, this, /*trylock=*/false);
}
inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this, /*trylock*/true);
+ return Monitor::MonitorEnter(self, this, /*trylock=*/true);
}
inline bool Object::MonitorExit(Thread* self) {
@@ -738,7 +738,7 @@
inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
ObjPtr<Object> new_value) {
VerifyTransaction<kTransactionActive, kCheckTransaction>();
- VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr);
+ VerifyCAS<kVerifyFlags>(new_value, /*old_value=*/ nullptr);
uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 8689e4d..ee84997 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -131,7 +131,7 @@
UNREACHABLE();
}
DCHECK(kUseBakerReadBarrier);
- LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
+ LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 8fa2c6c..3752d6d 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -237,7 +237,7 @@
template <bool kIsInstrumented>
inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
- const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true);
+ const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
SetStringCountVisitor visitor(length_with_flag);
return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 02aa1a8..0f0a378 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -289,7 +289,7 @@
// Is this the requested frame?
if (current_frame_number_ == wanted_frame_number_) {
method_ = m;
- dex_pc_ = GetDexPc(false /* abort_on_error*/);
+ dex_pc_ = GetDexPc(/* abort_on_failure=*/ false);
return false;
}
@@ -385,7 +385,7 @@
} else {
return false;
}
- AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+ AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
return true;
}
@@ -777,7 +777,7 @@
AtraceMonitorUnlock(); // For the implicit Unlock() just above. This will only end the deepest
// nesting, but that is enough for the visualization, and corresponds to
// the single Lock() we do afterwards.
- AtraceMonitorLock(self, GetObject(), true /* is_wait */);
+ AtraceMonitorLock(self, GetObject(), /* is_wait= */ true);
bool was_interrupted = false;
bool timed_out = false;
@@ -1042,7 +1042,7 @@
// No ordering required for preceding lockword read, since we retest.
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
}
continue; // Go again.
@@ -1060,8 +1060,8 @@
// Only this thread pays attention to the count. Thus there is no need for stronger
// than relaxed memory ordering.
if (!kUseReadBarrier) {
- h_obj->SetLockWord(thin_locked, false /* volatile */);
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
@@ -1069,7 +1069,7 @@
thin_locked,
CASMode::kWeak,
std::memory_order_relaxed)) {
- AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+ AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
}
}
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 0b168f8..8610899 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -361,7 +361,7 @@
thread_pool.AddTask(self, new TryLockTask(obj1));
thread_pool.StartWorkers(self);
ScopedThreadSuspension sts(self, kSuspended);
- thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+ thread_pool.Wait(Thread::Current(), /*do_work=*/false, /*may_hold_locks=*/false);
}
// Test that the trylock actually locks the object.
{
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 6becd36..69f7648 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -174,10 +174,10 @@
std::string error_message;
size_t length = static_cast<size_t>(end - start);
MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
length,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_message);
if (!dex_mem_map.IsValid()) {
ScopedObjectAccess soa(env);
@@ -196,8 +196,8 @@
std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
0,
std::move(dex_mem_map),
- /* verify */ true,
- /* verify_location */ true,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_message));
if (dex_file == nullptr) {
ScopedObjectAccess soa(env);
@@ -551,7 +551,7 @@
}
OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set,
- false /* load_executable */);
+ /* load_executable= */ false);
return env->NewStringUTF(oat_file_assistant.GetStatusDump().c_str());
}
@@ -774,7 +774,7 @@
OatFileAssistant oat_file_assistant(filename.c_str(),
target_instruction_set,
- false /* load_executable */);
+ /* load_executable= */ false);
std::unique_ptr<OatFile> best_oat_file = oat_file_assistant.GetBestOatFile();
if (best_oat_file == nullptr) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 96f15de..24c8d14 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -367,7 +367,7 @@
VariableSizedHandleScope hs2(soa.Self());
std::vector<Handle<mirror::Object>> raw_instances;
- heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+ heap->GetInstances(hs2, h_class, includeAssignable, /* max_count= */ 0, raw_instances);
jobjectArray array = env->NewObjectArray(raw_instances.size(),
WellKnownClasses::java_lang_Object,
nullptr);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 861d1db..2a3ea46 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -404,7 +404,7 @@
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
- field_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+ field_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
if (klass == nullptr) {
return;
}
@@ -432,12 +432,12 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType(
- method_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+ method_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
if (klass == nullptr) {
return;
}
// Call FindResolvedMethod to populate the dex cache.
- class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader */ nullptr, method_idx);
+ class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader= */ nullptr, method_idx);
}
struct DexCacheStats {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index e3932df..32733a8 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -59,7 +59,7 @@
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
Thread* thread = thread_list->SuspendThreadByPeer(peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kInternal,
&timed_out);
if (thread != nullptr) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 72dae47..f54bf87 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -152,7 +152,8 @@
// Drop the shared mutator lock.
ScopedThreadSuspension sts(self, art::ThreadState::kNative);
// Get exclusive mutator lock with suspend all.
- ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!",
+ /*long_suspend=*/false);
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
}
@@ -399,7 +400,7 @@
env,
is_system_server,
Runtime::NativeBridgeAction::kUnload,
- /*isa*/ nullptr,
+ /*isa=*/ nullptr,
profile_system_server);
}
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index f5039d1..6d94fa1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -647,7 +647,7 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
annotation_array_class,
- /* length */ 0);
+ /* length= */ 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
}
return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass));
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b7f0a7a..67ad0a4 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -147,7 +147,7 @@
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
Thread* thread = thread_list->SuspendThreadByPeer(peer,
- /* request_suspension */ true,
+ /* request_suspension= */ true,
SuspendReason::kInternal,
&timed_out);
if (thread != nullptr) {
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
index 1f2bf09..0b26bd7 100644
--- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -48,7 +48,7 @@
if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) {
ArtField* const field = handle->GetTargetField();
h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
- soa.Self(), field, false /* force_resolve */));
+ soa.Self(), field, /* force_resolve= */ false));
} else {
ArtMethod* const method = handle->GetTargetMethod();
if (method->IsConstructor()) {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 4644480..e021b77 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -74,8 +74,8 @@
mirror::HeapReference<mirror::Object>* field_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier,
- /* kAlwaysUpdateField */ true>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier,
+ /* kAlwaysUpdateField= */ true>(
obj.Ptr(),
MemberOffset(offset),
field_addr);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index f16c46b..7c320d8 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -269,7 +269,7 @@
vdex_filename,
writable,
low_4gb,
- /* unquicken*/ false,
+ /* unquicken=*/ false,
error_msg);
if (vdex_.get() == nullptr) {
*error_msg = StringPrintf("Failed to load vdex file '%s' %s",
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index ba08e5e..4294baf 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -226,12 +226,12 @@
// A representation of an invalid OatClass, used when an OatClass can't be found.
// See FindOatClass().
static OatClass Invalid() {
- return OatClass(/* oat_file */ nullptr,
+ return OatClass(/* oat_file= */ nullptr,
ClassStatus::kErrorUnresolved,
kOatClassNoneCompiled,
- /* bitmap_size */ 0,
- /* bitmap_pointer */ nullptr,
- /* methods_pointer */ nullptr);
+ /* bitmap_size= */ 0,
+ /* bitmap_pointer= */ nullptr,
+ /* methods_pointer= */ nullptr);
}
private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 754aa40..a06be4c 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -91,8 +91,8 @@
: isa_(isa),
load_executable_(load_executable),
only_load_system_executable_(only_load_system_executable),
- odex_(this, /*is_oat_location*/ false),
- oat_(this, /*is_oat_location*/ true),
+ odex_(this, /*is_oat_location=*/ false),
+ oat_(this, /*is_oat_location=*/ true),
zip_fd_(zip_fd) {
CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
@@ -700,9 +700,9 @@
}
} else {
vdex = VdexFile::Open(vdex_filename,
- false /*writeable*/,
- false /*low_4gb*/,
- false /*unquicken*/,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg);
}
if (vdex == nullptr) {
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index aba2eae..521e419 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -182,8 +182,8 @@
EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(
CompilerFilter::kDefaultCompilerFilter,
- /* downgrade */ false,
- /* profile_changed */ false,
+ /* profile_changed= */ false,
+ /* downgrade= */ false,
relative_context.get()));
}
@@ -336,7 +336,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
@@ -375,7 +375,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
@@ -385,7 +385,7 @@
false,
false,
vdex_fd.get(),
- -1 /* oat_fd */,
+ /* oat_fd= */ -1,
zip_fd.get());
EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -408,7 +408,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ false);
+ /* with_alternate_image= */ false);
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
@@ -417,7 +417,7 @@
kRuntimeISA,
false,
false,
- -1 /* vdex_fd */,
+ /* vdex_fd= */ -1,
odex_fd.get(),
zip_fd.get());
@@ -441,8 +441,8 @@
kRuntimeISA,
false,
false,
- -1 /* vdex_fd */,
- -1 /* oat_fd */,
+ /* vdex_fd= */ -1,
+ /* oat_fd= */ -1,
zip_fd);
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -637,7 +637,7 @@
// Strip the dex file.
Copy(GetStrippedDexSrc1(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable*/false);
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable=*/false);
// Because the dex file is stripped, the odex file is considered the source
// of truth for the dex checksums. The oat file should be considered
@@ -730,7 +730,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kSpeed,
- /* with_alternate_image */ true);
+ /* with_alternate_image= */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -765,7 +765,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kExtract,
- /* with_alternate_image */ true);
+ /* with_alternate_image= */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -1167,7 +1167,7 @@
dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
dex_location_.c_str(),
Runtime::Current()->GetSystemClassLoader(),
- /*dex_elements*/nullptr,
+ /*dex_elements=*/nullptr,
&oat_file,
&error_msgs);
CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
@@ -1213,7 +1213,7 @@
tasks.push_back(std::move(task));
}
thread_pool.StartWorkers(self);
- thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
+ thread_pool.Wait(self, /* do_work= */ true, /* may_hold_locks= */ false);
// Verify that tasks which got an oat file got a unique one.
std::set<const OatFile*> oat_files;
@@ -1335,8 +1335,8 @@
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(
CompilerFilter::kDefaultCompilerFilter,
- /* downgrade */ false,
- /* profile_changed */ false,
+ /* profile_changed= */ false,
+ /* downgrade= */ false,
updated_context.get()));
}
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index b9e9d38..7ac1ab4 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -181,7 +181,7 @@
private:
static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
- BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+ BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator());
for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
uint16_t type_idx = class_def.class_idx_.index_;
@@ -302,12 +302,12 @@
std::priority_queue<DexFileAndClassPair> queue;
for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
- queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+ queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat=*/true);
}
}
for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
- queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+ queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat=*/false);
}
}
@@ -385,8 +385,8 @@
// the oat file without additional checks
ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch(
oat_file->GetClassLoaderContext(),
- /*verify_names*/ true,
- /*verify_checksums*/ true);
+ /*verify_names=*/ true,
+ /*verify_checksums=*/ true);
switch (result) {
case ClassLoaderContext::VerificationResult::kForcedToSkipChecks:
return CheckCollisionResult::kSkippedClassLoaderContextSharedLibrary;
diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h
index 411dc7a..23e536d 100644
--- a/runtime/proxy_test.h
+++ b/runtime/proxy_test.h
@@ -47,7 +47,7 @@
// Builds the interfaces array.
jobjectArray proxyClassInterfaces =
- soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr);
+ soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement= */ nullptr);
soa.Self()->AssertNoPendingException();
for (size_t i = 0; i < interfaces.size(); ++i) {
soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i,
@@ -62,7 +62,7 @@
jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
methods_count,
soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()),
- /* initialElement */ nullptr);
+ /* initialElement= */ nullptr);
soa.Self()->AssertNoPendingException();
jsize array_index = 0;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 36a6b7f..afdfefa 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -126,7 +126,7 @@
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
- method, found_dex_pc, /* is_catch_handler */ true));
+ method, found_dex_pc, /* is_for_catch_handler= */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
return false; // End stack walk.
@@ -218,7 +218,10 @@
}
// Walk the stack to find catch handler.
- CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this, /*skip*/already_popped);
+ CatchBlockStackVisitor visitor(self_, context_,
+ &exception_ref,
+ this,
+ /*skip_frames=*/already_popped);
visitor.WalkStack(true);
uint32_t new_pop_count = handler_frame_depth_;
DCHECK_GE(new_pop_count, already_popped);
@@ -606,7 +609,7 @@
<< deopt_method->PrettyMethod()
<< " due to "
<< GetDeoptimizationKindName(kind);
- DumpFramesWithType(self_, /* details */ true);
+ DumpFramesWithType(self_, /* details= */ true);
}
if (Runtime::Current()->UseJitCompilation()) {
Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4d77b9d..7fa5607 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -305,15 +305,15 @@
// Very few things are actually capable of distinguishing between the peer & peerless states so
// this should be fine.
bool thread_attached = AttachCurrentThread("Shutdown thread",
- /* as_daemon */ false,
+ /* as_daemon= */ false,
GetSystemThreadGroup(),
- /* Create peer */ IsStarted());
+ /* create_peer= */ IsStarted());
if (UNLIKELY(!thread_attached)) {
LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
- /* as_daemon */ false,
- /* thread_group*/ nullptr,
- /* Create peer */ false));
+ /* as_daemon= */ false,
+ /* thread_group=*/ nullptr,
+ /* create_peer= */ false));
}
self = Thread::Current();
} else {
@@ -614,7 +614,7 @@
bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
Locks::Init();
- InitLogging(/* argv */ nullptr, Abort); // Calls Locks::Init() as a side effect.
+ InitLogging(/* argv= */ nullptr, Abort); // Calls Locks::Init() as a side effect.
bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
if (!parsed) {
LOG(ERROR) << "Failed to parse options";
@@ -815,7 +815,7 @@
? NativeBridgeAction::kInitialize
: NativeBridgeAction::kUnload;
InitNonZygoteOrPostFork(self->GetJniEnv(),
- /* is_system_server */ false,
+ /* is_system_server= */ false,
action,
GetInstructionSetString(kRuntimeISA));
}
@@ -1002,9 +1002,9 @@
std::string error_msg;
std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
- false /* writable */,
- false /* low_4gb */,
- false, /* unquicken */
+ /* writable= */ false,
+ /* low_4gb= */ false,
+ /* unquicken= */ false,
&error_msg));
if (vdex_file.get() == nullptr) {
return false;
@@ -1015,15 +1015,15 @@
return false;
}
std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
- false /* writable */,
- false /* program_header_only */,
- false /* low_4gb */,
+ /* writable= */ false,
+ /* program_header_only= */ false,
+ /* low_4gb= */ false,
&error_msg));
if (elf_file.get() == nullptr) {
return false;
}
std::unique_ptr<const OatFile> oat_file(
- OatFile::OpenWithElfFile(/* zip_fd */ -1,
+ OatFile::OpenWithElfFile(/* zip_fd= */ -1,
elf_file.release(),
vdex_file.release(),
oat_location,
@@ -1117,7 +1117,7 @@
CHECK(klass != nullptr);
gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
- klass->Alloc</* kIsInstrumented */ true>(self, allocator_type));
+ klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
CHECK(exception_object != nullptr);
*exception = GcRoot<mirror::Throwable>(exception_object);
// Initialize the "detailMessage" field.
@@ -1127,7 +1127,7 @@
ArtField* detailMessageField =
throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
CHECK(detailMessageField != nullptr);
- detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message);
+ detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
}
bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
@@ -1160,8 +1160,8 @@
reinterpret_cast<uint8_t*>(kSentinelAddr),
kPageSize,
PROT_NONE,
- /* low_4g */ true,
- /* error_msg */ nullptr);
+ /* low_4gb= */ true,
+ /* error_msg= */ nullptr);
if (!protected_fault_page_.IsValid()) {
LOG(WARNING) << "Could not reserve sentinel fault page";
} else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
@@ -1371,13 +1371,13 @@
arena_pool_.reset(new MallocArenaPool());
jit_arena_pool_.reset(new MallocArenaPool());
} else {
- arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false));
- jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata"));
+ arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false));
+ jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata"));
}
if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
- low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true));
+ low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true));
}
linear_alloc_.reset(CreateLinearAlloc());
@@ -2148,7 +2148,7 @@
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
}
// Create empty conflict table.
- method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+ method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc),
pointer_size);
return method;
}
@@ -2280,7 +2280,7 @@
LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
return;
}
- if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) {
+ if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
return;
}
@@ -2519,12 +2519,12 @@
const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_unimplemented_method_->SetImtConflictTable(
- ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
pointer_size);
}
if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_conflict_method_->SetImtConflictTable(
- ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
pointer_size);
}
}
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 4bd3b3a..55ba293 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -30,8 +30,8 @@
HandleUnexpectedSignalCommon(signal_number,
info,
raw_context,
- /* handle_timeout_signal */ false,
- /* dump_on_stderr */ false);
+ /* handle_timeout_signal= */ false,
+ /* dump_on_stderr= */ false);
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
@@ -44,7 +44,7 @@
if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
&old_action,
- /* handle_timeout_signal */ false);
+ /* handle_timeout_signal= */ false);
}
}
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 89f3124..20b3327 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,10 @@
TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
std::string error_msg;
MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
128 * kPageSize, // Just some small stack.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(stack.IsValid()) << error_msg;
@@ -505,10 +505,10 @@
self,
// Just a random class
soa.Decode<mirror::Class>(WellKnownClasses::java_util_Collections).Ptr(),
- /*ms*/0,
- /*ns*/0,
- /*interruptShouldThrow*/false,
- /*why*/kWaiting);
+ /*ms=*/0,
+ /*ns=*/0,
+ /*interruptShouldThrow=*/false,
+ /*why=*/kWaiting);
}
}
ASSERT_TRUE(cb_.saw_wait_start_);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 6313553..cfa8ea6 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -31,8 +31,8 @@
HandleUnexpectedSignalCommon(signal_number,
info,
raw_context,
- /* handle_timeout_signal */ true,
- /* dump_on_stderr */ true);
+ /* handle_timeout_signal= */ true,
+ /* dump_on_stderr= */ true);
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
pid_t tid = GetTid();
@@ -77,7 +77,7 @@
// On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
nullptr,
- /* handle_timeout_signal */ true);
+ /* handle_timeout_signal= */ true);
}
} // namespace art
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index f4a27b8..38ea9cc 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -118,7 +118,7 @@
ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput);
- std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */));
+ std::unique_ptr<File> file(new File(output_fd.release(), /* check_usage= */ true));
bool success = file->WriteFully(s.data(), s.size());
if (success) {
success = file->FlushCloseOrErase() == 0;
@@ -169,7 +169,7 @@
void SignalCatcher::HandleSigUsr1() {
LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
ProfileSaver::ForceProcessProfiles();
}
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 5f44286..811e23b 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -358,7 +358,7 @@
ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
if (stack_map.HasDexRegisterMap()) {
DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
- DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map);
+ DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register= */ 0, &map);
return map;
}
return DexRegisterMap(0, DexRegisterLocation::None());
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index aac547e..106c7f1 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -237,7 +237,7 @@
static SubtypeCheckInfo::State EnsureInitialized(ClassPtr klass)
REQUIRES(Locks::subtype_check_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return InitializeOrAssign(klass, /*assign*/false).GetState();
+ return InitializeOrAssign(klass, /*assign=*/false).GetState();
}
// Force this class's SubtypeCheckInfo state into Assigned|Overflowed.
@@ -250,7 +250,7 @@
static SubtypeCheckInfo::State EnsureAssigned(ClassPtr klass)
REQUIRES(Locks::subtype_check_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return InitializeOrAssign(klass, /*assign*/true).GetState();
+ return InitializeOrAssign(klass, /*assign=*/true).GetState();
}
// Resets the SubtypeCheckInfo into the Uninitialized state.
@@ -398,7 +398,7 @@
// Force all ancestors to Assigned | Overflowed.
ClassPtr parent_klass = GetParentClass(klass);
- size_t parent_depth = InitializeOrAssign(parent_klass, /*assign*/true).GetDepth();
+ size_t parent_depth = InitializeOrAssign(parent_klass, /*assign=*/true).GetDepth();
if (kIsDebugBuild) {
SubtypeCheckInfo::State parent_state = GetSubtypeCheckInfo(parent_klass).GetState();
DCHECK(parent_state == SubtypeCheckInfo::kAssigned ||
@@ -542,17 +542,17 @@
int32_t new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
- return klass->template CasField32</*kTransactionActive*/true>(offset,
- old_value,
- new_value,
- CASMode::kWeak,
- std::memory_order_seq_cst);
- } else {
- return klass->template CasField32</*kTransactionActive*/false>(offset,
+ return klass->template CasField32</*kTransactionActive=*/true>(offset,
old_value,
new_value,
CASMode::kWeak,
std::memory_order_seq_cst);
+ } else {
+ return klass->template CasField32</*kTransactionActive=*/false>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
}
}
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 462f203..23d8ac3 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -56,9 +56,9 @@
*
* See subtype_check.h and subtype_check_info.h for more details.
*/
-BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
- BitStructField<BitString, /*lsb*/ 0> bitstring_;
- BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
+BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u)
+ BitStructField<BitString, /*lsb=*/ 0> bitstring_;
+ BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_;
BITSTRUCT_DEFINE_END(SubtypeCheckBits);
} // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 321a723..eec6e21 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -68,11 +68,11 @@
static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
- BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+ BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_;
BitStructField<ClassStatus,
- /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
- /*width*/ kClassStatusBitSize> status_;
- BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
+ /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
+ /*width=*/ kClassStatusBitSize> status_;
+ BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
// Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 9bd135e..44a2a69 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -87,7 +87,7 @@
struct SubtypeCheckInfoTest : public ::testing::Test {
protected:
void SetUp() override {
- android::base::InitLogging(/*argv*/nullptr);
+ android::base::InitLogging(/*argv=*/nullptr);
}
void TearDown() override {
@@ -158,33 +158,33 @@
// Illegal values during construction would cause a DCHECK failure and crash.
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/0u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/0u),
GetExpectedMessageForDeathTest("Path was too long for the depth"));
ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({1u, 1u}),
- /*overflow*/false,
- /*depth*/0u),
+ /*overflow=*/false,
+ /*depth=*/0u),
GetExpectedMessageForDeathTest("Bitstring too long for depth"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/1u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/1u),
GetExpectedMessageForDeathTest("Expected \\(Assigned\\|Initialized\\) "
"state to have >0 Next value"));
ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({0u, 2u, 1u}),
- /*overflow*/false,
- /*depth*/2u),
+ /*overflow=*/false,
+ /*depth=*/2u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 2u}),
- /*next*/MakeBitStringChar(1u),
- /*overflow*/false,
- /*depth*/2u),
+ /*next=*/MakeBitStringChar(1u),
+ /*overflow=*/false,
+ /*depth=*/2u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 1u, 1u}),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/3u),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/3u),
GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
// These are really slow (~1sec per death test on host),
@@ -194,62 +194,62 @@
TEST_F(SubtypeCheckInfoTest, States) {
EXPECT_EQ(SubtypeCheckInfo::kUninitialized, MakeSubtypeCheckInfo().GetState());
EXPECT_EQ(SubtypeCheckInfo::kInitialized,
- MakeSubtypeCheckInfo(/*path*/{}, /*next*/MakeBitStringChar(1)).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/{}, /*next=*/MakeBitStringChar(1)).GetState());
EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
- MakeSubtypeCheckInfo(/*path*/{},
- /*next*/MakeBitStringChar(1),
- /*overflow*/true,
- /*depth*/1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/{},
+ /*next=*/MakeBitStringChar(1),
+ /*overflow=*/true,
+ /*depth=*/1u).GetState());
EXPECT_EQ(SubtypeCheckInfo::kAssigned,
- MakeSubtypeCheckInfo(/*path*/MakeBitString({1u}),
- /*next*/MakeBitStringChar(1),
- /*overflow*/false,
- /*depth*/1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitString({1u}),
+ /*next=*/MakeBitStringChar(1),
+ /*overflow=*/false,
+ /*depth=*/1u).GetState());
// Test edge conditions: depth == BitString::kCapacity (No Next value).
EXPECT_EQ(SubtypeCheckInfo::kAssigned,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/BitString::kCapacity).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity).GetState());
EXPECT_EQ(SubtypeCheckInfo::kInitialized,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax<BitString::kCapacity - 1u>(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/false,
- /*depth*/BitString::kCapacity).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax<BitString::kCapacity - 1u>(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity).GetState());
// Test edge conditions: depth > BitString::kCapacity (Must overflow).
EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
- MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
- /*next*/MakeBitStringChar(0),
- /*overflow*/true,
- /*depth*/BitString::kCapacity + 1u).GetState());
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/MakeBitStringChar(0),
+ /*overflow=*/true,
+ /*depth=*/BitString::kCapacity + 1u).GetState());
}
TEST_F(SubtypeCheckInfoTest, NextValue) {
// Validate "Next" is correctly aliased as the Bitstring[Depth] character.
EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/0u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/0u).GetNext());
EXPECT_EQ(MakeBitStringChar(2u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/1u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/1u).GetNext());
EXPECT_EQ(MakeBitStringChar(3u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
- /*overflow*/false,
- /*depth*/2u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/2u).GetNext());
EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({0u, 2u, 1u}),
- /*overflow*/false,
- /*depth*/2u).GetNext());
+ /*overflow=*/false,
+ /*depth=*/2u).GetNext());
// Test edge conditions: depth == BitString::kCapacity (No Next value).
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity)));
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity)));
// Anything with depth >= BitString::kCapacity has no next value.
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity + 1u)));
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity + 1u)));
EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax(),
- /*overflow*/false,
- /*depth*/std::numeric_limits<size_t>::max())));
+ /*overflow=*/false,
+ /*depth=*/std::numeric_limits<size_t>::max())));
}
template <size_t kPos = BitString::kCapacity>
@@ -259,10 +259,10 @@
using StorageType = BitString::StorageType;
SubtypeCheckInfo sci =
- MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
- /*next*/BitStringChar{},
- /*overflow*/false,
- /*depth*/BitString::kCapacity);
+ MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+ /*next=*/BitStringChar{},
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity);
// 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
@@ -275,8 +275,8 @@
SubtypeCheckInfo sci2 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity);
#define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
(((pos0) << 0) | \
@@ -290,8 +290,8 @@
SubtypeCheckInfo sci3 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
- /*overflow*/false,
- /*depth*/BitString::kCapacity - 1u);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity - 1u);
EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
sci3.GetEncodedPathToRoot());
@@ -300,8 +300,8 @@
SubtypeCheckInfo sci4 =
MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
- /*overflow*/false,
- /*depth*/BitString::kCapacity - 2u);
+ /*overflow=*/false,
+ /*depth=*/BitString::kCapacity - 2u);
EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
@@ -320,7 +320,7 @@
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -331,7 +331,7 @@
// CopyCleared is just a thin wrapper around value-init and providing the depth.
SubtypeCheckInfo cleared_copy_value =
- SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth*/1u);
+ SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth=*/1u);
EXPECT_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy_value.GetState());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(cleared_copy_value));
}
@@ -340,7 +340,7 @@
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -350,17 +350,17 @@
SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
- SubtypeCheckInfo childA = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo childA = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, childA.GetState());
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize.
EXPECT_EQ(MakeBitString({}), GetPathToRoot(childA));
- SubtypeCheckInfo childB = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo childB = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, childB.GetState());
EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize.
EXPECT_EQ(MakeBitString({}), GetPathToRoot(childB));
- SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign.
EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -369,19 +369,19 @@
size_t cur_depth = 1u;
SubtypeCheckInfo latest_child = childC;
while (cur_depth != BitString::kCapacity) {
- latest_child = latest_child.CreateChild(/*assign*/true);
+ latest_child = latest_child.CreateChild(/*assign_next=*/true);
ASSERT_EQ(SubtypeCheckInfo::kAssigned, latest_child.GetState());
ASSERT_EQ(cur_depth + 1u, GetPathToRoot(latest_child).Length());
cur_depth++;
}
// Future assignments will result in a too-deep overflow.
- SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep.GetState());
EXPECT_EQ(GetPathToRoot(latest_child), GetPathToRoot(child_of_deep));
// Assignment of too-deep overflow also causes overflow.
- SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep_2.GetState());
EXPECT_EQ(GetPathToRoot(child_of_deep), GetPathToRoot(child_of_deep_2));
}
@@ -393,7 +393,7 @@
break;
}
- SubtypeCheckInfo child = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/true);
ASSERT_EQ(SubtypeCheckInfo::kAssigned, child.GetState());
ASSERT_EQ(MakeBitStringChar(cur_next+1u), root.GetNext());
ASSERT_EQ(MakeBitString({cur_next}), GetPathToRoot(child));
@@ -403,20 +403,20 @@
// Now the root will be in a state where further assigns will be too-wide overflow.
// Initialization still succeeds.
- SubtypeCheckInfo child = root.CreateChild(/*assign*/false);
+ SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/false);
EXPECT_EQ(SubtypeCheckInfo::kInitialized, child.GetState());
EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(child));
// Assignment goes to too-wide Overflow.
- SubtypeCheckInfo child_of = root.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of = root.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of.GetState());
EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
EXPECT_EQ(MakeBitString({}), GetPathToRoot(child_of));
// Assignment of overflowed child still succeeds.
// The path to root is the same.
- SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign*/true);
+ SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign_next=*/true);
EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of2.GetState());
EXPECT_EQ(GetPathToRoot(child_of), GetPathToRoot(child_of2));
}
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 9aa3032..719e5d9 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -302,7 +302,7 @@
struct SubtypeCheckTest : public ::testing::Test {
protected:
void SetUp() override {
- android::base::InitLogging(/*argv*/nullptr);
+ android::base::InitLogging(/*argv=*/nullptr);
CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u);
}
@@ -312,8 +312,8 @@
void CreateRootedTree(size_t width, size_t height) {
all_classes_.clear();
- root_ = CreateClassFor(/*parent*/nullptr, /*x*/0, /*y*/0);
- CreateTreeFor(root_, /*width*/width, /*depth*/height);
+ root_ = CreateClassFor(/*parent=*/nullptr, /*x=*/0, /*y=*/0);
+ CreateTreeFor(root_, /*width=*/width, /*levels=*/height);
}
MockClass* CreateClassFor(MockClass* parent, size_t x, size_t y) {
@@ -681,7 +681,7 @@
const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
ASSERT_EQ(depth, transitions.size());
- EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
+ EnsureStateChangedTestRecursive(root, /*cur_depth=*/0u, depth, transitions);
}
TEST_F(SubtypeCheckTest, EnsureInitialized_NoOverflow) {
@@ -869,8 +869,8 @@
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
@@ -914,15 +914,15 @@
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()) << *child;
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
// Create too-wide children for a too-wide parent.
- MockClass* child_subchild = child->FindChildAt(/*x*/0, kTargetDepth);
- CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*depth*/1);
+ MockClass* child_subchild = child->FindChildAt(/*x=*/0, kTargetDepth);
+ CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOffSub*2, child_subchild->GetNumberOfChildren()) << *child_subchild;
ASSERT_TRUE(IsTooWide(child_subchild->GetMaxChild())) << *(child_subchild->GetMaxChild());
}
@@ -1035,8 +1035,8 @@
{
// Create too-wide siblings at the kTargetDepth level.
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
- CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+ MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+ CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
// Leave the rest of the tree as the default.
@@ -1045,7 +1045,7 @@
MockClass* child_subchild = child->GetMaxChild();
ASSERT_TRUE(child_subchild != nullptr);
ASSERT_EQ(0u, child_subchild->GetNumberOfChildren()) << *child_subchild;
- CreateTreeFor(child_subchild, /*width*/1, /*levels*/kTooDeepTargetDepth);
+ CreateTreeFor(child_subchild, /*width=*/1, /*levels=*/kTooDeepTargetDepth);
MockClass* too_deep_child = child_subchild->FindChildAt(0, kTooDeepTargetDepth + 2);
ASSERT_TRUE(too_deep_child != nullptr) << child_subchild->ToDotGraph();
ASSERT_TRUE(IsTooWide(too_deep_child)) << *(too_deep_child);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2e04e0c..a3de4e2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -152,7 +152,7 @@
void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
CHECK(kUseReadBarrier);
tls32_.is_gc_marking = is_marking;
- UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
+ UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
ResetQuickAllocEntryPointsForThread(is_marking);
}
@@ -579,7 +579,7 @@
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
- if (ProtectStack(/* fatal_on_error */ false)) {
+ if (ProtectStack(/* fatal_on_error= */ false)) {
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
uint32_t unwanted_size = stack_top - pregion - kPageSize;
@@ -648,7 +648,7 @@
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
// Protect the bottom of the stack to prevent read/write to it.
- ProtectStack(/* fatal_on_error */ true);
+ ProtectStack(/* fatal_on_error= */ true);
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
@@ -2014,13 +2014,13 @@
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method =
GetCurrentMethod(nullptr,
- /*check_suspended*/ !force_dump_stack,
- /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
+ /*check_suspended=*/ !force_dump_stack,
+ /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
DumpJavaStack(os,
- /*check_suspended*/ !force_dump_stack,
- /*dump_locks*/ !force_dump_stack);
+ /*check_suspended=*/ !force_dump_stack,
+ /*dump_locks=*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
@@ -2911,8 +2911,8 @@
// Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 .
class_linker->EnsureInitialized(soa.Self(),
h_aste_class,
- /* can_init_fields */ true,
- /* can_init_parents */ true);
+ /* can_init_fields= */ true,
+ /* can_init_parents= */ true);
if (soa.Self()->IsExceptionPending()) {
// This should not fail in a healthy runtime.
return nullptr;
@@ -3429,9 +3429,9 @@
}
PushDeoptimizationContext(
JValue(),
- false /* is_reference */,
+ /* is_reference= */ false,
(force_deopt ? nullptr : exception),
- false /* from_code */,
+ /* from_code= */ false,
method_type);
artDeoptimize(this);
UNREACHABLE();
@@ -3557,7 +3557,7 @@
}
}
// Mark lock count map required for structured locking checks.
- shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg */ -1, this);
+ shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
}
private:
@@ -3573,7 +3573,7 @@
if (kVerifyImageObjectsMarked) {
gc::Heap* const heap = Runtime::Current()->GetHeap();
gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
- /*fail_ok*/true);
+ /*fail_ok=*/true);
if (space != nullptr && space->IsImageSpace()) {
bool failed = false;
if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
@@ -3595,7 +3595,7 @@
}
}
mirror::Object* new_ref = klass.Ptr();
- visitor_(&new_ref, /* vreg */ -1, this);
+ visitor_(&new_ref, /* vreg= */ -1, this);
if (new_ref != klass) {
method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
}
@@ -3668,7 +3668,7 @@
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
mirror::Object* new_ref = ref;
- visitor_(&new_ref, /* vreg */ -1, this);
+ visitor_(&new_ref, /* vreg= */ -1, this);
if (ref != new_ref) {
ref_addr->Assign(new_ref);
}
@@ -3861,9 +3861,9 @@
void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
- VisitRoots</* kPrecise */ true>(visitor);
+ VisitRoots</* kPrecise= */ true>(visitor);
} else {
- VisitRoots</* kPrecise */ false>(visitor);
+ VisitRoots</* kPrecise= */ false>(visitor);
}
}
@@ -4078,7 +4078,7 @@
void Thread::SetReadBarrierEntrypoints() {
// Make sure entrypoints aren't null.
- UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
+ UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ true);
}
void Thread::ClearAllInterpreterCaches() {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index f8c90b1..d21b600 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -438,7 +438,7 @@
// Wake up the threads blocking for weak ref access so that they will respond to the empty
// checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
- Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+ Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
uint64_t total_wait_time = 0;
@@ -491,9 +491,9 @@
// Found a runnable thread that hasn't responded to the empty checkpoint request.
// Assume it's stuck and safe to dump its stack.
thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
- /*dump_native_stack*/ true,
- /*backtrace_map*/ nullptr,
- /*force_dump_stack*/ true);
+ /*dump_native_stack=*/ true,
+ /*backtrace_map=*/ nullptr,
+ /*force_dump_stack=*/ true);
}
}
}
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index f1c808b..a245f65 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,10 @@
stack_size += kPageSize;
std::string error_msg;
stack_ = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
stack_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
CHECK(stack_.IsValid()) << error_msg;
CHECK_ALIGNED(stack_.Begin(), kPageSize);
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 2600f55..9e7c44a 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -119,7 +119,7 @@
// Drain the task list. Note: we have to restart here, as no tasks will be finished when
// the pool is stopped.
thread_pool.StartWorkers(self);
- thread_pool.Wait(self, /* do_work */ true, false);
+ thread_pool.Wait(self, /* do_work= */ true, false);
}
class TreeTask : public Task {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4ee983d..ad58c2e 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -435,7 +435,7 @@
// want to use the trampolines anyway since it is faster. It makes the story with disabling
// jit-gc more complex though.
runtime->GetInstrumentation()->EnableMethodTracing(
- kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable());
+ kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable());
}
}
}
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 452cd8e..bd59e73 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -150,11 +150,11 @@
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
unquicken ? MAP_PRIVATE : MAP_SHARED,
file_fd,
- /* start */ 0u,
+ /* start= */ 0u,
low_4gb,
vdex_filename.c_str(),
mmap_reuse,
- /* reservation */ nullptr,
+ /* reservation= */ nullptr,
error_msg);
if (!mmap.IsValid()) {
*error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
@@ -173,7 +173,7 @@
return nullptr;
}
vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
- /* decompile_return_instruction */ false);
+ /* decompile_return_instruction= */ false);
// Update the quickening info size to pretend there isn't any.
size_t offset = vdex->GetDexSectionHeaderOffset();
reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
@@ -213,13 +213,13 @@
std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection(
dex_file_start,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
GetLocationChecksum(i),
- nullptr /*oat_dex_file*/,
- false /*verify*/,
- false /*verify_checksum*/,
+ /*oat_dex_file=*/ nullptr,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg));
if (dex == nullptr) {
return false;
diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc
index ced6e28..9d92b42 100644
--- a/runtime/vdex_file_test.cc
+++ b/runtime/vdex_file_test.cc
@@ -34,14 +34,14 @@
std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(),
0,
tmp.GetFilename(),
- /*writable*/false,
- /*low_4gb*/false,
- /*quicken*/false,
+ /*writable=*/false,
+ /*low_4gb=*/false,
+ /*unquicken=*/false,
&error_msg);
EXPECT_TRUE(vdex == nullptr);
vdex = VdexFile::Open(
- tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, /*quicken*/ false, &error_msg);
+ tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false, /*unquicken=*/false, &error_msg);
EXPECT_TRUE(vdex == nullptr);
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5fce892..7b07389 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -242,7 +242,7 @@
*previous_idx = method_idx;
const InvokeType type = method.GetInvokeType(class_def.access_flags_);
ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- method_idx, dex_cache, class_loader, /* referrer */ nullptr, type);
+ method_idx, dex_cache, class_loader, /* referrer= */ nullptr, type);
if (resolved_method == nullptr) {
DCHECK(self->IsExceptionPending());
// We couldn't resolve the method, but continue regardless.
@@ -263,7 +263,7 @@
callbacks,
allow_soft_failures,
log_level,
- /*need_precise_constants*/ false,
+ /*need_precise_constants=*/ false,
api_level,
&hard_failure_msg);
if (result.kind == FailureKind::kHardFailure) {
@@ -340,11 +340,11 @@
method_idx,
method,
method_access_flags,
- true /* can_load_classes */,
+ /* can_load_classes= */ true,
allow_soft_failures,
need_precise_constants,
- false /* verify to dump */,
- true /* allow_thread_suspension */,
+ /* verify_to_dump= */ false,
+ /* allow_thread_suspension= */ true,
api_level);
if (verifier.Verify()) {
// Verification completed, however failures may be pending that didn't cause the verification
@@ -475,11 +475,11 @@
dex_method_idx,
method,
method_access_flags,
- true /* can_load_classes */,
- true /* allow_soft_failures */,
- true /* need_precise_constants */,
- true /* verify_to_dump */,
- true /* allow_thread_suspension */,
+ /* can_load_classes= */ true,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ true,
+ /* verify_to_dump= */ true,
+ /* allow_thread_suspension= */ true,
api_level);
verifier->Verify();
verifier->DumpFailures(vios->Stream());
@@ -570,11 +570,11 @@
m->GetDexMethodIndex(),
m,
m->GetAccessFlags(),
- false /* can_load_classes */,
- true /* allow_soft_failures */,
- false /* need_precise_constants */,
- false /* verify_to_dump */,
- false /* allow_thread_suspension */,
+ /* can_load_classes= */ false,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ false,
+ /* verify_to_dump= */ false,
+ /* allow_thread_suspension= */ false,
api_level);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index cedc583..7519257 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -42,7 +42,7 @@
// Verify the class
std::string error_msg;
FailureKind failure = MethodVerifier::VerifyClass(
- self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg);
+ self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level= */ 0u, &error_msg);
if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
ASSERT_TRUE(failure == FailureKind::kSoftFailure ||
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 4a3f9e6..91be00d 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -756,13 +756,13 @@
VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
join_class,
GetClass(),
- /* strict */ true,
- /* is_assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
join_class,
incoming_type.GetClass(),
- /* strict */ true,
- /* is_assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
}
if (GetClass() == join_class && !IsPreciseReference()) {
return *this;
@@ -771,7 +771,7 @@
} else {
std::string temp;
const char* descriptor = join_class->GetDescriptor(&temp);
- return reg_types->FromClass(descriptor, join_class, /* precise */ false);
+ return reg_types->FromClass(descriptor, join_class, /* precise= */ false);
}
}
} else {
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9f87adf..f62e8b6 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -126,7 +126,7 @@
inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
const RegType* result = &FromClass("Ljava/lang/Class;",
GetClassRoot<mirror::Class>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -135,7 +135,7 @@
// String is final and therefore always precise.
const RegType* result = &FromClass("Ljava/lang/String;",
GetClassRoot<mirror::String>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -143,7 +143,7 @@
inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;",
GetClassRoot<mirror::MethodHandle>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
@@ -151,7 +151,7 @@
inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() {
const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;",
GetClassRoot<mirror::MethodType>(),
- /* precise */ true);
+ /* precise= */ true);
DCHECK(result->IsPreciseReference());
return *down_cast<const PreciseReferenceType*>(result);
}
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index f1f3488..ceba748 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -438,14 +438,14 @@
// Is the resolved part a primitive array?
if (resolved_merged_is_array && !resolved_parts_merged.IsObjectArrayTypes()) {
- return JavaLangObject(false /* precise */);
+ return JavaLangObject(/* precise= */ false);
}
// Is any part not an array (but exists)?
if ((!left_unresolved_is_array && left_resolved != &left) ||
(!right_unresolved_is_array && right_resolved != &right) ||
!resolved_merged_is_array) {
- return JavaLangObject(false /* precise */);
+ return JavaLangObject(/* precise= */ false);
}
}
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 0430d20..3224385 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -80,8 +80,8 @@
EXPECT_FALSE(precise_lo.CheckWidePair(precise_const));
EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi));
// Test Merging.
- EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes());
- EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes());
+ EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier= */ nullptr)).IsLongTypes());
+ EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier= */ nullptr)).IsLongHighTypes());
}
TEST_F(RegTypeTest, Primitives) {
@@ -429,7 +429,7 @@
const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
const RegType& unresolved_merged = cache.FromUnresolvedMerge(
- unresolved_ref, unresolved_ref_another, /* verifier */ nullptr);
+ unresolved_ref, unresolved_ref_another, /* verifier= */ nullptr);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -490,14 +490,14 @@
RegTypeCache cache_new(true, allocator);
const RegType& string = cache_new.JavaLangString();
const RegType& Object = cache_new.JavaLangObject(true);
- EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject());
+ EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject());
// Merge two unresolved types.
const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
RegType& merged_nonconst = const_cast<RegType&>(merged);
@@ -520,22 +520,22 @@
const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -556,46 +556,46 @@
const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
const RegType& merged = long_lo_type.Merge(
- imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
const RegType& merged = imprecise_cst_lo.Merge(
- long_lo_type, &cache_new, /* verifier */ nullptr);
+ long_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr);
+ const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
const RegType& merged = long_hi_type.Merge(
- imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
const RegType& merged = imprecise_cst_hi.Merge(
- long_hi_type, &cache_new, /* verifier */ nullptr);
+ long_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -617,49 +617,49 @@
{
// lo MERGE precise cst lo => lo.
const RegType& merged = double_lo_type.Merge(
- precise_cst_lo, &cache_new, /* verifier */ nullptr);
+ precise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
const RegType& merged = precise_cst_lo.Merge(
- double_lo_type, &cache_new, /* verifier */ nullptr);
+ double_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
const RegType& merged = double_lo_type.Merge(
- imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
const RegType& merged = imprecise_cst_lo.Merge(
- double_lo_type, &cache_new, /* verifier */ nullptr);
+ double_lo_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
const RegType& merged = double_hi_type.Merge(
- precise_cst_hi, &cache_new, /* verifier */ nullptr);
+ precise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
const RegType& merged = precise_cst_hi.Merge(
- double_hi_type, &cache_new, /* verifier */ nullptr);
+ double_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
const RegType& merged = double_hi_type.Merge(
- imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+ imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
const RegType& merged = imprecise_cst_hi.Merge(
- double_hi_type, &cache_new, /* verifier */ nullptr);
+ double_hi_type, &cache_new, /* verifier= */ nullptr);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index fb91976..b666c15 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -43,7 +43,7 @@
}
VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files)
- : VerifierDeps(dex_files, /*output_only*/ true) {}
+ : VerifierDeps(dex_files, /*output_only=*/ true) {}
void VerifierDeps::MergeWith(const VerifierDeps& other,
const std::vector<const DexFile*>& dex_files) {
@@ -439,7 +439,7 @@
AddAssignability(dex_file,
destination_component,
source_component,
- /* is_strict */ true,
+ /* is_strict= */ true,
is_assignable);
return;
}
@@ -707,7 +707,7 @@
VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files,
ArrayRef<const uint8_t> data)
- : VerifierDeps(dex_files, /*output_only*/ false) {
+ : VerifierDeps(dex_files, /*output_only=*/ false) {
if (data.empty()) {
// Return eagerly, as the first thing we expect from VerifierDeps data is
// the number of created strings, even if there is no dependency.
@@ -1089,9 +1089,9 @@
const DexFileDeps& deps,
Thread* self) const {
bool result = VerifyAssignability(
- class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self);
+ class_loader, dex_file, deps.assignable_types_, /* expected_assignability= */ true, self);
result = result && VerifyAssignability(
- class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self);
+ class_loader, dex_file, deps.unassignable_types_, /* expected_assignability= */ false, self);
result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);