ART: Refactor for bugprone-argument-comment
Handles the runtime/ directory.
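
The clang-tidy check only verifies argument comments written in the
form /* param_name= */, so this change adds the trailing '=' to the
existing comments; no argument values change. A minimal before/after
sketch, taken from the ProfilingInfo::Create call in jit.cc:

  // Before: the comment is plain prose and is not checked by clang-tidy.
  ProfilingInfo::Create(self, method_, /* retry_allocation */ true);

  // After: clang-tidy verifies that 'retry_allocation' matches the name
  // of the corresponding parameter and warns on a mismatch.
  ProfilingInfo::Create(self, method_, /* retry_allocation= */ true);

With the '=', a stale or misplaced comment becomes a tidy warning
instead of silently drifting out of sync with the callee.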
Bug: 116054210
Test: WITH_TIDY=1 mmma art
Change-Id: Ibc0d5086809d647f0ce4df5452eb84442d27ecf0
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c1f69b8..ef893ee 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -599,12 +599,12 @@
void Run(Thread* self) override {
ScopedObjectAccess soa(self);
if (kind_ == kCompile) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
} else if (kind_ == kCompileOsr) {
- Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
+ Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
} else {
DCHECK(kind_ == kAllocateProfile);
- if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+ if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
}
}
@@ -673,7 +673,7 @@
if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
if ((new_count >= WarmMethodThreshold()) &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
- bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
+ bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
}
@@ -741,7 +741,7 @@
if (np_method->IsCompilable()) {
if (!np_method->IsNative()) {
// The compiler requires a ProfilingInfo object for non-native methods.
- ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+ ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
}
JitCompileTask compile_task(method, JitCompileTask::kCompile);
// Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
@@ -761,7 +761,7 @@
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, profiling_info->GetSavedEntryPoint());
} else {
- AddSamples(thread, method, 1, /* with_backedges */false);
+ AddSamples(thread, method, 1, /* with_backedges= */false);
}
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 63cb6a4..8600b41 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -221,7 +221,7 @@
unique_fd mem_fd;
// Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
if (mem_fd.get() < 0) {
VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
<< strerror(errno);
@@ -281,8 +281,8 @@
kProtRW,
base_flags,
mem_fd,
- /* start */ 0,
- /* low_4gb */ true,
+ /* start= */ 0,
+ /* low_4gb= */ true,
"data-code-cache",
&error_str);
} else {
@@ -303,12 +303,12 @@
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
"data-code-cache",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
data_capacity + exec_capacity,
kProtRW,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /* low_4gb= */ true,
+ /* reuse= */ false,
+ /* reservation= */ nullptr,
&error_str);
}
@@ -347,8 +347,8 @@
kProtR,
base_flags,
mem_fd,
- /* start */ data_capacity,
- /* low_4GB */ false,
+ /* start= */ data_capacity,
+ /* low_4gb= */ false,
"jit-code-cache-rw",
&error_str);
if (!non_exec_pages.IsValid()) {
@@ -1008,7 +1008,7 @@
// Simply discard the compiled code. Clear the counter so that it may be recompiled later.
// Hopefully the class hierarchy will be more stable when compilation is retried.
single_impl_still_valid = false;
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
break;
}
}
@@ -1156,7 +1156,7 @@
// method. The compiled code for the method (if there is any) must not be in any threads call stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
- RemoveMethodLocked(method, /* release_memory */ true);
+ RemoveMethodLocked(method, /* release_memory= */ true);
}
// This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -1314,7 +1314,7 @@
// its stack frame, it is not the method owning return_pc_. We just pass null to
// LookupMethodHeader: the method is only checked against in debug builds.
OatQuickMethodHeader* method_header =
- code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
+ code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
if (method_header != nullptr) {
const void* code = method_header->GetCode();
CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1438,7 +1438,7 @@
<< PrettySize(CodeCacheSize())
<< ", data=" << PrettySize(DataCacheSize());
- DoCollection(self, /* collect_profiling_info */ do_full_collection);
+ DoCollection(self, /* collect_profiling_info= */ do_full_collection);
VLOG(jit) << "After code cache collection, code="
<< PrettySize(CodeCacheSize())
@@ -1551,7 +1551,7 @@
info->SetSavedEntryPoint(nullptr);
// We are going to move this method back to interpreter. Clear the counter now to
// give it a chance to be hot again.
- ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
+ ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
}
}
} else if (kIsDebugBuild) {
@@ -1933,7 +1933,7 @@
VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not hit the
// threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
- ClearMethodCounter(method, /*was_warm*/ false);
+ ClearMethodCounter(method, /*was_warm=*/ false);
return false;
}
@@ -2009,7 +2009,7 @@
// and clear the counter to get the method Jitted again.
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
- ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
+ ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
} else {
MutexLock mu(Thread::Current(), lock_);
auto it = osr_code_map_.find(method);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 9043f26..e3248ea 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -129,7 +129,7 @@
}
total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
}
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ true);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
// When we save without waiting for JIT notifications we use a simple
@@ -183,7 +183,7 @@
uint16_t number_of_new_methods = 0;
uint64_t start_work = NanoTime();
- bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
+ bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods);
// Update the notification counter based on result. Note that there might be contention on this
// but we don't care about to be 100% precise.
if (!profile_saved_to_disk) {
@@ -501,7 +501,7 @@
// We only need to do this once, not once per dex location.
// TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms.
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ false);
+ FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);
for (const auto& it : tracked_locations) {
if (!force_save && ShuttingDown(Thread::Current())) {
@@ -521,7 +521,7 @@
}
{
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+ if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
LOG(WARNING) << "Could not forcefully load profile " << filename;
continue;
}
@@ -607,9 +607,9 @@
Runtime* runtime = Runtime::Current();
bool attached = runtime->AttachCurrentThread("Profile Saver",
- /*as_daemon*/true,
+ /*as_daemon=*/true,
runtime->GetSystemThreadGroup(),
- /*create_peer*/true);
+ /*create_peer=*/true);
if (!attached) {
CHECK(runtime->IsShuttingDown(Thread::Current()));
return nullptr;
@@ -751,7 +751,7 @@
// Force save everything before destroying the thread since we want profiler_pthread_ to remain
// valid.
- instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
// Wait for the saver thread to stop.
CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -838,7 +838,7 @@
// but we only use this in testing when we now this won't happen.
// Refactor the way we handle the instance so that we don't end up in this situation.
if (saver != nullptr) {
- saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+ saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
}
}
@@ -846,7 +846,7 @@
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
- if (!info.Load(profile, /*clear_if_invalid*/false)) {
+ if (!info.Load(profile, /*clear_if_invalid=*/false)) {
return false;
}
ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a3dae83..f6139bb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -125,7 +125,7 @@
}
bool IsInUseByCompiler() const {
- return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+ return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
(current_inline_uses_ > 0);
}