Merge "Clean up sampling tracing." into dalvik-dev
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 498f7ef..a150f06 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -19,7 +19,10 @@
TEST_COMMON_SRC_FILES := \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
+ compiler/image_test.cc \
compiler/jni/jni_compiler_test.cc \
+ compiler/oat_test.cc \
+ compiler/output_stream_test.cc \
compiler/utils/arm/managed_register_arm_test.cc \
compiler/utils/x86/managed_register_x86_test.cc \
runtime/barrier_test.cc \
@@ -41,7 +44,6 @@
runtime/gc/heap_test.cc \
runtime/gc/space/space_test.cc \
runtime/gtest_test.cc \
- runtime/image_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
runtime/intern_table_test.cc \
@@ -49,8 +51,6 @@
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
runtime/mirror/object_test.cc \
- runtime/oat_test.cc \
- runtime/output_stream_test.cc \
runtime/reference_table_test.cc \
runtime/runtime_test.cc \
runtime/thread_pool_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 5caf688..0abe9ba 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -87,6 +87,7 @@
elf_stripper.cc \
elf_writer.cc \
elf_writer_quick.cc \
+ file_output_stream.cc \
image_writer.cc \
oat_writer.cc \
vector_output_stream.cc
@@ -97,6 +98,7 @@
sea_ir/ir/instruction_tools.cc \
sea_ir/ir/sea.cc \
sea_ir/code_gen/code_gen.cc \
+ sea_ir/code_gen/code_gen_data.cc \
sea_ir/types/type_inference.cc \
sea_ir/types/type_inference_visitor.cc \
sea_ir/debug/dot_gen.cc
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 4659f7b..a0a5c01 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -314,7 +314,7 @@
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file);
-
+#ifdef ART_SEA_IR_MODE
extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler,
const art::DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -323,7 +323,7 @@
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file);
-
+#endif
extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver,
uint32_t access_flags, uint32_t method_idx,
const art::DexFile& dex_file);
@@ -664,8 +664,7 @@
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- typedef DescriptorSet::iterator It; // TODO: C++0x auto
- for (It it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
+ for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
std::string descriptor(*it);
SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(descriptor.c_str()));
if (klass.get() == NULL) {
@@ -687,12 +686,9 @@
unresolved_exception_types.clear();
class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
&unresolved_exception_types);
- typedef std::set<std::pair<uint16_t, const DexFile*> >::const_iterator It; // TODO: C++0x auto
- for (It it = unresolved_exception_types.begin(),
- end = unresolved_exception_types.end();
- it != end; ++it) {
- uint16_t exception_type_idx = it->first;
- const DexFile* dex_file = it->second;
+ for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
+ uint16_t exception_type_idx = exception_type.first;
+ const DexFile* dex_file = exception_type.second;
mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file);
mirror:: ClassLoader* class_loader = NULL;
SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
@@ -2230,16 +2226,15 @@
CHECK(compiled_method != NULL);
} else if ((access_flags & kAccAbstract) != 0) {
} else {
- bool compile = verifier::MethodVerifier::IsCandidateForCompilation(code_item, access_flags);
-#ifdef ART_SEA_IR_MODE
- bool use_sea = Runtime::Current()->IsSeaIRMode();
- use_sea = use_sea && (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) compile = true;
-#endif
+ MethodReference method_ref(&dex_file, method_idx);
+ bool compile = verifier::MethodVerifier::IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
CompilerFn compiler = compiler_;
#ifdef ART_SEA_IR_MODE
+ bool use_sea = Runtime::Current()->IsSeaIRMode();
+ use_sea = use_sea &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
if (use_sea) {
compiler = sea_ir_compiler_;
LOG(INFO) << "Using SEA IR to compile..." << std::endl;
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 6c090fd..359c493 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -80,7 +80,6 @@
#define DT_MIPS_RLD_MAP 0x70000016 // d_ptr
bool ElfFixup::FixupDynamic(ElfFile& elf_file, uintptr_t base_address) {
- // TODO: C++0x auto.
for (::llvm::ELF::Elf32_Word i = 0; i < elf_file.GetDynamicNum(); i++) {
::llvm::ELF::Elf32_Dyn& elf_dyn = elf_file.GetDynamic(i);
::llvm::ELF::Elf32_Word d_tag = elf_dyn.d_tag;
diff --git a/runtime/file_output_stream.cc b/compiler/file_output_stream.cc
similarity index 100%
rename from runtime/file_output_stream.cc
rename to compiler/file_output_stream.cc
diff --git a/runtime/file_output_stream.h b/compiler/file_output_stream.h
similarity index 86%
rename from runtime/file_output_stream.h
rename to compiler/file_output_stream.h
index 23a57f5..bde9e68 100644
--- a/runtime/file_output_stream.h
+++ b/compiler/file_output_stream.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_FILE_OUTPUT_STREAM_H_
-#define ART_RUNTIME_FILE_OUTPUT_STREAM_H_
+#ifndef ART_COMPILER_FILE_OUTPUT_STREAM_H_
+#define ART_COMPILER_FILE_OUTPUT_STREAM_H_
#include "output_stream.h"
@@ -34,11 +34,11 @@
virtual off_t Seek(off_t offset, Whence whence);
private:
- File* file_;
+ File* const file_;
DISALLOW_COPY_AND_ASSIGN(FileOutputStream);
};
} // namespace art
-#endif // ART_RUNTIME_FILE_OUTPUT_STREAM_H_
+#endif // ART_COMPILER_FILE_OUTPUT_STREAM_H_
diff --git a/runtime/image_test.cc b/compiler/image_test.cc
similarity index 100%
rename from runtime/image_test.cc
rename to compiler/image_test.cc
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4e9ae54..548ea9e 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -74,10 +74,7 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const std::vector<DexCache*>& all_dex_caches = class_linker->GetDexCaches();
- for (size_t i = 0; i < all_dex_caches.size(); i++) {
- DexCache* dex_cache = all_dex_caches[i];
- dex_caches_.insert(dex_cache);
- }
+ dex_caches_.insert(all_dex_caches.begin(), all_dex_caches.end());
UniquePtr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == NULL) {
@@ -117,11 +114,7 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false); // Remove garbage.
// Trim size of alloc spaces.
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- gc::space::ContinuousSpace* space = *it;
+ for (const auto& space : heap->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
space->AsDlMallocSpace()->Trim();
}
@@ -163,13 +156,8 @@
}
bool ImageWriter::AllocMemory() {
- gc::Heap* heap = Runtime::Current()->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
size_t size = 0;
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- gc::space::ContinuousSpace* space = *it;
+ for (const auto& space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
size += space->Size();
}
@@ -203,9 +191,7 @@
String* string = obj->AsString();
const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
- typedef Set::const_iterator CacheIt; // TODO: C++0x auto
- for (CacheIt it = writer->dex_caches_.begin(), end = writer->dex_caches_.end(); it != end; ++it) {
- DexCache* dex_cache = *it;
+ for (DexCache* dex_cache : writer->dex_caches_) {
const DexFile& dex_file = *dex_cache->GetDexFile();
const DexFile::StringId* string_id = dex_file.FindStringId(utf16_string);
if (string_id != NULL) {
@@ -251,16 +237,13 @@
class_linker->VisitClasses(NonImageClassesVisitor, &context);
// Remove the undesired classes from the class roots.
- typedef std::set<std::string>::const_iterator ClassIt; // TODO: C++0x auto
- for (ClassIt it = non_image_classes.begin(), end = non_image_classes.end(); it != end; ++it) {
- class_linker->RemoveClass((*it).c_str(), NULL);
+ for (const std::string& it : non_image_classes) {
+ class_linker->RemoveClass(it.c_str(), NULL);
}
// Clear references to removed classes from the DexCaches.
ArtMethod* resolution_method = runtime->GetResolutionMethod();
- typedef Set::const_iterator CacheIt; // TODO: C++0x auto
- for (CacheIt it = dex_caches_.begin(), end = dex_caches_.end(); it != end; ++it) {
- DexCache* dex_cache = *it;
+ for (DexCache* dex_cache : dex_caches_) {
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
if (klass != NULL && !IsImageClass(klass)) {
@@ -324,9 +307,8 @@
void ImageWriter::DumpImageClasses() {
CompilerDriver::DescriptorSet* image_classes = compiler_driver_.GetImageClasses();
CHECK(image_classes != NULL);
- typedef std::set<std::string>::const_iterator It; // TODO: C++0x auto
- for (It it = image_classes->begin(), end = image_classes->end(); it != end; ++it) {
- LOG(INFO) << " " << *it;
+ for (const std::string& image_class : *image_classes) {
+ LOG(INFO) << " " << image_class;
}
}
@@ -368,9 +350,8 @@
ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class,
dex_caches_.size());
int i = 0;
- typedef Set::const_iterator It; // TODO: C++0x auto
- for (It it = dex_caches_.begin(), end = dex_caches_.end(); it != end; ++it, ++i) {
- dex_caches->Set(i, *it);
+ for (DexCache* dex_cache : dex_caches_) {
+ dex_caches->Set(i++, dex_cache);
}
// build an Object[] of the roots needed to restore the runtime
@@ -403,7 +384,7 @@
SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
gc::Heap* heap = Runtime::Current()->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
+ const auto& spaces = heap->GetContinuousSpaces();
DCHECK(!spaces.empty());
DCHECK_EQ(0U, image_end_);
@@ -418,10 +399,7 @@
// TODO: Add InOrderWalk to heap bitmap.
const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
DCHECK(heap->GetLargeObjectsSpace()->GetLiveObjects()->IsEmpty());
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- gc::space::ContinuousSpace* space = *it;
+ for (const auto& space : spaces) {
space->GetLiveBitmap()->InOrderWalk(CalculateNewObjectOffsetsCallback, this);
DCHECK_LT(image_end_, image_->Size());
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 750109d..6a126b8 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -204,8 +204,7 @@
uint32_t quick_to_interpreter_bridge_offset_;
// DexCaches seen while scanning for fixing up CodeAndDirectMethods
- typedef std::set<mirror::DexCache*> Set;
- Set dex_caches_;
+ std::set<mirror::DexCache*> dex_caches_;
};
} // namespace art
diff --git a/runtime/oat_test.cc b/compiler/oat_test.cc
similarity index 100%
rename from runtime/oat_test.cc
rename to compiler/oat_test.cc
diff --git a/runtime/output_stream.h b/compiler/output_stream.h
similarity index 90%
rename from runtime/output_stream.h
rename to compiler/output_stream.h
index e7f8006..112dcfc 100644
--- a/runtime/output_stream.h
+++ b/compiler/output_stream.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OUTPUT_STREAM_H_
-#define ART_RUNTIME_OUTPUT_STREAM_H_
+#ifndef ART_COMPILER_OUTPUT_STREAM_H_
+#define ART_COMPILER_OUTPUT_STREAM_H_
#include <stdint.h>
@@ -53,4 +53,4 @@
} // namespace art
-#endif // ART_RUNTIME_OUTPUT_STREAM_H_
+#endif // ART_COMPILER_OUTPUT_STREAM_H_
diff --git a/runtime/output_stream_test.cc b/compiler/output_stream_test.cc
similarity index 98%
rename from runtime/output_stream_test.cc
rename to compiler/output_stream_test.cc
index 8da2ac9..d5e9755 100644
--- a/runtime/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -78,4 +78,4 @@
CheckTestOutput(output);
}
-} // namespace std
+} // namespace art
diff --git a/compiler/sea_ir/code_gen/code_gen.cc b/compiler/sea_ir/code_gen/code_gen.cc
index cb150e5..8d79c41 100644
--- a/compiler/sea_ir/code_gen/code_gen.cc
+++ b/compiler/sea_ir/code_gen/code_gen.cc
@@ -15,8 +15,14 @@
*/
#include <llvm/Support/raw_ostream.h>
+
+#include "base/logging.h"
+#include "utils.h"
+
#include "sea_ir/ir/sea.h"
#include "sea_ir/code_gen/code_gen.h"
+#include "sea_ir/types/type_inference.h"
+#include "sea_ir/types/types.h"
namespace sea_ir {
@@ -50,34 +56,36 @@
}
}
-void CodeGenPostpassVisitor::Visit(SeaGraph* graph) {
- std::vector<SignatureNode*>* parameters = graph->GetParameterNodes();
- std::cout << "=== SeaGraph ===" << parameters->size() << std::endl;
-}
-void CodeGenVisitor::Visit(SeaGraph* graph) {
- std::vector<SignatureNode*>* parameters = graph->GetParameterNodes();
- std::cout << "=== SeaGraph ===" << parameters->size() << std::endl;
-}
+void CodeGenPostpassVisitor::Visit(SeaGraph* graph) { }
+void CodeGenVisitor::Visit(SeaGraph* graph) { }
void CodeGenPrepassVisitor::Visit(SeaGraph* graph) {
std::vector<SignatureNode*>* parameters = graph->GetParameterNodes();
- std::cout << "=== SeaGraph ===" << parameters->size() << std::endl;
- // TODO: Extract correct types from dex for params and return value.
+  // TODO: It may be better to extract correct types from dex
+  // rather than from type inference.
DCHECK(parameters != NULL);
- std::vector<llvm::Type*> parameter_types(parameters->size(),
- llvm::Type::getInt32Ty(*llvm_data_->context_));
- // Build llvm function name.
- std::string function_name = art::StringPrintf(
- "class=%d_method=%d", graph->class_def_idx_, graph->method_idx_);
+ std::vector<llvm::Type*> parameter_types;
+ for (std::vector<SignatureNode*>::const_iterator param_iterator = parameters->begin();
+ param_iterator!= parameters->end(); param_iterator++) {
+ const Type* param_type = graph->ti_->type_data_.FindTypeOf((*param_iterator)->Id());
+ DCHECK(param_type->Equals(graph->ti_->type_cache_->Integer()))
+ << "Code generation for types other than integer not implemented.";
+ parameter_types.push_back(llvm::Type::getInt32Ty(*llvm_data_->context_));
+ }
- // Build llvm function type and parameters.
+ // TODO: Get correct function return type.
+ const Type* return_type = graph->ti_->type_data_.FindTypeOf(-1);
+ DCHECK(return_type->Equals(graph->ti_->type_cache_->Integer()))
+ << "Code generation for types other than integer not implemented.";
llvm::FunctionType *function_type = llvm::FunctionType::get(
llvm::Type::getInt32Ty(*llvm_data_->context_),
parameter_types, false);
+
llvm_data_->function_ = llvm::Function::Create(function_type,
- llvm::Function::ExternalLinkage, function_name, &llvm_data_->module_);
+ llvm::Function::ExternalLinkage, function_name_, &llvm_data_->module_);
unsigned param_id = 0;
for (llvm::Function::arg_iterator arg_it = llvm_data_->function_->arg_begin();
param_id != llvm_data_->function_->arg_size(); ++arg_it, ++param_id) {
+ // TODO: The "+1" is because of the Method parameter on position 0.
DCHECK(parameters->size() > param_id) << "Insufficient parameters for function signature";
// Build parameter register name for LLVM IR clarity.
std::string arg_name = art::StringPrintf("r%d", parameters->at(param_id)->GetResultRegister());
@@ -97,15 +105,12 @@
}
void CodeGenPrepassVisitor::Visit(Region* region) {
- std::cout << " == Region " << region->StringId() << " ==" << std::endl;
llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
}
void CodeGenPostpassVisitor::Visit(Region* region) {
- std::cout << " == Region " << region->StringId() << " ==" << std::endl;
llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
}
void CodeGenVisitor::Visit(Region* region) {
- std::cout << " == Region " << region->StringId() << " ==" << std::endl;
llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
}
@@ -189,13 +194,17 @@
void CodeGenVisitor::Visit(InvokeStaticInstructionNode* invoke) {
std::string instr = invoke->GetInstruction()->DumpString(NULL);
std::cout << "6.Instruction: " << instr << std::endl;
- // TODO: Build callee llvm function name.
- std::string function_name = art::StringPrintf("class=%d_method=%d", 0, 1);
+ // TODO: Build callee LLVM function name.
+ std::string symbol = "dex_";
+ symbol += art::MangleForJni(PrettyMethod(invoke->GetCalledMethodIndex(), dex_file_));
+ std::string function_name = "dex_int_00020Main_fibonacci_00028int_00029";
llvm::Function *callee = llvm_data_->module_.getFunction(function_name);
// TODO: Add proper checking of the matching between formal and actual signature.
DCHECK(NULL != callee);
std::vector<llvm::Value*> parameter_values;
std::vector<InstructionNode*> parameter_sources = invoke->GetSSAProducers();
+ // TODO: Replace first parameter with Method argument instead of 0.
+ parameter_values.push_back(llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, 0)));
for (std::vector<InstructionNode*>::const_iterator cit = parameter_sources.begin();
cit != parameter_sources.end(); ++cit) {
llvm::Value* parameter_value = llvm_data_->GetValue((*cit));
@@ -245,7 +254,7 @@
}
void CodeGenPostpassVisitor::Visit(PhiInstructionNode* phi) {
- std::cout << "Phi node for: " << phi->GetRegisterNumber() << std::endl;
+ std::cout << "10. Instruction: Phi(" << phi->GetRegisterNumber() << ")" << std::endl;
Region* r = phi->GetRegion();
const std::vector<Region*>* predecessors = r->GetPredecessors();
DCHECK(NULL != predecessors);
@@ -267,17 +276,14 @@
}
void CodeGenVisitor::Visit(SignatureNode* signature) {
- std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
"Signature nodes must correspond to a single parameter register.";
}
void CodeGenPrepassVisitor::Visit(SignatureNode* signature) {
- std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
"Signature nodes must correspond to a single parameter register.";
}
void CodeGenPostpassVisitor::Visit(SignatureNode* signature) {
- std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
"Signature nodes must correspond to a single parameter register.";
}
diff --git a/compiler/sea_ir/code_gen/code_gen.h b/compiler/sea_ir/code_gen/code_gen.h
index b1bc4dc..544e9f0 100644
--- a/compiler/sea_ir/code_gen/code_gen.h
+++ b/compiler/sea_ir/code_gen/code_gen.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
#define ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
+#include "instruction_set.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
@@ -66,6 +67,9 @@
void AddValue(InstructionNode* instruction, llvm::Value* value) {
AddValue(instruction->Id(), value);
}
+  // Generates and returns (as a string) the executable ELF code
+  // corresponding to the LLVM module, for the given @instruction_set.
+  std::string GetElf(art::InstructionSet instruction_set);
llvm::LLVMContext* const context_;
llvm::Module module_;
@@ -97,6 +101,8 @@
class CodeGenPrepassVisitor: public CodeGenPassVisitor {
public:
+ explicit CodeGenPrepassVisitor(const std::string& function_name):
+ function_name_(function_name) { }
void Visit(SeaGraph* graph);
void Visit(SignatureNode* region);
void Visit(Region* region);
@@ -113,6 +119,9 @@
void Visit(GotoInstructionNode* instruction) { }
void Visit(IfEqzInstructionNode* instruction) { }
void Visit(PhiInstructionNode* region);
+
+ private:
+ std::string function_name_;
};
class CodeGenPostpassVisitor: public CodeGenPassVisitor {
@@ -137,7 +146,8 @@
class CodeGenVisitor: public CodeGenPassVisitor {
public:
- explicit CodeGenVisitor(CodeGenData* code_gen_data): CodeGenPassVisitor(code_gen_data) { }
+ explicit CodeGenVisitor(CodeGenData* code_gen_data,
+ const art::DexFile& dex_file): CodeGenPassVisitor(code_gen_data), dex_file_(dex_file) { }
void Visit(SeaGraph* graph);
void Visit(SignatureNode* region);
void Visit(Region* region);
@@ -152,6 +162,10 @@
void Visit(GotoInstructionNode* instruction);
void Visit(IfEqzInstructionNode* instruction);
void Visit(PhiInstructionNode* region) { }
+
+ private:
+ std::string function_name_;
+ const art::DexFile& dex_file_;
};
} // namespace sea_ir
#endif // ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
diff --git a/compiler/sea_ir/code_gen/code_gen_data.cc b/compiler/sea_ir/code_gen/code_gen_data.cc
new file mode 100644
index 0000000..17f64db
--- /dev/null
+++ b/compiler/sea_ir/code_gen/code_gen_data.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <llvm/PassManager.h>
+#include <llvm/Support/TargetRegistry.h>
+#include <llvm/Support/FormattedStream.h>
+#include <llvm/Target/TargetMachine.h>
+#include <llvm/Transforms/IPO.h>
+#include <llvm/Transforms/IPO/PassManagerBuilder.h>
+
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/code_gen/code_gen.h"
+
+
+namespace sea_ir {
+std::string CodeGenData::GetElf(art::InstructionSet instruction_set) {
+ std::string elf;
+ ::llvm::raw_string_ostream out_stream(elf);
+ // Lookup the LLVM target
+ std::string target_triple;
+ std::string target_cpu;
+ std::string target_attr;
+ art::CompilerDriver::InstructionSetToLLVMTarget(instruction_set,
+ target_triple, target_cpu, target_attr);
+
+ std::string errmsg;
+ const ::llvm::Target* target =
+ ::llvm::TargetRegistry::lookupTarget(target_triple, errmsg);
+
+ CHECK(target != NULL) << errmsg;
+
+ // Target options
+ ::llvm::TargetOptions target_options;
+ target_options.FloatABIType = ::llvm::FloatABI::Soft;
+ target_options.NoFramePointerElim = true;
+ target_options.NoFramePointerElimNonLeaf = true;
+ target_options.UseSoftFloat = false;
+ target_options.EnableFastISel = false;
+
+ // Create the ::llvm::TargetMachine
+ ::llvm::OwningPtr< ::llvm::TargetMachine> target_machine(
+ target->createTargetMachine(target_triple, target_cpu, target_attr, target_options,
+ ::llvm::Reloc::Static, ::llvm::CodeModel::Small,
+ ::llvm::CodeGenOpt::Aggressive));
+
+ CHECK(target_machine.get() != NULL) << "Failed to create target machine";
+
+ // Add target data
+ const ::llvm::DataLayout* data_layout = target_machine->getDataLayout();
+
+ // PassManager for code generation passes
+ ::llvm::PassManager pm;
+ pm.add(new ::llvm::DataLayout(*data_layout));
+
+ // FunctionPassManager for optimization pass
+ ::llvm::FunctionPassManager fpm(&module_);
+ fpm.add(new ::llvm::DataLayout(*data_layout));
+
+ // Add optimization pass
+ ::llvm::PassManagerBuilder pm_builder;
+ // TODO: Use inliner after we can do IPO.
+ pm_builder.Inliner = NULL;
+ // pm_builder.Inliner = ::llvm::createFunctionInliningPass();
+ // pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
+ // pm_builder.Inliner = ::llvm::createPartialInliningPass();
+ pm_builder.OptLevel = 3;
+ pm_builder.DisableSimplifyLibCalls = 1;
+ pm_builder.DisableUnitAtATime = 1;
+ pm_builder.populateFunctionPassManager(fpm);
+ pm_builder.populateModulePassManager(pm);
+ pm.add(::llvm::createStripDeadPrototypesPass());
+ // Add passes to emit ELF image
+ {
+ ::llvm::formatted_raw_ostream formatted_os(out_stream, false);
+ // Ask the target to add backend passes as necessary.
+ if (target_machine->addPassesToEmitFile(pm,
+ formatted_os,
+ ::llvm::TargetMachine::CGFT_ObjectFile,
+ true)) {
+ LOG(FATAL) << "Unable to generate ELF for this target";
+ }
+
+ // Run the code generation passes
+ pm.run(module_);
+ }
+ return elf;
+}
+} // namespace sea_ir
diff --git a/compiler/sea_ir/debug/dot_gen.h b/compiler/sea_ir/debug/dot_gen.h
index 05d97fa..f2ab8c4 100644
--- a/compiler/sea_ir/debug/dot_gen.h
+++ b/compiler/sea_ir/debug/dot_gen.h
@@ -101,10 +101,10 @@
// Saves to @filename the .dot representation of @graph with the options @options.
void DumpSea(SeaGraph* graph, std::string filename,
art::SafeMap<int, const Type*>* types) const {
- LOG(INFO) << "Starting to write SEA string to file.";
+ LOG(INFO) << "Starting to write SEA string to file " << filename << std::endl;
DotGenerationVisitor dgv = DotGenerationVisitor(&options_, types);
graph->Accept(&dgv);
- art::File* file = art::OS::OpenFileReadWrite(filename.c_str());
+ art::File* file = art::OS::CreateEmptyFile(filename.c_str());
art::FileOutputStream fos(file);
std::string graph_as_string = dgv.GetResult();
graph_as_string += "}";
diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc
index 421c3a4..6efc103 100644
--- a/compiler/sea_ir/frontend.cc
+++ b/compiler/sea_ir/frontend.cc
@@ -35,7 +35,6 @@
#include "sea_ir/types/types.h"
#include "sea_ir/code_gen/code_gen.h"
-
namespace art {
static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler,
@@ -48,34 +47,22 @@
, llvm::LlvmCompilationUnit* llvm_compilation_unit
#endif
) {
- // NOTE: Instead of keeping the convention from the Dalvik frontend.cc
- // and silencing the cpplint.py warning, I just corrected the formatting.
- VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
+ LOG(INFO) << "Compiling " << PrettyMethod(method_idx, dex_file) << ".";
sea_ir::SeaGraph* ir_graph = sea_ir::SeaGraph::GetGraph(dex_file);
- sea_ir::CodeGenData* llvm_data =
- ir_graph->CompileMethod(code_item, class_def_idx, method_idx, method_access_flags, dex_file);
+ std::string symbol = "dex_" + MangleForJni(PrettyMethod(method_idx, dex_file));
+ sea_ir::CodeGenData* llvm_data = ir_graph->CompileMethod(symbol,
+ code_item, class_def_idx, method_idx, method_access_flags, dex_file);
sea_ir::DotConversion dc;
SafeMap<int, const sea_ir::Type*>* types = ir_graph->ti_->GetTypeMap();
dc.DumpSea(ir_graph, "/tmp/temp.dot", types);
- CHECK(0 && "No SEA compiled function exists yet.");
-
MethodReference mref(&dex_file, method_idx);
-
- // TODO: Passing the LLVM code as string is ugly and inefficient,
- // but it is the way portable did it. I kept it for compatibility,
- // but actually it should not happen.
- std::string llvm_code;
- ::llvm::raw_string_ostream str_os(llvm_code);
- ::llvm::WriteBitcodeToFile(&llvm_data->module_, str_os);
-
- std::string symbol = "dex_";
- symbol += MangleForJni(PrettyMethod(method_idx, dex_file));
-
+ std::string llvm_code = llvm_data->GetElf(compiler.GetInstructionSet());
CompiledMethod* compiled_method = new CompiledMethod(
compiler.GetInstructionSet(),
llvm_code,
*verifier::MethodVerifier::GetDexGcMap(mref),
symbol);
+ LOG(INFO) << "Compiled SEA IR method " << PrettyMethod(method_idx, dex_file) << ".";
return compiled_method;
}
diff --git a/compiler/sea_ir/ir/instruction_nodes.h b/compiler/sea_ir/ir/instruction_nodes.h
index 906a10f..63e89e7 100644
--- a/compiler/sea_ir/ir/instruction_nodes.h
+++ b/compiler/sea_ir/ir/instruction_nodes.h
@@ -56,6 +56,8 @@
// essentially creating SSA form.
void RenameToSSA(int reg_no, InstructionNode* definition) {
definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
+ DCHECK(NULL != definition) << "SSA definition for register " << reg_no
+ << " used in instruction " << Id() << " not found.";
definition->AddSSAUse(this);
}
// Returns the ordered set of Instructions that define the input operands of this instruction.
@@ -179,14 +181,22 @@
class InvokeStaticInstructionNode: public InstructionNode {
public:
- explicit InvokeStaticInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ explicit InvokeStaticInstructionNode(const art::Instruction* inst): InstructionNode(inst),
+ method_index_(inst->VRegB_35c()) { }
int GetResultRegister() const {
return RETURN_REGISTER;
}
+
+ int GetCalledMethodIndex() const {
+ return method_index_;
+ }
void Accept(IRVisitor* v) {
v->Visit(this);
v->Traverse(this);
}
+
+ private:
+ const uint32_t method_index_;
};
class AddIntInstructionNode: public InstructionNode {
diff --git a/compiler/sea_ir/ir/regions_test.cc b/compiler/sea_ir/ir/regions_test.cc
index 9813465..8ca51e4 100644
--- a/compiler/sea_ir/ir/regions_test.cc
+++ b/compiler/sea_ir/ir/regions_test.cc
@@ -56,4 +56,4 @@
EXPECT_EQ(root, preds->at(0));
}
-} // namespace art
+} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/sea.cc b/compiler/sea_ir/ir/sea.cc
index 902839d..5ccaba6 100644
--- a/compiler/sea_ir/ir/sea.cc
+++ b/compiler/sea_ir/ir/sea.cc
@@ -174,6 +174,21 @@
DCHECK(!changed) << "Reaching definitions computation did not reach a fixed point.";
}
+void SeaGraph::InsertSignatureNodes(const art::DexFile::CodeItem* code_item, Region* r) {
+ // Insert a fake SignatureNode for the first parameter.
+ // TODO: Provide a register enum value for the fake parameter.
+ SignatureNode* parameter_def_node = new sea_ir::SignatureNode(0, 0);
+ AddParameterNode(parameter_def_node);
+ r->AddChild(parameter_def_node);
+ // Insert SignatureNodes for each Dalvik register parameter.
+ for (unsigned int crt_offset = 0; crt_offset < code_item->ins_size_; crt_offset++) {
+ int register_no = code_item->registers_size_ - crt_offset - 1;
+ int position = crt_offset + 1;
+ SignatureNode* parameter_def_node = new sea_ir::SignatureNode(register_no, position);
+ AddParameterNode(parameter_def_node);
+ r->AddChild(parameter_def_node);
+ }
+}
void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
const art::DexFile& dex_file, uint32_t class_def_idx,
@@ -209,15 +224,8 @@
Region* r = GetNewRegion();
- // Insert one SignatureNode per function argument,
- // to serve as placeholder definitions in dataflow analysis.
- for (unsigned int crt_offset = 0; crt_offset < code_item->ins_size_; crt_offset++) {
- int position = crt_offset; // TODO: Is this the correct offset in the signature?
- SignatureNode* parameter_def_node =
- new sea_ir::SignatureNode(code_item->registers_size_ - 1 - crt_offset, position);
- AddParameterNode(parameter_def_node);
- r->AddChild(parameter_def_node);
- }
+
+ InsertSignatureNodes(code_item, r);
// Pass: Assign instructions to region nodes and
// assign branches their control flow successors.
i = 0;
@@ -386,23 +394,21 @@
scoped_table->CloseScope();
}
-CodeGenData* SeaGraph::GenerateLLVM() {
+CodeGenData* SeaGraph::GenerateLLVM(const std::string& function_name,
+ const art::DexFile& dex_file) {
// Pass: Generate LLVM IR.
- CodeGenPrepassVisitor code_gen_prepass_visitor;
+ CodeGenPrepassVisitor code_gen_prepass_visitor(function_name);
std::cout << "Generating code..." << std::endl;
- std::cout << "=== PRE VISITING ===" << std::endl;
Accept(&code_gen_prepass_visitor);
- CodeGenVisitor code_gen_visitor(code_gen_prepass_visitor.GetData());
- std::cout << "=== VISITING ===" << std::endl;
+ CodeGenVisitor code_gen_visitor(code_gen_prepass_visitor.GetData(), dex_file);
Accept(&code_gen_visitor);
- std::cout << "=== POST VISITING ===" << std::endl;
CodeGenPostpassVisitor code_gen_postpass_visitor(code_gen_visitor.GetData());
Accept(&code_gen_postpass_visitor);
- code_gen_postpass_visitor.Write(std::string("my_file.llvm"));
return code_gen_postpass_visitor.GetData();
}
CodeGenData* SeaGraph::CompileMethod(
+ const std::string& function_name,
const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file) {
// Two passes: Builds the intermediate structure (non-SSA) of the sea-ir for the function.
@@ -422,7 +428,7 @@
// Pass: type inference
ti_->ComputeTypes(this);
// Pass: Generate LLVM IR.
- CodeGenData* cgd = GenerateLLVM();
+ CodeGenData* cgd = GenerateLLVM(function_name, dex_file);
return cgd;
}
@@ -607,7 +613,7 @@
sea_instructions.push_back(new IfNeInstructionNode(in));
break;
case art::Instruction::ADD_INT_LIT8:
- sea_instructions.push_back(new UnnamedConstInstructionNode(in, in->VRegB_22b()));
+ sea_instructions.push_back(new UnnamedConstInstructionNode(in, in->VRegC_22b()));
sea_instructions.push_back(new AddIntLitInstructionNode(in));
break;
case art::Instruction::MOVE_RESULT:
diff --git a/compiler/sea_ir/ir/sea.h b/compiler/sea_ir/ir/sea.h
index df420ed..92c2043 100644
--- a/compiler/sea_ir/ir/sea.h
+++ b/compiler/sea_ir/ir/sea.h
@@ -50,12 +50,12 @@
class SignatureNode: public InstructionNode {
public:
// Creates a new signature node representing the initial definition of the
- // register @parameter_register which is the @position-th argument to the method.
- explicit SignatureNode(unsigned int parameter_register, unsigned int position):
- InstructionNode(NULL), parameter_register_(parameter_register), position_(position) { }
+ // register @register_no which is the @signature_position-th argument to the method.
+ explicit SignatureNode(unsigned int register_no, unsigned int signature_position):
+ InstructionNode(NULL), register_no_(register_no), position_(signature_position) { }
int GetResultRegister() const {
- return parameter_register_;
+ return register_no_;
}
unsigned int GetPositionInSignature() const {
@@ -72,7 +72,7 @@
}
private:
- const unsigned int parameter_register_;
+ const unsigned int register_no_;
const unsigned int position_; // The position of this parameter node is
// in the function parameter list.
};
@@ -261,7 +261,8 @@
public:
static SeaGraph* GetGraph(const art::DexFile&);
- CodeGenData* CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
+ CodeGenData* CompileMethod(const std::string& function_name,
+ const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file);
// Returns all regions corresponding to this SeaGraph.
std::vector<Region*>* GetRegions() {
@@ -338,7 +339,9 @@
void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
// Generate LLVM IR for the method.
// Precondition: ConvertToSSA().
- CodeGenData* GenerateLLVM();
+ CodeGenData* GenerateLLVM(const std::string& function_name, const art::DexFile& dex_file);
+ // Inserts one SignatureNode for each argument of the function in region @r.
+ void InsertSignatureNodes(const art::DexFile::CodeItem* code_item, Region* r);
static SeaGraph graph_;
std::vector<Region*> regions_;
diff --git a/compiler/sea_ir/types/type_data_test.cc b/compiler/sea_ir/types/type_data_test.cc
index a66ebce..f7a5362 100644
--- a/compiler/sea_ir/types/type_data_test.cc
+++ b/compiler/sea_ir/types/type_data_test.cc
@@ -38,4 +38,4 @@
EXPECT_TRUE(byte_type == td.FindTypeOf(second_instruction_id));
}
-} // namespace art
+} // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference.cc b/compiler/sea_ir/types/type_inference.cc
index 31d7f0f..1731987 100644
--- a/compiler/sea_ir/types/type_inference.cc
+++ b/compiler/sea_ir/types/type_inference.cc
@@ -68,6 +68,9 @@
std::vector<const Type*> FunctionTypeInfo::GetDeclaredArgumentTypes() {
art::ScopedObjectAccess soa(art::Thread::Current());
std::vector<const Type*> argument_types;
+ // TODO: The additional (fake) Method parameter is added in the first position,
+ // but is represented as integer because we don't support pointers yet.
+ argument_types.push_back(&(type_cache_->Integer()));
// Include the "this" pointer.
size_t cur_arg = 0;
if (!IsStatic()) {
@@ -82,7 +85,7 @@
}
cur_arg++;
}
-
+ // Include the types of the parameters in the Java method signature.
const art::DexFile::ProtoId& proto_id =
dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
art::DexFileParameterIterator iterator(*dex_file_, proto_id);
@@ -149,6 +152,13 @@
std::copy(instructions->begin(), instructions->end(), std::back_inserter(worklist));
}
TypeInferenceVisitor tiv(graph, &type_data_, type_cache_);
+ // Record return type of the function.
+ graph->Accept(&tiv);
+ const Type* new_type = tiv.GetType();
+ type_data_.SetTypeOf(-1, new_type); // TODO: Record this info in a way that
+ // does not need magic constants.
+ // Make SeaGraph a SeaNode?
+
// Sparse (SSA) fixed-point algorithm that processes each instruction in the work-list,
// adding consumers of instructions whose result changed type back into the work-list.
// Note: According to [1] list iterators should not be invalidated on insertion,
@@ -159,14 +169,11 @@
// TODO: Remove elements as I go.
for (std::list<InstructionNode*>::const_iterator instruction_it = worklist.begin();
instruction_it != worklist.end(); instruction_it++) {
- std::cout << "[TI] Instruction: " << (*instruction_it)->Id() << std::endl;
(*instruction_it)->Accept(&tiv);
const Type* old_type = type_data_.FindTypeOf((*instruction_it)->Id());
const Type* new_type = tiv.GetType();
bool type_changed = (old_type != new_type);
if (type_changed) {
- std::cout << " New type:" << new_type->IsIntegralTypes() << std::endl;
- std::cout << " Descrip:" << new_type->Dump()<< " on " << (*instruction_it)->Id() << std::endl;
type_data_.SetTypeOf((*instruction_it)->Id(), new_type);
// Add SSA consumers of the current instruction to the work-list.
std::vector<InstructionNode*>* consumers = (*instruction_it)->GetSSAConsumers();
diff --git a/compiler/sea_ir/types/type_inference.h b/compiler/sea_ir/types/type_inference.h
index d951d82..7a178b2 100644
--- a/compiler/sea_ir/types/type_inference.h
+++ b/compiler/sea_ir/types/type_inference.h
@@ -31,8 +31,7 @@
// precise verification (which is the job of the verifier).
class TypeInference {
public:
- TypeInference() {
- type_cache_ = new art::verifier::RegTypeCache(false);
+ TypeInference() : type_cache_(new art::verifier::RegTypeCache(false)) {
}
// Computes the types for the method with SEA IR representation provided by @graph.
@@ -43,10 +42,8 @@
}
// Returns true if @descriptor corresponds to a primitive type.
static bool IsPrimitiveDescriptor(char descriptor);
-
- protected:
- art::verifier::RegTypeCache* type_cache_;
- TypeData type_data_;
+ TypeData type_data_; // TODO: Make private, add an accessor, and stop exposing the SafeMap above.
+ art::verifier::RegTypeCache* const type_cache_; // TODO: Make private.
};
// Stores information about the exact type of a function.
diff --git a/compiler/sea_ir/types/type_inference_visitor.cc b/compiler/sea_ir/types/type_inference_visitor.cc
index 3da2fc1..27bb5d8 100644
--- a/compiler/sea_ir/types/type_inference_visitor.cc
+++ b/compiler/sea_ir/types/type_inference_visitor.cc
@@ -21,6 +21,12 @@
namespace sea_ir {
+void TypeInferenceVisitor::Visit(SeaGraph* graph) {
+ FunctionTypeInfo fti(graph_, type_cache_);
+ const Type* return_type = fti.GetReturnValueType();
+ crt_type_.push_back(return_type);
+}
+
void TypeInferenceVisitor::Visit(SignatureNode* parameter) {
FunctionTypeInfo fti(graph_, type_cache_);
std::vector<const Type*> arguments = fti.GetDeclaredArgumentTypes();
@@ -78,9 +84,9 @@
const Type* TypeInferenceVisitor::MergeTypes(std::vector<const Type*>& types) const {
const Type* type = NULL;
- if (types.size()>0) {
+ if (types.size() > 0) {
type = *(types.begin());
- if (types.size()>1) {
+ if (types.size() > 1) {
for (std::vector<const Type*>::const_iterator cit = types.begin();
cit != types.end(); cit++) {
if (!type->Equals(**cit)) {
diff --git a/compiler/sea_ir/types/type_inference_visitor.h b/compiler/sea_ir/types/type_inference_visitor.h
index 200b9f0..d715151 100644
--- a/compiler/sea_ir/types/type_inference_visitor.h
+++ b/compiler/sea_ir/types/type_inference_visitor.h
@@ -40,7 +40,7 @@
}
// There are no type related actions to be performed on these classes.
void Initialize(SeaGraph* graph) { }
- void Visit(SeaGraph* graph) { }
+ void Visit(SeaGraph* graph);
void Visit(Region* region) { }
void Visit(PhiInstructionNode* instruction);
@@ -61,7 +61,7 @@
std::vector<const Type*> GetOperandTypes(InstructionNode* instruction) const;
const Type* GetType() {
// TODO: Currently multiple defined types are not supported.
- if (crt_type_.size()>0) {
+ if (!crt_type_.empty()) {
const Type* single_type = crt_type_.at(0);
crt_type_.clear();
return single_type;
diff --git a/compiler/sea_ir/types/type_inference_visitor_test.cc b/compiler/sea_ir/types/type_inference_visitor_test.cc
index 8a249eb..77acb3d 100644
--- a/compiler/sea_ir/types/type_inference_visitor_test.cc
+++ b/compiler/sea_ir/types/type_inference_visitor_test.cc
@@ -130,4 +130,4 @@
}
-} // namespace art
+} // namespace sea_ir
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 9d79002..c9be4ed 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -137,6 +137,7 @@
// Next in linked list of slow paths
SlowPath *next_;
+ private:
friend class AssemblerBuffer;
DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fbfdfd9..8bc7877 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -736,13 +736,10 @@
oat_dumper_.reset(new OatDumper(host_prefix_, *oat_file));
- std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file->GetOatDexFiles();
- for (size_t i = 0; i < oat_dex_files.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
+ for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
CHECK(oat_dex_file != NULL);
- std::pair<std::string, size_t> entry(oat_dex_file->GetDexFileLocation(),
- oat_dex_file->FileSize());
- stats_.oat_dex_file_sizes.push_back(entry);
+ stats_.oat_dex_file_sizes.push_back(std::make_pair(oat_dex_file->GetDexFileLocation(),
+ oat_dex_file->FileSize()));
}
os << "OBJECTS:\n" << std::flush;
@@ -761,10 +758,7 @@
std::ostream indent_os(&indent_filter);
os_ = &indent_os;
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- gc::space::Space* space = *it;
+ for (const auto& space : spaces) {
if (space->IsImageSpace()) {
gc::space::ImageSpace* image_space = space->AsImageSpace();
image_space->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
@@ -1263,16 +1257,16 @@
os << "object_bytes breakdown:\n";
size_t object_bytes_total = 0;
- typedef SizeAndCountTable::const_iterator It; // TODO: C++0x auto
- for (It it = sizes_and_counts.begin(), end = sizes_and_counts.end(); it != end; ++it) {
- const std::string& descriptor(it->first);
- double average = static_cast<double>(it->second.bytes) / static_cast<double>(it->second.count);
- double percent = PercentOfObjectBytes(it->second.bytes);
+ for (const auto& sizes_and_count : sizes_and_counts) {
+ const std::string& descriptor(sizes_and_count.first);
+ double average = static_cast<double>(sizes_and_count.second.bytes) /
+ static_cast<double>(sizes_and_count.second.count);
+ double percent = PercentOfObjectBytes(sizes_and_count.second.bytes);
os << StringPrintf("%32s %8zd bytes %6zd instances "
"(%4.0f bytes/instance) %2.0f%% of object_bytes\n",
- descriptor.c_str(), it->second.bytes, it->second.count,
- average, percent);
- object_bytes_total += it->second.bytes;
+ descriptor.c_str(), sizes_and_count.second.bytes,
+ sizes_and_count.second.count, average, percent);
+ object_bytes_total += sizes_and_count.second.bytes;
}
os << "\n" << std::flush;
CHECK_EQ(object_bytes, object_bytes_total);
@@ -1292,11 +1286,10 @@
large_initializer_code_bytes, PercentOfOatBytes(large_initializer_code_bytes),
large_method_code_bytes, PercentOfOatBytes(large_method_code_bytes))
<< "DexFile sizes:\n";
- typedef std::vector<std::pair<std::string, size_t> >::const_iterator It2;
- for (It2 it = oat_dex_file_sizes.begin(); it != oat_dex_file_sizes.end();
- ++it) {
+ for (const std::pair<std::string, size_t>& oat_dex_file_size : oat_dex_file_sizes) {
os << StringPrintf("%s = %zd (%2.0f%% of oat file bytes)\n",
- it->first.c_str(), it->second, PercentOfOatBytes(it->second));
+ oat_dex_file_size.first.c_str(), oat_dex_file_size.second,
+ PercentOfOatBytes(oat_dex_file_size.second));
}
os << "\n" << StringPrintf("gc_map_bytes = %7zd (%2.0f%% of oat file bytes)\n"
diff --git a/runtime/Android.mk b/runtime/Android.mk
index b34abe4..1ce7a30 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -44,7 +44,6 @@
disassembler_mips.cc \
disassembler_x86.cc \
elf_file.cc \
- file_output_stream.cc \
gc/allocator/dlmalloc.cc \
gc/accounting/card_table.cc \
gc/accounting/gc_allocator.cc \
diff --git a/runtime/base/bounded_fifo.h b/runtime/base/bounded_fifo.h
new file mode 100644
index 0000000..cb92d40
--- /dev/null
+++ b/runtime/base/bounded_fifo.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BOUNDED_FIFO_H_
+#define ART_RUNTIME_BASE_BOUNDED_FIFO_H_
+
+#include "cutils/atomic.h"
+#include "cutils/atomic-inline.h"
+
+namespace art {
+
+// A bounded fifo is a fifo which has a bounded size. The power of two version uses a bit mask to
+// avoid needing to deal with wrapping integers around or using a modulo operation.
+template <typename T, const size_t MaxSize>
+class BoundedFifoPowerOfTwo {
+ public:
+ BoundedFifoPowerOfTwo() {
+ // TODO: Do this with a compile time check.
+ CHECK(IsPowerOfTwo(MaxSize));
+ clear();
+ }
+
+ void clear() {
+ back_index_ = 0;
+ size_ = 0;
+ }
+
+ bool empty() const {
+ return size() == 0;
+ }
+
+ size_t size() const {
+ return size_;
+ }
+
+ void push_back(const T& value) {
+ ++size_;
+ DCHECK_LE(size_, MaxSize);
+ // Relies on integer overflow behaviour.
+ data_[back_index_++ & mask_] = value;
+ }
+
+ const T& front() const {
+ DCHECK_GT(size_, 0U);
+ return data_[(back_index_ - size_) & mask_];
+ }
+
+ void pop_front() {
+ DCHECK_GT(size_, 0U);
+ --size_;
+ }
+
+ private:
+ static const size_t mask_ = MaxSize - 1;
+ size_t back_index_, size_;
+ T data_[MaxSize];
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BOUNDED_FIFO_H_
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index f508af9..2a02cf4 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -112,6 +112,6 @@
DISALLOW_COPY_AND_ASSIGN(Histogram);
};
-}
+} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 039e7bc..d0b8bbd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1111,16 +1111,15 @@
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
- for (size_t i = 0; i < dex_caches_.size(); i++) {
- visitor(dex_caches_[i], arg);
+ for (mirror::DexCache* dex_cache : dex_caches_) {
+ visitor(dex_cache, arg);
}
}
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- visitor(it->second, arg);
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ visitor(it.second, arg);
}
// We deliberately ignore the class roots in the image since we
@@ -1135,14 +1134,13 @@
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- if (!visitor(it->second, arg)) {
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ if (!visitor(it.second, arg)) {
return;
}
}
- for (It it = image_classes_.begin(), end = image_classes_.end(); it != end; ++it) {
- if (!visitor(it->second, arg)) {
+ for (const std::pair<size_t, mirror::Class*>& it : image_classes_) {
+ if (!visitor(it.second, arg)) {
return;
}
}
@@ -1157,9 +1155,8 @@
void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const {
std::set<mirror::Class*> classes;
VisitClasses(GetClassesVisitor, &classes);
- typedef std::set<mirror::Class*>::const_iterator It; // TODO: C++0x auto
- for (It it = classes.begin(), end = classes.end(); it != end; ++it) {
- if (!visitor(*it, arg)) {
+ for (mirror::Class* klass : classes) {
+ if (!visitor(klass, arg)) {
return;
}
}
@@ -1614,6 +1611,13 @@
// No code: need interpreter.
return true;
}
+#ifdef ART_SEA_IR_MODE
+ ScopedObjectAccess soa(Thread::Current());
+ if (std::string::npos != PrettyMethod(method).find("fibonacci")) {
+ LOG(INFO) << "Found " << PrettyMethod(method);
+ return false;
+ }
+#endif
// If interpreter mode is enabled, every method (except native and proxy) must
// be run with interpreter.
return Runtime::Current()->GetInstrumentation()->InterpretOnly() &&
@@ -2160,10 +2164,9 @@
bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh;
- for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
+ for (auto it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
@@ -2172,7 +2175,7 @@
return true;
}
}
- for (It it = image_classes_.lower_bound(hash), end = classes_.end();
+ for (auto it = image_classes_.lower_bound(hash), end = classes_.end();
it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
@@ -2204,8 +2207,9 @@
const mirror::ClassLoader* class_loader,
size_t hash, const Table& classes) {
ClassHelper kh(NULL, this);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ auto end = classes_.end();
+ for (auto it = classes.lower_bound(hash); it != end && it->first == hash;
+ ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0 && klass->GetClassLoader() == class_loader) {
@@ -2228,17 +2232,18 @@
classes.clear();
size_t hash = Hash(descriptor);
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh(NULL, this);
- for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ for (auto it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
+ ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
classes.push_back(klass);
}
}
- for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ for (auto it = image_classes_.lower_bound(hash), end = classes_.end();
+ it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
@@ -3967,12 +3972,11 @@
std::vector<mirror::Class*> all_classes;
{
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- all_classes.push_back(it->second);
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ all_classes.push_back(it.second);
}
- for (It it = image_classes_.begin(), end = image_classes_.end(); it != end; ++it) {
- all_classes.push_back(it->second);
+ for (const std::pair<size_t, mirror::Class*>& it : image_classes_) {
+ all_classes.push_back(it.second);
}
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 569a370..a72ae22 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3033,9 +3033,8 @@
}
{
ScopedObjectAccess soa(self);
- typedef std::list<Thread*>::const_iterator It; // TODO: C++0x auto
- for (It it = threads.begin(), end = threads.end(); it != end; ++it) {
- Dbg::DdmSendThreadNotification(*it, CHUNK_TYPE("THCR"));
+ for (Thread* thread : threads) {
+ Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
}
}
ResumeVM();
@@ -3600,8 +3599,7 @@
}
size_t IndexOf(const char* s) const {
- typedef std::set<std::string>::const_iterator It; // TODO: C++0x auto
- It it = table_.find(s);
+ auto it = table_.find(s);
if (it == table_.end()) {
LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
}
@@ -3613,9 +3611,8 @@
}
void WriteTo(std::vector<uint8_t>& bytes) const {
- typedef std::set<std::string>::const_iterator It; // TODO: C++0x auto
- for (It it = table_.begin(); it != table_.end(); ++it) {
- const char* s = (*it).c_str();
+ for (const std::string& str : table_) {
+ const char* s = str.c_str();
size_t s_len = CountModifiedUtf8Chars(s);
UniquePtr<uint16_t> s_utf16(new uint16_t[s_len]);
ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 09e929c..5b076e0 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1299,8 +1299,7 @@
}
bool DexFileVerifier::CheckOffsetToTypeMap(uint32_t offset, uint16_t type) {
- typedef SafeMap<uint32_t, uint16_t>::iterator It; // TODO: C++0x auto
- It it = offset_to_type_map_.find(offset);
+ auto it = offset_to_type_map_.find(offset);
if (it == offset_to_type_map_.end()) {
LOG(ERROR) << StringPrintf("No data map entry found @ %x; expected %x", offset, type);
return false;
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index a732566..997d725 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -98,6 +98,12 @@
return begin_[index];
}
+ // Pop a number of elements.
+ void PopBackCount(int32_t n) {
+ DCHECK_GE(Size(), static_cast<size_t>(n));
+ back_index_.fetch_sub(n);
+ }
+
bool IsEmpty() const {
return Size() == 0;
}
@@ -108,11 +114,11 @@
}
T* Begin() const {
- return const_cast<mirror::Object**>(begin_ + front_index_);
+ return const_cast<T*>(begin_ + front_index_);
}
T* End() const {
- return const_cast<mirror::Object**>(begin_ + back_index_);
+ return const_cast<T*>(begin_ + back_index_);
}
size_t Capacity() const {
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index fb0f61b..fa2ab27 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -65,33 +65,32 @@
(reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
// Now we have the words, we can send these to be processed in parallel.
- uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
- uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
+ auto* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+ auto* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
+ for (;;) {
+ while (LIKELY(*word_cur == 0)) {
+ ++word_cur;
+ if (UNLIKELY(word_cur >= word_end)) {
+ goto exit_for;
+ }
+ }
- // TODO: Parallelize
- while (word_cur < word_end) {
// Find the first dirty card.
- while (*word_cur == 0 && word_cur < word_end) {
- word_cur++;
- }
- if (word_cur >= word_end) {
- break;
- }
uintptr_t start_word = *word_cur;
+ uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(reinterpret_cast<byte*>(word_cur)));
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- if ((start_word & 0xFF) >= minimum_age) {
- byte* card = reinterpret_cast<byte*>(word_cur) + i;
- const byte card_byte = *card;
- DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty)
- << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
- uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor);
+ if (static_cast<byte>(start_word) >= minimum_age) {
+ auto* card = reinterpret_cast<byte*>(word_cur) + i;
+ DCHECK(*card == static_cast<byte>(start_word) || *card == kCardDirty)
+ << "card " << static_cast<size_t>(*card) << " word " << (start_word & 0xFF);
+ bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
}
start_word >>= 8;
+ start += kCardSize;
}
++word_cur;
}
+ exit_for:
// Handle any unaligned cards at the end.
card_cur = reinterpret_cast<byte*>(word_end);
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index a1936de..37e719f 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -117,10 +117,10 @@
void ClearSpaceCards(space::ContinuousSpace* space);
// Returns the first address in the heap which maps to this card.
- void* AddrFromCard(const byte *card_addr) const;
+ void* AddrFromCard(const byte *card_addr) const ALWAYS_INLINE;
// Returns the address of the relevant byte in the card table, given an address on the heap.
- byte* CardFromAddr(const void *addr) const;
+ byte* CardFromAddr(const void *addr) const ALWAYS_INLINE;
bool AddrIsInCardTable(const void* addr) const;
@@ -134,7 +134,7 @@
return card_addr >= begin && card_addr < end;
}
- void CheckCardValid(byte* card) const;
+ void CheckCardValid(byte* card) const ALWAYS_INLINE;
// Verifies that all gray objects are on a dirty card.
void VerifyCardTable();
diff --git a/runtime/gc/accounting/gc_allocator.cc b/runtime/gc/accounting/gc_allocator.cc
index 0b0d3ed..11d0e67 100644
--- a/runtime/gc/accounting/gc_allocator.cc
+++ b/runtime/gc/accounting/gc_allocator.cc
@@ -31,6 +31,6 @@
Runtime::Current()->GetHeap()->RegisterGCDeAllocation(bytes);
free(p);
}
-}
-}
-}
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/accounting/gc_allocator.h b/runtime/gc/accounting/gc_allocator.h
index d1356a7..1fba858 100644
--- a/runtime/gc/accounting/gc_allocator.h
+++ b/runtime/gc/accounting/gc_allocator.h
@@ -75,8 +75,8 @@
GCAllocatorImpl<T>,
std::allocator<T> >::value {
};
-}
-}
-}
+} // namespace accounting
+} // namespace gc
+} // namespace art
#endif // ART_RUNTIME_GC_ACCOUNTING_GC_ALLOCATOR_H_
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 0524ccb..18b93d4 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -25,20 +25,12 @@
template <typename Visitor>
inline void HeapBitmap::Visit(const Visitor& visitor) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It2;
- DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
- for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- set->Visit(visitor);
+ DCHECK(!discontinuous_space_sets_.empty());
+ for (const auto& space_set : discontinuous_space_sets_) {
+ space_set->Visit(visitor);
}
}
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index 0462905..5589461 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -23,12 +23,9 @@
namespace accounting {
void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- if (*it == old_bitmap) {
- *it = new_bitmap;
+ for (auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap == old_bitmap) {
+ bitmap = new_bitmap;
return;
}
}
@@ -36,12 +33,9 @@
}
void HeapBitmap::ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set) {
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It;
- for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- if (*it == old_set) {
- *it = new_set;
+ for (auto& space_set : discontinuous_space_sets_) {
+ if (space_set == old_set) {
+ space_set = new_set;
return;
}
}
@@ -52,13 +46,10 @@
DCHECK(bitmap != NULL);
// Check for interval overlap.
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
- SpaceBitmap* cur_bitmap = *it;
- CHECK(bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
- bitmap->HeapLimit() > cur_bitmap->HeapBegin())
+ for (const auto& cur_bitmap : continuous_space_bitmaps_) {
+ CHECK(!(
+ bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
+ bitmap->HeapLimit() > cur_bitmap->HeapBegin()))
<< "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
}
continuous_space_bitmaps_.push_back(bitmap);
@@ -70,20 +61,13 @@
}
void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->Walk(callback, arg);
}
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It2;
- DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
- for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- set->Walk(callback, arg);
+
+ DCHECK(!discontinuous_space_sets_.empty());
+ for (const auto& space_set : discontinuous_space_sets_) {
+ space_set->Walk(callback, arg);
}
}
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index ada976f..2ca8c4a 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -66,11 +66,7 @@
}
SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
if (bitmap->HasAddress(obj)) {
return bitmap;
}
@@ -79,13 +75,9 @@
}
SpaceSetMap* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It;
- for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- if (set->Test(obj)) {
- return set;
+ for (const auto& space_set : discontinuous_space_sets_) {
+ if (space_set->Test(obj)) {
+ return space_set;
}
}
return NULL;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 3bbc381..4865219 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -36,54 +36,6 @@
namespace gc {
namespace accounting {
-class MarkIfReachesAllocspaceVisitor {
- public:
- explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- // Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const {
- // TODO: Optimize?
- // TODO: C++0x auto
- const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsDlMallocSpace() && (*cur)->Contains(ref)) {
- bitmap_->Set(obj);
- break;
- }
- }
- }
-
- private:
- Heap* const heap_;
- accounting::SpaceBitmap* const bitmap_;
-};
-
-class ModUnionVisitor {
- public:
- explicit ModUnionVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- void operator()(const Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- // We don't have an early exit since we use the visitor pattern, an early exit should
- // significantly speed this up.
- MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
- }
- private:
- Heap* const heap_;
- accounting::SpaceBitmap* const bitmap_;
-};
-
class ModUnionClearCardSetVisitor {
public:
explicit ModUnionClearCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
@@ -237,29 +189,23 @@
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
Heap* heap = GetHeap();
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
- typedef std::vector<const Object*>::const_iterator It2;
- for (It it = references_.begin(), end = references_.end(); it != end; ++it) {
- for (It2 it_ref = it->second.begin(), end_ref = it->second.end(); it_ref != end_ref;
- ++it_ref ) {
- CHECK(heap->IsLiveObjectLocked(*it_ref));
+ for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
+ for (const Object* ref : it.second) {
+ CHECK(heap->IsLiveObjectLocked(ref));
}
}
// Check the references of each clean card which is also in the mod union table.
CardTable* card_table = heap->GetCardTable();
- for (It it = references_.begin(); it != references_.end(); ++it) {
- const byte* card = &*it->first;
+ for (const std::pair<const byte*, std::vector<const Object*> > & it : references_) {
+ const byte* card = it.first;
if (*card == CardTable::kCardClean) {
- std::set<const Object*> reference_set;
- for (It2 itr = it->second.begin(); itr != it->second.end(); ++itr) {
- reference_set.insert(*itr);
- }
+ std::set<const Object*> reference_set(it.second.begin(), it.second.end());
ModUnionCheckReferences visitor(this, reference_set);
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- space::ContinuousSpace* space =
- heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ DCHECK(space != nullptr);
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor);
}
@@ -268,24 +214,20 @@
void ModUnionTableReferenceCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
- typedef std::set<byte*>::const_iterator It;
os << "ModUnionTable cleared cards: [";
- for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ for (byte* card_addr : cleared_cards_) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]\nModUnionTable references: [";
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It2;
- for (It2 it = references_.begin(); it != references_.end(); ++it) {
- const byte* card = &*it->first;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
+ const byte* card_addr = it.first;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
- typedef std::vector<const Object*>::const_iterator It3;
- for (It3 itr = it->second.begin(); itr != it->second.end(); ++itr) {
- os << reinterpret_cast<const void*>(*itr) << ",";
+ for (const mirror::Object* ref : it.second) {
+ os << reinterpret_cast<const void*>(ref) << ",";
}
os << "},";
}
@@ -298,20 +240,18 @@
std::vector<const Object*> cards_references;
ModUnionReferenceVisitor visitor(this, &cards_references);
- typedef std::set<byte*>::iterator It;
- for (It it = cleared_cards_.begin(), cc_end = cleared_cards_.end(); it != cc_end; ++it) {
- byte* card = *it;
+ for (const auto& card : cleared_cards_) {
// Clear and re-compute alloc space references associated with this card.
cards_references.clear();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- SpaceBitmap* live_bitmap =
- heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false)->GetLiveBitmap();
+ auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ DCHECK(space != nullptr);
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor);
// Update the corresponding references for the card.
- // TODO: C++0x auto
- SafeMap<const byte*, std::vector<const Object*> >::iterator found = references_.find(card);
+ auto found = references_.find(card);
if (found == references_.end()) {
if (cards_references.empty()) {
// No reason to add empty array.
@@ -326,14 +266,11 @@
}
void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_sweep) {
- // TODO: C++0x auto
size_t count = 0;
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
- for (It it = references_.begin(); it != references_.end(); ++it) {
- typedef std::vector<const Object*>::const_iterator It2;
- for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) {
- mark_sweep->MarkRoot(*it_ref);
+ for (const auto& ref : references_) {
+ for (const auto& obj : ref.second) {
+ mark_sweep->MarkRoot(obj);
++count;
}
}
@@ -353,38 +290,28 @@
void ModUnionTableCardCache::MarkReferences(collector::MarkSweep* mark_sweep) {
CardTable* card_table = heap_->GetCardTable();
ModUnionScanImageRootVisitor visitor(mark_sweep);
- typedef std::set<byte*>::const_iterator It;
- It it = cleared_cards_.begin();
- It cc_end = cleared_cards_.end();
- if (it != cc_end) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- space::ContinuousSpace* cur_space =
- heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
- accounting::SpaceBitmap* cur_live_bitmap = cur_space->GetLiveBitmap();
- cur_live_bitmap->VisitMarkedRange(start, end, visitor);
- for (++it; it != cc_end; ++it) {
- card = *it;
- start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- end = start + CardTable::kCardSize;
- if (UNLIKELY(!cur_space->Contains(reinterpret_cast<Object*>(start)))) {
- cur_space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
- cur_live_bitmap = cur_space->GetLiveBitmap();
- }
- cur_live_bitmap->VisitMarkedRange(start, end, visitor);
+ space::ContinuousSpace* space = nullptr;
+ SpaceBitmap* bitmap = nullptr;
+ for (const byte* card_addr : cleared_cards_) {
+ auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ auto end = start + CardTable::kCardSize;
+ auto obj_start = reinterpret_cast<Object*>(start);
+ if (UNLIKELY(space == nullptr || !space->Contains(obj_start))) {
+ space = heap_->FindContinuousSpaceFromObject(obj_start, false);
+ DCHECK(space != nullptr);
+ bitmap = space->GetLiveBitmap();
+ DCHECK(bitmap != nullptr);
}
+ bitmap->VisitMarkedRange(start, end, visitor);
}
}
void ModUnionTableCardCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
- typedef std::set<byte*>::const_iterator It;
os << "ModUnionTable dirty cards: [";
- for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
+ for (const byte* card_addr : cleared_cards_) {
+ auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ auto end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]";
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 378a971..039415e 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -61,9 +61,7 @@
}
void GarbageCollector::Run() {
- Thread* self = Thread::Current();
ThreadList* thread_list = Runtime::Current()->GetThreadList();
-
uint64_t start_time = NanoTime();
pause_times_.clear();
duration_ns_ = 0;
@@ -82,6 +80,7 @@
uint64_t pause_end = NanoTime();
pause_times_.push_back(pause_end - pause_start);
} else {
+ auto* self = Thread::Current();
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
MarkingPhase();
@@ -114,11 +113,7 @@
// these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
// bits of dead objects in the live bitmap.
const GcType gc_type = GetGcType();
- const std::vector<space::ContinuousSpace*>& cont_spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = cont_spaces.begin(), end = cont_spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
// We never allocate into zygote spaces.
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
(gc_type == kGcTypeFull &&
@@ -132,16 +127,13 @@
}
}
}
- const std::vector<space::DiscontinuousSpace*>& disc_spaces = GetHeap()->GetDiscontinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = disc_spaces.begin(), end = disc_spaces.end(); it != end; ++it) {
- space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(*it);
+ for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
+ space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
accounting::SpaceSetMap* live_set = space->GetLiveObjects();
accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
- space->SwapBitmaps();
+ down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
}
}
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index e158952..d0b0b5c 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -37,27 +37,26 @@
}
mirror::Class* klass = obj->GetClass();
DCHECK(klass != NULL);
- if (klass == java_lang_Class_) {
+ if (UNLIKELY(klass->IsArrayClass())) {
+ if (kCountScannedTypes) {
+ ++array_count_;
+ }
+ if (klass->IsObjectArrayClass()) {
+ VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+ }
+ } else if (UNLIKELY(klass == java_lang_Class_)) {
DCHECK_EQ(klass->GetClass(), java_lang_Class_);
if (kCountScannedTypes) {
++class_count_;
}
VisitClassReferences(klass, obj, visitor);
- } else if (klass->IsArrayClass()) {
- if (kCountScannedTypes) {
- ++array_count_;
- }
- visitor(obj, klass, mirror::Object::ClassOffset(), false);
- if (klass->IsObjectArrayClass()) {
- VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
- }
} else {
if (kCountScannedTypes) {
++other_count_;
}
VisitOtherReferences(klass, obj, visitor);
if (UNLIKELY(klass->IsReferenceClass())) {
- DelayReferenceReferent(const_cast<mirror::Object*>(obj));
+ DelayReferenceReferent(klass, const_cast<mirror::Object*>(obj));
}
}
}
@@ -117,6 +116,11 @@
bool is_static, const Visitor& visitor) {
if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
// Found a reference offset bitmap. Mark the specified offsets.
+#ifndef MOVING_COLLECTOR
+ // Clear the class bit since we mark the class as part of marking the class linker roots.
+ DCHECK_EQ(mirror::Object::ClassOffset().Uint32Value(), 0U);
+ ref_offsets &= (1U << (sizeof(ref_offsets) * 8 - 1)) - 1;
+#endif
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
@@ -149,11 +153,11 @@
template <typename Visitor>
inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
const Visitor& visitor) {
- const int32_t length = array->GetLength();
- for (int32_t i = 0; i < length; ++i) {
- const mirror::Object* element = array->GetWithoutChecks(i);
+ const size_t length = static_cast<size_t>(array->GetLength());
+ for (size_t i = 0; i < length; ++i) {
+ const mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
const size_t width = sizeof(mirror::Object*);
- MemberOffset offset = MemberOffset(i * width + mirror::Array::DataOffset(width).Int32Value());
+ MemberOffset offset(i * width + mirror::Array::DataOffset(width).Int32Value());
visitor(array, element, offset, false);
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 61570ae..5c31eb1 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -21,6 +21,7 @@
#include <climits>
#include <vector>
+#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
@@ -60,17 +61,29 @@
namespace collector {
// Performance options.
-static const bool kParallelMarkStack = true;
-static const bool kDisableFinger = true; // TODO: Fix, bit rotten.
-static const bool kUseMarkStackPrefetch = true;
-static const size_t kSweepArrayChunkFreeSize = 1024;
+constexpr bool kUseRecursiveMark = false;
+constexpr bool kUseMarkStackPrefetch = true;
+constexpr size_t kSweepArrayChunkFreeSize = 1024;
+
+// Parallelism options.
+constexpr bool kParallelCardScan = true;
+constexpr bool kParallelRecursiveMark = true;
+// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
+ // elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
+// having this can add overhead in ProcessReferences since we may end up doing many calls of
+// ProcessMarkStack with very small mark stacks.
+constexpr size_t kMinimumParallelMarkStackSize = 128;
+constexpr bool kParallelProcessMarkStack = true;
// Profiling and information flags.
-static const bool kCountClassesMarked = false;
-static const bool kProfileLargeObjects = false;
-static const bool kMeasureOverhead = false;
-static const bool kCountTasks = false;
-static const bool kCountJavaLangRefs = false;
+constexpr bool kCountClassesMarked = false;
+constexpr bool kProfileLargeObjects = false;
+constexpr bool kMeasureOverhead = false;
+constexpr bool kCountTasks = false;
+constexpr bool kCountJavaLangRefs = false;
+
+// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
+constexpr bool kCheckLocks = kDebugLocking;
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
// Bind live to mark bitmap if necessary.
@@ -84,18 +97,14 @@
SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
reinterpret_cast<Object*>(space->End()));
} else {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- const space::ContinuousSpace* prev_space = NULL;
+ const space::ContinuousSpace* prev_space = nullptr;
// Find out if the previous space is immune.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if (*it == space) {
+ for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+ if (cur_space == space) {
break;
}
- prev_space = *it;
+ prev_space = cur_space;
}
-
// If previous space was immune, then extend the immune region. Relies on continuous spaces
// being sorted by Heap::AddContinuousSpace.
if (prev_space != NULL &&
@@ -109,13 +118,9 @@
void MarkSweep::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-
// Mark all of the spaces we never collect as immune.
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
ImmuneSpace(space);
}
@@ -172,7 +177,7 @@
FindDefaultMarkBitmap();
-// Do any pre GC verification.
+ // Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
heap_->PreGcVerification(this);
}
@@ -186,7 +191,6 @@
bool MarkSweep::HandleDirtyObjectsPhase() {
base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
Thread* self = Thread::Current();
- accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
Locks::mutator_lock_->AssertExclusiveHeld(self);
{
@@ -196,7 +200,7 @@
ReMarkRoots();
// Scan dirty objects, this is only required if we are not doing concurrent GC.
- RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
+ RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
ProcessReferences(self);
@@ -206,7 +210,7 @@
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// This second sweep makes sure that we don't have any objects in the live stack which point to
// freed objects. These cause problems since their references may be previously freed objects.
- SweepArray(allocation_stack, false);
+ SweepArray(GetHeap()->allocation_stack_.get(), false);
}
return true;
}
@@ -236,7 +240,7 @@
// If we exclusively hold the mutator lock, all threads must be suspended.
MarkRoots();
} else {
- MarkRootsCheckpoint(self);
+ MarkThreadRoots(self);
MarkNonThreadRoots();
}
MarkConcurrentRoots();
@@ -245,14 +249,17 @@
MarkReachableObjects();
}
+void MarkSweep::MarkThreadRoots(Thread* self) {
+ MarkRootsCheckpoint(self);
+}
+
void MarkSweep::MarkReachableObjects() {
// Mark everything allocated since the last as GC live so that we can sweep concurrently,
// knowing that new allocations won't be marked as live.
timings_.StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
- heap_->large_object_space_->GetLiveObjects(),
- live_stack);
+ heap_->large_object_space_->GetLiveObjects(), live_stack);
live_stack->Reset();
timings_.EndSplit();
// Recursively mark all the non-image bits set in the mark bitmap.
@@ -261,7 +268,7 @@
void MarkSweep::ReclaimPhase() {
base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
- Thread* self = Thread::Current();
+ auto* self = Thread::Current();
if (!IsConcurrent()) {
base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
@@ -281,7 +288,7 @@
// first place.
mirror::Object** end = allocation_stack->End();
for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
- Object* obj = *it;
+ const Object* obj = *it;
if (obj != NULL) {
UnMarkObjectNonNull(obj);
}
@@ -322,13 +329,9 @@
void MarkSweep::FindDefaultMarkBitmap() {
base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
- current_mark_bitmap_ = (*it)->GetMarkBitmap();
+ current_mark_bitmap_ = space->GetMarkBitmap();
CHECK(current_mark_bitmap_ != NULL);
return;
}
@@ -344,11 +347,10 @@
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
- std::vector<Object*> temp;
- temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
+ std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
mark_stack_->Resize(mark_stack_->Capacity() * 2);
- for (size_t i = 0; i < temp.size(); ++i) {
- mark_stack_->PushBack(temp[i]);
+ for (const auto& obj : temp) {
+ mark_stack_->PushBack(obj);
}
}
@@ -474,7 +476,7 @@
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
-void MarkSweep::MarkObject(const Object* obj) {
+inline void MarkSweep::MarkObject(const Object* obj) {
if (obj != NULL) {
MarkObjectNonNull(obj);
}
@@ -549,26 +551,13 @@
timings_.EndSplit();
}
-class CheckObjectVisitor {
- public:
- explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
-
- void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
- NO_THREAD_SAFETY_ANALYSIS {
- if (kDebugLocking) {
- Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- }
- mark_sweep_->CheckReference(obj, ref, offset, is_static);
- }
-
- private:
- MarkSweep* const mark_sweep_;
-};
-
void MarkSweep::CheckObject(const Object* obj) {
DCHECK(obj != NULL);
- CheckObjectVisitor visitor(this);
- VisitObjectReferences(obj, visitor);
+ VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
+ bool is_static) NO_THREAD_SAFETY_ANALYSIS {
+ Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+ CheckReference(obj, ref, offset, is_static);
+ });
}
void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
@@ -591,11 +580,12 @@
class ScanObjectVisitor {
public:
- explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
+ explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
+ : mark_sweep_(mark_sweep) {}
// TODO: Fixme when anotatalysis works with visitors.
- void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- if (kDebugLocking) {
+ void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
@@ -606,72 +596,276 @@
MarkSweep* const mark_sweep_;
};
-void MarkSweep::ScanGrayObjects(byte minimum_age) {
- accounting::CardTable* card_table = GetHeap()->GetCardTable();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- ScanObjectVisitor visitor(this);
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
- space::ContinuousSpace* space = *it;
- switch (space->GetGcRetentionPolicy()) {
- case space::kGcRetentionPolicyNeverCollect:
- timings_.StartSplit("ScanGrayImageSpaceObjects");
- break;
- case space::kGcRetentionPolicyFullCollect:
- timings_.StartSplit("ScanGrayZygoteSpaceObjects");
- break;
- case space::kGcRetentionPolicyAlwaysCollect:
- timings_.StartSplit("ScanGrayAllocSpaceObjects");
- break;
+template <bool kUseFinger = false>
+class MarkStackTask : public Task {
+ public:
+ MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
+ const Object** mark_stack)
+ : mark_sweep_(mark_sweep),
+ thread_pool_(thread_pool),
+ mark_stack_pos_(mark_stack_size) {
+ // We may have to copy part of an existing mark stack when another mark stack overflows.
+ if (mark_stack_size != 0) {
+ DCHECK(mark_stack != NULL);
+ // TODO: Check performance?
+ std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
}
- byte* begin = space->Begin();
- byte* end = space->End();
- // Image spaces are handled properly since live == marked for them.
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
+ if (kCountTasks) {
+ ++mark_sweep_->work_chunks_created_;
+ }
+ }
+
+ static const size_t kMaxSize = 1 * KB;
+
+ protected:
+ class ScanObjectParallelVisitor {
+ public:
+ explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
+ : chunk_task_(chunk_task) {}
+
+ void operator()(const Object* obj) const {
+ MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
+ mark_sweep->ScanObjectVisit(obj,
+ [mark_sweep, this](const Object* /* obj */, const Object* ref,
+ const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
+ if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
+ if (kUseFinger) {
+ android_memory_barrier();
+ if (reinterpret_cast<uintptr_t>(ref) >=
+ static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
+ return;
+ }
+ }
+ chunk_task_->MarkStackPush(ref);
+ }
+ });
+ }
+
+ private:
+ MarkStackTask<kUseFinger>* const chunk_task_;
+ };
+
+ virtual ~MarkStackTask() {
+ // Make sure that we have cleared our mark stack.
+ DCHECK_EQ(mark_stack_pos_, 0U);
+ if (kCountTasks) {
+ ++mark_sweep_->work_chunks_deleted_;
+ }
+ }
+
+ MarkSweep* const mark_sweep_;
+ ThreadPool* const thread_pool_;
+ // Thread local mark stack for this task.
+ const Object* mark_stack_[kMaxSize];
+ // Mark stack position.
+ size_t mark_stack_pos_;
+
+ void MarkStackPush(const Object* obj) ALWAYS_INLINE {
+ if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
+ // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
+ mark_stack_pos_ /= 2;
+ auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
+ mark_stack_ + mark_stack_pos_);
+ thread_pool_->AddTask(Thread::Current(), task);
+ }
+ DCHECK(obj != nullptr);
+ DCHECK(mark_stack_pos_ < kMaxSize);
+ mark_stack_[mark_stack_pos_++] = obj;
+ }
+
+ virtual void Finalize() {
+ delete this;
+ }
+
+ // Scans all of the objects.
+ virtual void Run(Thread* self) {
+ ScanObjectParallelVisitor visitor(this);
+ // TODO: Tune this.
+ static const size_t kFifoSize = 4;
+ BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+ for (;;) {
+ const Object* obj = NULL;
+ if (kUseMarkStackPrefetch) {
+ while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
+ const Object* obj = mark_stack_[--mark_stack_pos_];
+ DCHECK(obj != NULL);
+ __builtin_prefetch(obj);
+ prefetch_fifo.push_back(obj);
+ }
+ if (UNLIKELY(prefetch_fifo.empty())) {
+ break;
+ }
+ obj = prefetch_fifo.front();
+ prefetch_fifo.pop_front();
+ } else {
+ if (UNLIKELY(mark_stack_pos_ == 0)) {
+ break;
+ }
+ obj = mark_stack_[--mark_stack_pos_];
+ }
+ DCHECK(obj != NULL);
+ visitor(obj);
+ }
+ }
+};
+
+class CardScanTask : public MarkStackTask<false> {
+ public:
+ CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
+ byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
+ const Object** mark_stack_obj)
+ : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
+ bitmap_(bitmap),
+ begin_(begin),
+ end_(end),
+ minimum_age_(minimum_age) {
+ }
+
+ protected:
+ accounting::SpaceBitmap* const bitmap_;
+ byte* const begin_;
+ byte* const end_;
+ const byte minimum_age_;
+
+ virtual void Finalize() {
+ delete this;
+ }
+
+ virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ ScanObjectParallelVisitor visitor(this);
+ accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
+ card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
+ // Finish by emptying our local mark stack.
+ MarkStackTask::Run(self);
+ }
+};
+
+void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
+ accounting::CardTable* card_table = GetHeap()->GetCardTable();
+ ThreadPool* thread_pool = GetHeap()->GetThreadPool();
+ const bool parallel = kParallelCardScan && thread_pool != nullptr;
+ if (parallel) {
+ auto* self = Thread::Current();
+ // Can't have a different split for each space since multiple spaces can have their cards being
+ // scanned at the same time.
+ timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
+ // Try to take some of the mark stack since we can pass this off to the worker tasks.
+ const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
+ const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
+ const auto mark_stack_size = mark_stack_end - mark_stack_begin;
+ const size_t thread_count = thread_pool->GetThreadCount() + 1;
+ // Estimated number of work tasks we will create.
+ const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
+ DCHECK_NE(mark_stack_tasks, 0U);
+ const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
+ mark_stack_size / mark_stack_tasks + 1);
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ byte* card_begin = space->Begin();
+ byte* card_end = space->End();
+ // Calculate how many bytes of heap we will scan.
+ const size_t address_range = card_end - card_begin;
+ // Calculate how much address range each task gets.
+ const size_t card_delta = RoundUp(address_range / thread_count + 1,
+ accounting::CardTable::kCardSize);
+ // Create the worker tasks for this space.
+ while (card_begin != card_end) {
+ // Add a range of cards.
+ size_t addr_remaining = card_end - card_begin;
+ size_t card_increment = std::min(card_delta, addr_remaining);
+ // Take from the back of the mark stack.
+ size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
+ size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
+ mark_stack_end -= mark_stack_increment;
+ mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
+ DCHECK_EQ(mark_stack_end, mark_stack_->End());
+ // Add the new task to the thread pool.
+ auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
+ card_begin + card_increment, minimum_age,
+ mark_stack_increment, mark_stack_end);
+ thread_pool->AddTask(self, task);
+ card_begin += card_increment;
+ }
+ }
+ thread_pool->StartWorkers(self);
+ thread_pool->Wait(self, paused, true); // Only do work in the main thread if we are paused.
+ thread_pool->StopWorkers(self);
timings_.EndSplit();
+ } else {
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ // Image spaces are handled properly since live == marked for them.
+ switch (space->GetGcRetentionPolicy()) {
+ case space::kGcRetentionPolicyNeverCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+ "ScanGrayImageSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyFullCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+ "ScanGrayZygoteSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyAlwaysCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+ "ScanGrayAllocSpaceObjects");
+ break;
+ }
+ ScanObjectVisitor visitor(this);
+ card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
+ timings_.EndSplit();
+ }
}
}
-class CheckBitmapVisitor {
- public:
- explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
-
- void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- if (kDebugLocking) {
- Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- }
- DCHECK(obj != NULL);
- mark_sweep_->CheckObject(obj);
- }
-
- private:
- MarkSweep* mark_sweep_;
-};
-
void MarkSweep::VerifyImageRoots() {
// Verify roots ensures that all the references inside the image space point
// objects which are either in the image space or marked objects in the alloc
// space
timings_.StartSplit("VerifyImageRoots");
- CheckBitmapVisitor visitor(this);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
- space::ImageSpace* space = (*it)->AsImageSpace();
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ space::ImageSpace* image_space = space->AsImageSpace();
+ uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
+ accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
DCHECK(live_bitmap != NULL);
- live_bitmap->VisitMarkedRange(begin, end, visitor);
+ live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
+ if (kCheckLocks) {
+ Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+ }
+ DCHECK(obj != NULL);
+ CheckObject(obj);
+ });
}
}
timings_.EndSplit();
}
+class RecursiveMarkTask : public MarkStackTask<false> {
+ public:
+ RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
+ accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
+ bitmap_(bitmap),
+ begin_(begin),
+ end_(end) {
+ }
+
+ protected:
+ accounting::SpaceBitmap* const bitmap_;
+ const uintptr_t begin_;
+ const uintptr_t end_;
+
+ virtual void Finalize() {
+ delete this;
+ }
+
+ // Scans all of the objects
+ virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ ScanObjectParallelVisitor visitor(this);
+ bitmap_->VisitMarkedRange(begin_, end_, visitor);
+ // Finish by emptying our local mark stack.
+ MarkStackTask::Run(self);
+ }
+};
+
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
@@ -684,14 +878,14 @@
CHECK(phantom_reference_list_ == NULL);
CHECK(cleared_reference_list_ == NULL);
- const bool partial = GetGcType() == kGcTypePartial;
- ScanObjectVisitor scan_visitor(this);
- if (!kDisableFinger) {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ if (kUseRecursiveMark) {
+ const bool partial = GetGcType() == kGcTypePartial;
+ ScanObjectVisitor scan_visitor(this);
+ auto* self = Thread::Current();
+ ThreadPool* thread_pool = heap_->GetThreadPool();
+ const bool parallel = kParallelRecursiveMark && thread_pool != NULL;
+ mark_stack_->Reset();
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
(!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
current_mark_bitmap_ = space->GetMarkBitmap();
@@ -699,14 +893,39 @@
GetHeap()->DumpSpaces();
LOG(FATAL) << "invalid bitmap";
}
- // This function does not handle heap end increasing, so we must use the space end.
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
+ if (parallel) {
+ // We will use the mark stack in the future.
+ // CHECK(mark_stack_->IsEmpty());
+ // This function does not handle heap end increasing, so we must use the space end.
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);
+
+ // Create a few worker tasks.
+ size_t n = (thread_pool->GetThreadCount() + 1) * 2;
+ while (begin != end) {
+ uintptr_t start = begin;
+ uintptr_t delta = (end - begin) / n;
+ delta = RoundUp(delta, KB);
+ if (delta < 16 * KB) delta = end - begin;
+ begin += delta;
+ auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
+ begin);
+ thread_pool->AddTask(self, task);
+ }
+ thread_pool->StartWorkers(self);
+ thread_pool->Wait(self, false, true);
+ thread_pool->StopWorkers(self);
+ } else {
+ // This function does not handle heap end increasing, so we must use the space end.
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
+ }
}
}
}
- ProcessMarkStack();
+ ProcessMarkStack(false);
}
bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
@@ -715,9 +934,9 @@
!reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}
-void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
- ScanGrayObjects(minimum_age);
- ProcessMarkStack();
+void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
+ ScanGrayObjects(paused, minimum_age);
+ ProcessMarkStack(paused);
}
void MarkSweep::ReMarkRoots() {
@@ -729,10 +948,7 @@
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
- IndirectReferenceTable* table = &vm->weak_globals;
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = table->begin(), end = table->end(); it != end; ++it) {
- const Object** entry = *it;
+ for (const Object** entry : vm->weak_globals) {
if (!is_marked(*entry, arg)) {
*entry = kClearedJniWeakGlobal;
}
@@ -815,10 +1031,7 @@
JavaVMExt* vm = runtime->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
- IndirectReferenceTable* table = &vm->weak_globals;
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = table->begin(), end = table->end(); it != end; ++it) {
- const Object** entry = *it;
+ for (const Object** entry : vm->weak_globals) {
VerifyIsLive(*entry);
}
}
@@ -904,7 +1117,7 @@
// bitmap, resulting in occasional frees of Weaks which are still in use.
SweepSystemWeaksArray(allocations);
- timings_.StartSplit("Process allocation stack");
+ timings_.StartSplit("SweepArray");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -937,12 +1150,12 @@
DCHECK_GE(out, objects_to_chunk_free);
DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
- timings_.StartSplit("FreeList");
+ // timings_.StartSplit("FreeList");
size_t chunk_freed_objects = out - objects_to_chunk_free;
freed_objects += chunk_freed_objects;
freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
objects_to_chunk_free = out;
- timings_.EndSplit();
+ // timings_.EndSplit();
}
}
} else if (!large_mark_objects->Test(obj)) {
@@ -954,11 +1167,11 @@
DCHECK_GE(out, objects_to_chunk_free);
DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
if (out - objects_to_chunk_free > 0) {
- timings_.StartSplit("FreeList");
+ // timings_.StartSplit("FreeList");
size_t chunk_freed_objects = out - objects_to_chunk_free;
freed_objects += chunk_freed_objects;
freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
- timings_.EndSplit();
+ // timings_.EndSplit();
}
CHECK_EQ(count, allocations->Size());
timings_.EndSplit();
@@ -988,11 +1201,7 @@
SweepCallbackContext scc;
scc.mark_sweep = this;
scc.self = Thread::Current();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
// We always sweep always collect spaces.
bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
if (!partial && !sweep_space) {
@@ -1000,8 +1209,8 @@
sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
}
if (sweep_space) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ auto begin = reinterpret_cast<uintptr_t>(space->Begin());
+ auto end = reinterpret_cast<uintptr_t>(space->End());
scc.space = space->AsDlMallocSpace();
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1040,11 +1249,9 @@
size_t freed_objects = 0;
size_t freed_bytes = 0;
Thread* self = Thread::Current();
- // TODO: C++0x
- typedef accounting::SpaceSetMap::Objects::iterator It;
- for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
- if (!large_mark_objects->Test(*it)) {
- freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
+ for (const Object* obj : live_objects) {
+ if (!large_mark_objects->Test(obj)) {
+ freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
++freed_objects;
}
}
@@ -1054,11 +1261,7 @@
}
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace() && space->Contains(ref)) {
DCHECK(IsMarked(obj));
@@ -1101,31 +1304,34 @@
// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
-// the gcHeap for later processing.
-void MarkSweep::DelayReferenceReferent(Object* obj) {
- DCHECK(obj != NULL);
- Class* klass = obj->GetClass();
- DCHECK(klass != NULL);
+// the heap for later processing.
+void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+ DCHECK(klass != nullptr);
DCHECK(klass->IsReferenceClass());
- Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
+ DCHECK(obj != NULL);
Object* referent = heap_->GetReferenceReferent(obj);
- if (kCountJavaLangRefs) {
- ++reference_count_;
- }
- if (pending == NULL && referent != NULL && !IsMarked(referent)) {
- Object** list = NULL;
- if (klass->IsSoftReferenceClass()) {
- list = &soft_reference_list_;
- } else if (klass->IsWeakReferenceClass()) {
- list = &weak_reference_list_;
- } else if (klass->IsFinalizerReferenceClass()) {
- list = &finalizer_reference_list_;
- } else if (klass->IsPhantomReferenceClass()) {
- list = &phantom_reference_list_;
+ if (referent != NULL && !IsMarked(referent)) {
+ if (kCountJavaLangRefs) {
+ ++reference_count_;
}
- DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
- // TODO: One lock per list?
- heap_->EnqueuePendingReference(obj, list);
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ if (klass->IsSoftReferenceClass()) {
+ MutexLock mu(self, *heap_->GetSoftRefQueueLock());
+ heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+ } else if (klass->IsWeakReferenceClass()) {
+ MutexLock mu(self, *heap_->GetWeakRefQueueLock());
+ heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+ } else if (klass->IsFinalizerReferenceClass()) {
+ MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
+ heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+ } else if (klass->IsPhantomReferenceClass()) {
+ MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
+ heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
+ << " " << std::hex << klass->GetAccessFlags();
+ }
}
}
@@ -1135,13 +1341,13 @@
class MarkObjectVisitor {
public:
- explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
+ explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}
// TODO: Fixme when anotatalysis works with visitors.
void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const
+ bool /* is_static */) const ALWAYS_INLINE
NO_THREAD_SAFETY_ANALYSIS {
- if (kDebugLocking) {
+ if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
@@ -1159,204 +1365,61 @@
ScanObjectVisit(obj, visitor);
}
-class MarkStackChunk : public Task {
- public:
- MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
- : mark_sweep_(mark_sweep),
- thread_pool_(thread_pool),
- index_(0),
- length_(0),
- output_(NULL) {
- length_ = end - begin;
- if (begin != end) {
- // Cost not significant since we only do this for the initial set of mark stack chunks.
- memcpy(data_, begin, length_ * sizeof(*begin));
- }
- if (kCountTasks) {
- ++mark_sweep_->work_chunks_created_;
- }
- }
-
- ~MarkStackChunk() {
- DCHECK(output_ == NULL || output_->length_ == 0);
- DCHECK_GE(index_, length_);
- delete output_;
- if (kCountTasks) {
- ++mark_sweep_->work_chunks_deleted_;
- }
- }
-
- MarkSweep* const mark_sweep_;
- ThreadPool* const thread_pool_;
- static const size_t max_size = 1 * KB;
- // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
- size_t index_;
- // Input / output mark stack. We add newly marked references to data_ until length reaches
- // max_size. This is an optimization so that less tasks are created.
- // TODO: Investigate using a bounded buffer FIFO.
- Object* data_[max_size];
- // How many elements in data_ we need to scan.
- size_t length_;
- // Output block, newly marked references get added to the ouput block so that another thread can
- // scan them.
- MarkStackChunk* output_;
-
- class MarkObjectParallelVisitor {
- public:
- explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}
-
- void operator()(const Object* /* obj */, const Object* ref,
- const MemberOffset& /* offset */, bool /* is_static */) const {
- if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
- chunk_task_->MarkStackPush(ref);
- }
- }
-
- private:
- MarkStackChunk* const chunk_task_;
- };
-
- // Push an object into the block.
- // Don't need to use atomic ++ since we only one thread is writing to an output block at any
- // given time.
- void Push(Object* obj) {
- CHECK(obj != NULL);
- data_[length_++] = obj;
- }
-
- void MarkStackPush(const Object* obj) {
- if (static_cast<size_t>(length_) < max_size) {
- Push(const_cast<Object*>(obj));
- } else {
- // Internal (thread-local) buffer is full, push to a new buffer instead.
- if (UNLIKELY(output_ == NULL)) {
- AllocateOutputChunk();
- } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
- // Output block is full, queue it up for processing and obtain a new block.
- EnqueueOutput();
- AllocateOutputChunk();
- }
- output_->Push(const_cast<Object*>(obj));
- }
- }
-
- void ScanObject(Object* obj) {
- mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
- }
-
- void EnqueueOutput() {
- if (output_ != NULL) {
- uint64_t start = 0;
- if (kMeasureOverhead) {
- start = NanoTime();
- }
- thread_pool_->AddTask(Thread::Current(), output_);
- output_ = NULL;
- if (kMeasureOverhead) {
- mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
- }
- }
- }
-
- void AllocateOutputChunk() {
- uint64_t start = 0;
- if (kMeasureOverhead) {
- start = NanoTime();
- }
- output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
- if (kMeasureOverhead) {
- mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
- }
- }
-
- void Finalize() {
- EnqueueOutput();
- delete this;
- }
-
- // Scans all of the objects
- virtual void Run(Thread* self) {
- size_t index;
- while ((index = index_++) < length_) {
- if (kUseMarkStackPrefetch) {
- static const size_t prefetch_look_ahead = 1;
- __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
- }
- Object* obj = data_[index];
- DCHECK(obj != NULL);
- ScanObject(obj);
- }
- }
-};
-
-void MarkSweep::ProcessMarkStackParallel() {
- CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
+void MarkSweep::ProcessMarkStackParallel(bool paused) {
Thread* self = Thread::Current();
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
- // Split the current mark stack up into work tasks.
const size_t num_threads = thread_pool->GetThreadCount();
- const size_t stack_size = mark_stack_->Size();
const size_t chunk_size =
- std::min((stack_size + num_threads - 1) / num_threads,
- static_cast<size_t>(MarkStackChunk::max_size));
- size_t index = 0;
- for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
- Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
- Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
- index += chunk_size;
- thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
+ std::min(mark_stack_->Size() / num_threads + 1,
+ static_cast<size_t>(MarkStackTask<false>::kMaxSize));
+ CHECK_GT(chunk_size, 0U);
+ // Split the current mark stack up into work tasks.
+ for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
+ const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
+ thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
+ const_cast<const mirror::Object**>(it)));
+ it += delta;
}
thread_pool->StartWorkers(self);
- thread_pool->Wait(self, true, true);
+ // Don't do work in the main thread since it is assumed that at least one other thread will
+ // require CPU time during the GC.
+ thread_pool->Wait(self, paused, true);
+ thread_pool->StopWorkers(self);
mark_stack_->Reset();
- // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}
// Scan anything that's on the mark stack.
-void MarkSweep::ProcessMarkStack() {
- ThreadPool* thread_pool = GetHeap()->GetThreadPool();
+void MarkSweep::ProcessMarkStack(bool paused) {
timings_.StartSplit("ProcessMarkStack");
- if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
- ProcessMarkStackParallel();
- timings_.EndSplit();
- return;
- }
-
- if (kUseMarkStackPrefetch) {
- const size_t fifo_size = 4;
- const size_t fifo_mask = fifo_size - 1;
- const Object* fifo[fifo_size];
- for (size_t i = 0; i < fifo_size; ++i) {
- fifo[i] = NULL;
- }
- size_t fifo_pos = 0;
- size_t fifo_count = 0;
- for (;;) {
- const Object* obj = fifo[fifo_pos & fifo_mask];
- if (obj != NULL) {
- ScanObject(obj);
- fifo[fifo_pos & fifo_mask] = NULL;
- --fifo_count;
- }
-
- if (!mark_stack_->IsEmpty()) {
- const Object* obj = mark_stack_->PopBack();
- DCHECK(obj != NULL);
- fifo[fifo_pos & fifo_mask] = obj;
- __builtin_prefetch(obj);
- fifo_count++;
- }
- fifo_pos++;
-
- if (!fifo_count) {
- CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
- break;
- }
- }
+ const bool parallel = kParallelProcessMarkStack && GetHeap()->GetThreadPool() &&
+ mark_stack_->Size() >= kMinimumParallelMarkStackSize;
+ if (parallel) {
+ ProcessMarkStackParallel(paused);
} else {
- while (!mark_stack_->IsEmpty()) {
- const Object* obj = mark_stack_->PopBack();
+ // TODO: Tune this.
+ static const size_t kFifoSize = 4;
+ BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+ for (;;) {
+ const Object* obj = NULL;
+ if (kUseMarkStackPrefetch) {
+ while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
+ const Object* obj = mark_stack_->PopBack();
+ DCHECK(obj != NULL);
+ __builtin_prefetch(obj);
+ prefetch_fifo.push_back(obj);
+ }
+ if (prefetch_fifo.empty()) {
+ break;
+ }
+ obj = prefetch_fifo.front();
+ prefetch_fifo.pop_front();
+ } else {
+ if (mark_stack_->IsEmpty()) {
+ break;
+ }
+ obj = mark_stack_->PopBack();
+ }
DCHECK(obj != NULL);
ScanObject(obj);
}
@@ -1397,9 +1460,8 @@
*list = clear;
timings_.EndSplit();
- // Restart the mark with the newly black references added to the
- // root set.
- ProcessMarkStack();
+ // Restart the mark with the newly black references added to the root set.
+ ProcessMarkStack(true);
}
inline bool MarkSweep::IsMarked(const Object* object) const
@@ -1457,7 +1519,7 @@
}
timings_.EndSplit();
if (has_enqueued) {
- ProcessMarkStack();
+ ProcessMarkStack(true);
}
DCHECK(*list == NULL);
}
@@ -1508,11 +1570,7 @@
void MarkSweep::UnBindBitmaps() {
base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
if (alloc_space->temp_bitmap_.get() != NULL) {
@@ -1585,11 +1643,7 @@
cumulative_timings_.End();
// Clear all of the spaces' mark bitmaps.
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
space->GetMarkBitmap()->Clear();
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index e39e2f7..ba12e64 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -54,7 +54,6 @@
class ContinuousSpace;
} // namespace space
-class CheckObjectVisitor;
class Heap;
namespace collector {
@@ -126,7 +125,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
- void RecursiveMarkDirtyObjects(byte minimum_age)
+ void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -260,8 +259,13 @@
// Unmarks an object by clearing the bit inside of the corresponding bitmap, or if it is in a
// space set, removing the object from the set.
void UnMarkObjectNonNull(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Mark the vm thread roots.
+ virtual void MarkThreadRoots(Thread* self)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(const mirror::Object* obj);
@@ -342,20 +346,20 @@
}
// Blackens objects grayed during a garbage collection.
- void ScanGrayObjects(byte minimum_age)
+ void ScanGrayObjects(bool paused, byte minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Object* reference)
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
- void ProcessMarkStack()
+ void ProcessMarkStack(bool paused)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ProcessMarkStackParallel()
+ void ProcessMarkStackParallel(bool paused)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -402,6 +406,9 @@
mirror::Object* phantom_reference_list_;
mirror::Object* cleared_reference_list_;
+ // Parallel finger.
+ AtomicInteger atomic_finger_;
+
// Number of bytes freed in this collection.
AtomicInteger freed_bytes_;
// Number of objects freed in this collection.
@@ -428,9 +435,9 @@
bool clear_soft_references_;
+ private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CheckBitmapVisitor;
- friend class CheckObjectVisitor;
friend class CheckReferenceVisitor;
friend class art::gc::Heap;
friend class InternTableEntryIsUnmarked;
@@ -444,7 +451,7 @@
friend class ModUnionScanImageRootVisitor;
friend class ScanBitmapVisitor;
friend class ScanImageRootVisitor;
- friend class MarkStackChunk;
+ template<bool kUseFinger> friend class MarkStackTask;
friend class FifoMarkStackChunk;
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index ef893c5..cc3cfe5 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -33,14 +33,10 @@
void PartialMarkSweep::BindBitmaps() {
MarkSweep::BindBitmaps();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
// zygote space are viewed as marked.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace());
ImmuneSpace(space);
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 25304b9..3b788f4 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -38,6 +38,7 @@
// collections, ie the Zygote space. Also mark this space is immune.
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
};
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index aad7c29..19cbe6b 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -33,15 +33,11 @@
void StickyMarkSweep::BindBitmaps() {
PartialMarkSweep::BindBitmaps();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
// know what was allocated since the last GC. A side-effect of binding the allocation space mark
// and live bitmap is that marking the objects will place them in the live bitmap.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
BindLiveToMarkBitmap(space);
}
@@ -51,7 +47,11 @@
}
void StickyMarkSweep::MarkReachableObjects() {
- RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty - 1);
+ // All reachable objects must be referenced by a root or a dirty card, so we can clear the mark
+ // stack here since all objects in the mark stack will get scanned by the card scanning anyways.
+ // TODO: Not put these objects in the mark stack in the first place.
+ mark_stack_->Reset();
+ RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
void StickyMarkSweep::Sweep(bool swap_bitmaps) {
@@ -59,6 +59,11 @@
SweepArray(live_stack, false);
}
+void StickyMarkSweep::MarkThreadRoots(Thread* self) {
+ MarkRootsCheckpoint(self);
+}
+
+
} // namespace collector
} // namespace gc
} // namespace art
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index e009b62..79c4359 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -43,8 +43,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void MarkThreadRoots(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ private:
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a2453b8..5b01104 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -80,7 +80,10 @@
num_gc_threads_(num_gc_threads),
low_memory_mode_(low_memory_mode),
have_zygote_space_(false),
- reference_queue_lock_(NULL),
+ soft_ref_queue_lock_(NULL),
+ weak_ref_queue_lock_(NULL),
+ finalizer_ref_queue_lock_(NULL),
+ phantom_ref_queue_lock_(NULL),
is_gc_running_(false),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -97,7 +100,7 @@
// Initially care about pauses in case we never get notified of process states, or if the JNI
// code becomes broken.
care_about_pause_times_(true),
- concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
+ concurrent_start_bytes_(concurrent_gc ? initial_size - kMinConcurrentRemainingBytes
: std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
@@ -183,11 +186,8 @@
heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
}
- // Mark image objects in the live bitmap
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::iterator It;
- for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
- space::ContinuousSpace* space = *it;
+ // Mark image objects in the live bitmap.
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
space::ImageSpace* image_space = space->AsImageSpace();
image_space->RecordImageAllocations(image_space->GetLiveBitmap());
@@ -222,8 +222,11 @@
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
- // Create the reference queue lock, this is required so for parallel object scanning in the GC.
- reference_queue_lock_ = new Mutex("reference queue lock");
+ // Create the reference queue locks; this is required for parallel object scanning in the GC.
+ soft_ref_queue_lock_ = new Mutex("Soft reference queue lock");
+ weak_ref_queue_lock_ = new Mutex("Weak reference queue lock");
+ finalizer_ref_queue_lock_ = new Mutex("Finalizer reference queue lock");
+ phantom_ref_queue_lock_ = new Mutex("Phantom reference queue lock");
last_gc_time_ns_ = NanoTime();
last_gc_size_ = GetBytesAllocated();
@@ -243,20 +246,15 @@
}
void Heap::CreateThreadPool() {
- thread_pool_.reset(new ThreadPool(num_gc_threads_));
+ if (num_gc_threads_ != 0) {
+ thread_pool_.reset(new ThreadPool(num_gc_threads_));
+ }
}
void Heap::DeleteThreadPool() {
thread_pool_.reset(NULL);
}
-// Sort spaces based on begin address
-struct ContinuousSpaceSorter {
- bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
- return a->Begin() < b->Begin();
- }
-};
-
static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
CHECK(out_value != NULL);
jfieldID field = env->GetStaticFieldID(clz, name, "I");
@@ -269,7 +267,7 @@
}
void Heap::ListenForProcessStateChange() {
- VLOG(gc) << "Heap notified of process state change";
+ VLOG(heap) << "Heap notified of process state change";
Thread* self = Thread::Current();
JNIEnvExt* env = self->GetJniEnv();
@@ -323,8 +321,8 @@
int process_state = 0;
if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) {
process_state_cares_about_pause_time_.insert(process_state);
- VLOG(gc) << "Adding process state " << process_state
- << " to set of states which care about pause time";
+ VLOG(heap) << "Adding process state " << process_state
+ << " to set of states which care about pause time";
}
}
}
@@ -370,8 +368,8 @@
care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) !=
process_state_cares_about_pause_time_.end();
- VLOG(gc) << "New process state " << process_state
- << " care about pauses " << care_about_pause_times_;
+ VLOG(heap) << "New process state " << process_state
+ << " care about pauses " << care_about_pause_times_;
}
}
@@ -388,14 +386,15 @@
}
// Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
- std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());
+ std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
+ [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
+ return a->Begin() < b->Begin();
+ });
// Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
// avoid redundant marking.
bool seen_zygote = false, seen_alloc = false;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
DCHECK(!seen_zygote);
DCHECK(!seen_alloc);
@@ -436,17 +435,13 @@
uint64_t total_duration = 0;
// Dump cumulative loggers for each GC type.
- // TODO: C++0x
uint64_t total_paused_time = 0;
- typedef std::vector<collector::MarkSweep*>::const_iterator It;
- for (It it = mark_sweep_collectors_.begin();
- it != mark_sweep_collectors_.end(); ++it) {
- collector::MarkSweep* collector = *it;
+ for (const auto& collector : mark_sweep_collectors_) {
CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
os << Dumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
- const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
+ const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
const uint64_t freed_bytes = collector->GetTotalFreedBytes();
const uint64_t freed_objects = collector->GetTotalFreedObjects();
@@ -502,16 +497,17 @@
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
- delete reference_queue_lock_;
+ delete soft_ref_queue_lock_;
+ delete weak_ref_queue_lock_;
+ delete finalizer_ref_queue_lock_;
+ delete phantom_ref_queue_lock_;
}
space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
bool fail_ok) const {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- if ((*it)->Contains(obj)) {
- return *it;
+ for (const auto& space : continuous_spaces_) {
+ if (space->Contains(obj)) {
+ return space;
}
}
if (!fail_ok) {
@@ -522,11 +518,9 @@
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
bool fail_ok) const {
- // TODO: C++0x auto
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
- for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- if ((*it)->Contains(obj)) {
- return *it;
+ for (const auto& space : discontinuous_spaces_) {
+ if (space->Contains(obj)) {
+ return space;
}
}
if (!fail_ok) {
@@ -544,11 +538,9 @@
}
space::ImageSpace* Heap::GetImageSpace() const {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
- return (*it)->AsImageSpace();
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsImageSpace()) {
+ return space->AsImageSpace();
}
}
return NULL;
@@ -627,10 +619,7 @@
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (!large_object_allocation && total_bytes_free >= byte_count) {
size_t max_contiguous_allocation = 0;
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsDlMallocSpace()) {
space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
}
@@ -706,19 +695,14 @@
}
void Heap::DumpSpaces() {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
LOG(INFO) << space << " " << *space << "\n"
<< live_bitmap << " " << *live_bitmap << "\n"
<< mark_bitmap << " " << *mark_bitmap;
}
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
+ for (const auto& space : discontinuous_spaces_) {
LOG(INFO) << space << " " << *space << "\n";
}
}
@@ -1143,11 +1127,8 @@
have_zygote_space_ = true;
// Reset the cumulative loggers since we now have a few additional timing phases.
- // TODO: C++0x
- typedef std::vector<collector::MarkSweep*>::const_iterator It;
- for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
- it != end; ++it) {
- (*it)->ResetCumulativeStatistics();
+ for (const auto& collector : mark_sweep_collectors_) {
+ collector->ResetCumulativeStatistics();
}
}
@@ -1181,7 +1162,6 @@
bool clear_soft_references) {
Thread* self = Thread::Current();
-
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
@@ -1238,10 +1218,7 @@
ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]);
collector::MarkSweep* collector = NULL;
- typedef std::vector<collector::MarkSweep*>::iterator It;
- for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
- it != end; ++it) {
- collector::MarkSweep* cur_collector = *it;
+ for (const auto& cur_collector : mark_sweep_collectors_) {
if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
collector = cur_collector;
break;
@@ -1268,7 +1245,7 @@
}
}
- if (was_slow) {
+ if (was_slow && care_about_pause_times_) {
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetBytesAllocated();
const size_t total_memory = GetTotalMemory();
@@ -1278,7 +1255,7 @@
<< ((i != pauses.size() - 1) ? ", " : "");
}
LOG(INFO) << gc_cause << " " << collector->GetName()
- << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
+ << " GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
@@ -1596,9 +1573,7 @@
void Heap::ProcessCards(base::TimingLogger& timings) {
// Clear cards and keep track of cards cleared in the mod-union table.
- typedef std::vector<space::ContinuousSpace*>::iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
base::TimingLogger::ScopedSplit split("ImageModUnionClearCards", &timings);
image_mod_union_table_->ClearCards(space);
@@ -1866,17 +1841,18 @@
void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
DCHECK(ref != NULL);
DCHECK(list != NULL);
-
- // TODO: Remove this lock, use atomic stacks for storing references.
- MutexLock mu(Thread::Current(), *reference_queue_lock_);
- if (*list == NULL) {
- ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
- *list = ref;
- } else {
- mirror::Object* head =
- (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
- ref->SetFieldObject(reference_pendingNext_offset_, head, false);
- (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ mirror::Object* pending =
+ ref->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+ if (pending == NULL) {
+ if (*list == NULL) {
+ ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ *list = ref;
+ } else {
+ mirror::Object* head =
+ (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+ ref->SetFieldObject(reference_pendingNext_offset_, head, false);
+ (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ }
}
}
@@ -2085,9 +2061,7 @@
int64_t Heap::GetTotalMemory() const {
int64_t ret = 0;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
// Currently don't include the image space.
} else if (space->IsDlMallocSpace()) {
@@ -2095,9 +2069,7 @@
ret += space->AsDlMallocSpace()->GetFootprint();
}
}
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
+ for (const auto& space : discontinuous_spaces_) {
if (space->IsLargeObjectSpace()) {
ret += space->AsLargeObjectSpace()->GetBytesAllocated();
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b3346e8..72e6e43 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -380,6 +380,22 @@
return large_object_space_;
}
+ Mutex* GetSoftRefQueueLock() {
+ return soft_ref_queue_lock_;
+ }
+
+ Mutex* GetWeakRefQueueLock() {
+ return weak_ref_queue_lock_;
+ }
+
+ Mutex* GetFinalizerRefQueueLock() {
+ return finalizer_ref_queue_lock_;
+ }
+
+ Mutex* GetPhantomRefQueueLock() {
+ return phantom_ref_queue_lock_;
+ }
+
void DumpSpaces();
// GC performance measuring
@@ -499,7 +515,7 @@
const bool concurrent_gc_;
// How many GC threads we may use for garbage collection.
- const bool num_gc_threads_;
+ const size_t num_gc_threads_;
// Boolean for if we are in low memory mode.
const bool low_memory_mode_;
@@ -512,9 +528,12 @@
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
- // Mutex held when adding references to reference queues.
+ // Mutexes held when adding references to reference queues.
// TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
- Mutex* reference_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* soft_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* weak_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* finalizer_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* phantom_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// True while the garbage collector is running.
volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 3e75716..8af4d7e 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -309,9 +309,8 @@
}
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = begin(), end = this->end(); it != end; ++it) {
- visitor(**it, arg);
+ for (auto ref : *this) {
+ visitor(*ref, arg);
}
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 97706b8..26f53db 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -248,8 +248,6 @@
class IndirectReferenceTable {
public:
- typedef IrtIterator iterator;
-
IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);
~IndirectReferenceTable();
@@ -301,12 +299,12 @@
return segment_state_.parts.topIndex;
}
- iterator begin() {
- return iterator(table_, 0, Capacity());
+ IrtIterator begin() {
+ return IrtIterator(table_, 0, Capacity());
}
- iterator end() {
- return iterator(table_, Capacity(), Capacity());
+ IrtIterator end() {
+ return IrtIterator(table_, Capacity(), Capacity());
}
void VisitRoots(RootVisitor* visitor, void* arg);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ae3a165..6caad01 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -206,12 +206,9 @@
}
return true; // Ignore upcalls.
}
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It; // TODO: C++0x auto
bool removed_stub = false;
// TODO: make this search more efficient?
- for (It it = instrumentation_stack_->begin(), end = instrumentation_stack_->end(); it != end;
- ++it) {
- InstrumentationStackFrame instrumentation_frame = *it;
+ for (InstrumentationStackFrame instrumentation_frame : *instrumentation_stack_) {
if (instrumentation_frame.frame_id_ == GetFrameId()) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Removing exit stub in " << DescribeLocation();
@@ -407,8 +404,7 @@
void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
const mirror::ArtMethod* method,
uint32_t dex_pc) const {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- It it = method_entry_listeners_.begin();
+ auto it = method_entry_listeners_.begin();
bool is_end = (it == method_entry_listeners_.end());
// Implemented this way to prevent problems caused by modification of the list while iterating.
while (!is_end) {
@@ -422,8 +418,7 @@
void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
const mirror::ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- It it = method_exit_listeners_.begin();
+ auto it = method_exit_listeners_.begin();
bool is_end = (it == method_exit_listeners_.end());
// Implemented this way to prevent problems caused by modification of the list while iterating.
while (!is_end) {
@@ -438,10 +433,8 @@
const mirror::ArtMethod* method,
uint32_t dex_pc) const {
if (have_method_unwind_listeners_) {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = method_unwind_listeners_.begin(), end = method_unwind_listeners_.end(); it != end;
- ++it) {
- (*it)->MethodUnwind(thread, method, dex_pc);
+ for (InstrumentationListener* listener : method_unwind_listeners_) {
+ listener->MethodUnwind(thread, method, dex_pc);
}
}
}
@@ -454,9 +447,8 @@
// around the problem and in general we may have to move to something like reference counting to
// ensure listeners are deleted correctly.
std::list<InstrumentationListener*> copy(dex_pc_listeners_);
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = copy.begin(), end = copy.end(); it != end; ++it) {
- (*it)->DexPcMoved(thread, this_object, method, dex_pc);
+ for (InstrumentationListener* listener : copy) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
}
}
@@ -467,10 +459,8 @@
if (have_exception_caught_listeners_) {
DCHECK_EQ(thread->GetException(NULL), exception_object);
thread->ClearException();
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = exception_caught_listeners_.begin(), end = exception_caught_listeners_.end();
- it != end; ++it) {
- (*it)->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ for (InstrumentationListener* listener : exception_caught_listeners_) {
+ listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
}
thread->SetException(throw_location, exception_object);
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index e2c1a64..d7398ca 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -40,9 +40,8 @@
void InternTable::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
- visitor(it->second, arg);
+ for (const auto& strong_intern : strong_interns_) {
+ visitor(strong_intern.second, arg);
}
if (clean_dirty) {
is_dirty_ = false;
@@ -52,8 +51,7 @@
mirror::String* InternTable::Lookup(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
mirror::String* existing_string = it->second;
if (existing_string->Equals(s)) {
return existing_string;
@@ -75,8 +73,7 @@
void InternTable::Remove(Table& table, const mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
- typedef Table::iterator It; // TODO: C++0x auto
- for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
if (it->second == s) {
table.erase(it);
return;
@@ -166,8 +163,8 @@
void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- typedef Table::iterator It; // TODO: C++0x auto
- for (It it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
+ // TODO: std::remove_if + lambda.
+ for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
mirror::Object* object = it->second;
if (!is_marked(object, arg)) {
weak_interns_.erase(it++);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index ffb93eb..d79d2c4 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -58,8 +58,7 @@
public:
bool IsMarked(const mirror::Object* s) const {
bool erased = false;
- typedef std::vector<const mirror::String*>::iterator It; // TODO: C++0x auto
- for (It it = expected_.begin(), end = expected_.end(); it != end; ++it) {
+ for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
expected_.erase(it);
erased = true;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 390a2fd..55c0765 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -538,12 +538,12 @@
void Dump(std::ostream& os) const {
bool first = true;
- for (It it = libraries_.begin(); it != libraries_.end(); ++it) {
+ for (const auto& library : libraries_) {
if (!first) {
os << ' ';
}
first = false;
- os << it->first;
+ os << library.first;
}
}
@@ -552,7 +552,7 @@
}
SharedLibrary* Get(const std::string& path) {
- It it = libraries_.find(path);
+ auto it = libraries_.find(path);
return (it == libraries_.end()) ? NULL : it->second;
}
@@ -566,8 +566,8 @@
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
const ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
- for (It it = libraries_.begin(); it != libraries_.end(); ++it) {
- SharedLibrary* library = it->second;
+ for (const auto& lib : libraries_) {
+ SharedLibrary* library = lib.second;
if (library->GetClassLoader() != declaring_class_loader) {
// We only search libraries loaded by the appropriate ClassLoader.
continue;
@@ -591,8 +591,6 @@
}
private:
- typedef SafeMap<std::string, SharedLibrary*>::const_iterator It; // TODO: C++0x auto
-
SafeMap<std::string, SharedLibrary*> libraries_;
};
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 4d8aa6f..224b2ba 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -49,7 +49,12 @@
}
inline uint32_t ArtMethod::GetDexMethodIndex() const {
+#ifdef ART_SEA_IR_MODE
+ // TODO: Re-add this check for (PORTABLE + SMALL + ) SEA IR when PORTABLE IS fixed!
+ // DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
+#else
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
+#endif
return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_dex_index_), false);
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 7301f23..5d4a6ea 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -442,6 +442,7 @@
static Class* java_lang_reflect_ArtMethod_;
+ private:
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 48c0569..ff193c9 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -979,9 +979,7 @@
void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
- typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
- It it = list_.begin();
- while (it != list_.end()) {
+ for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
if (!is_marked(m->GetObject(), arg)) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 061dfb8..4ee3533 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -253,14 +253,10 @@
return JNI_TRUE;
}
- gc::Heap* heap = runtime->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
+ for (const auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
// TODO: Ensure this works with multiple image spaces.
- const ImageHeader& image_header = (*it)->AsImageSpace()->GetImageHeader();
+ const ImageHeader& image_header = space->AsImageSpace()->GetImageHeader();
if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() != image_header.GetOatChecksum()) {
ScopedObjectAccess soa(env);
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index de64c26..8e23cbb 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -232,9 +232,8 @@
}
void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = entries_.begin(), end = entries_.end(); it != end; ++it) {
- visitor(*it, arg);
+ for (const auto& ref : entries_) {
+ visitor(ref, arg);
}
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9c28c87..671924a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -53,8 +53,8 @@
}
bool ThreadList::Contains(pid_t tid) {
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- if ((*it)->tid_ == tid) {
+ for (const auto& thread : list_) {
+ if (thread->tid_ == tid) {
return true;
}
}
@@ -113,8 +113,8 @@
void ThreadList::DumpLocked(std::ostream& os) {
os << "DALVIK THREADS (" << list_.size() << "):\n";
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->Dump(os);
+ for (const auto& thread : list_) {
+ thread->Dump(os);
os << "\n";
}
}
@@ -122,8 +122,7 @@
void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != ignore1 && thread != ignore2) {
CHECK(thread->IsSuspended())
<< "\nUnsuspended thread: <<" << *thread << "\n"
@@ -160,9 +159,7 @@
// Call a checkpoint function for each thread, threads which are suspend get their checkpoint
// manually called.
MutexLock mu(self, *Locks::thread_list_lock_);
- // TODO: C++0x auto.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self) {
for (;;) {
if (thread->RequestCheckpoint(checkpoint_function)) {
@@ -189,8 +186,7 @@
checkpoint_function->Run(self);
// Run the checkpoint on the suspended threads.
- for (size_t i = 0; i < suspended_count_modified_threads.size(); ++i) {
- Thread* thread = suspended_count_modified_threads[i];
+ for (const auto& thread : suspended_count_modified_threads) {
if (!thread->IsSuspended()) {
// Wait until the thread is suspended.
uint64_t start = NanoTime();
@@ -243,8 +239,7 @@
// Update global suspend all state for attaching threads.
++suspend_all_count_;
// Increment everybody's suspend count (except our own).
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self) {
continue;
}
@@ -285,8 +280,7 @@
// Update global suspend all state for attaching threads.
--suspend_all_count_;
// Decrement the suspend counts for all threads.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self) {
continue;
}
@@ -341,8 +335,7 @@
++suspend_all_count_;
++debug_suspend_all_count_;
// Increment everybody's suspend count (except our own).
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self || thread == debug_thread) {
continue;
}
@@ -427,8 +420,7 @@
suspend_all_count_ -= debug_suspend_all_count_;
debug_suspend_all_count_ = 0;
// Update running threads.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self || thread->debug_suspend_count_ == 0) {
continue;
}
@@ -457,8 +449,7 @@
}
all_threads_are_daemons = true;
MutexLock mu(self, *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self && !thread->IsDaemon()) {
all_threads_are_daemons = false;
break;
@@ -476,8 +467,7 @@
MutexLock mu(self, *Locks::thread_list_lock_);
{ // Tell all the daemons it's time to suspend.
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
// This is only run after all non-daemon threads have exited, so the remainder should all be
// daemons.
CHECK(thread->IsDaemon()) << *thread;
@@ -491,8 +481,7 @@
for (int i = 0; i < 10; ++i) {
usleep(200 * 1000);
bool all_suspended = true;
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self && thread->GetState() == kRunnable) {
if (!have_complained) {
LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
@@ -567,22 +556,22 @@
}
void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- callback(*it, context);
+ for (const auto& thread : list_) {
+ callback(thread, context);
}
}
void ThreadList::VisitRoots(RootVisitor* visitor, void* arg) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->VisitRoots(visitor, arg);
+ for (const auto& thread : list_) {
+ thread->VisitRoots(visitor, arg);
}
}
void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->VerifyRoots(visitor, arg);
+ for (const auto& thread : list_) {
+ thread->VerifyRoots(visitor, arg);
}
}
@@ -607,9 +596,9 @@
Thread* ThreadList::FindThreadByThinLockId(uint32_t thin_lock_id) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- if ((*it)->GetThinLockId() == thin_lock_id) {
- return *it;
+ for (const auto& thread : list_) {
+ if (thread->GetThinLockId() == thin_lock_id) {
+ return thread;
}
}
return NULL;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index d95f191..3df3e2c 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -102,8 +102,6 @@
Thread* FindThreadByThinLockId(uint32_t thin_lock_id);
private:
- typedef std::list<Thread*>::const_iterator It; // TODO: C++0x auto
-
uint32_t AllocThreadId(Thread* self);
void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 067ef2d..f7fdcfb 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -23,6 +23,8 @@
namespace art {
+static const bool kMeasureWaitTime = false;
+
ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
: thread_pool_(thread_pool),
@@ -64,7 +66,7 @@
MutexLock mu(self, task_queue_lock_);
tasks_.push_back(task);
// If we have any waiters, signal one.
- if (waiting_count_ != 0) {
+ if (started_ && waiting_count_ != 0) {
task_queue_condition_.Signal(self);
}
}
@@ -129,11 +131,13 @@
// We may be done, lets broadcast to the completion condition.
completion_condition_.Broadcast(self);
}
- const uint64_t wait_start = NanoTime();
+ const uint64_t wait_start = kMeasureWaitTime ? NanoTime() : 0;
task_queue_condition_.Wait(self);
- const uint64_t wait_end = NanoTime();
- total_wait_time_ += wait_end - std::max(wait_start, start_time_);
- waiting_count_--;
+ if (kMeasureWaitTime) {
+ const uint64_t wait_end = NanoTime();
+ total_wait_time_ += wait_end - std::max(wait_start, start_time_);
+ }
+ --waiting_count_;
}
// We are shutting down, return NULL to tell the worker thread to stop looping.
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c26926c..9c6d47b 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -55,6 +55,7 @@
const size_t stack_size_;
pthread_t pthread_;
+ private:
friend class ThreadPool;
DISALLOW_COPY_AND_ASSIGN(ThreadPoolWorker);
};
@@ -117,6 +118,7 @@
uint64_t total_wait_time_;
Barrier creation_barier_;
+ private:
friend class ThreadPoolWorker;
friend class WorkStealingWorker;
DISALLOW_COPY_AND_ASSIGN(ThreadPool);
@@ -153,6 +155,7 @@
WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
virtual void Run();
+ private:
friend class WorkStealingThreadPool;
DISALLOW_COPY_AND_ASSIGN(WorkStealingWorker);
};
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 7dbd35a..b15a0f6 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -632,12 +632,9 @@
}
}
-void Trace::DumpMethodList(std::ostream& os,
- const std::set<mirror::ArtMethod*>& visited_methods) {
- typedef std::set<mirror::ArtMethod*>::const_iterator It; // TODO: C++0x auto
+void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
MethodHelper mh;
- for (It it = visited_methods.begin(); it != visited_methods.end(); ++it) {
- mirror::ArtMethod* method = *it;
+ for (const auto& method : visited_methods) {
mh.ChangeMethod(method);
os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index dcc9f90..4d2f36f 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1048,7 +1048,7 @@
// Compute information for compiler.
if (Runtime::Current()->IsCompiler()) {
MethodReference ref(dex_file_, dex_method_idx_);
- bool compile = IsCandidateForCompilation(code_item_, method_access_flags_);
+ bool compile = IsCandidateForCompilation(ref, method_access_flags_);
if (compile) {
/* Generate a register map and add it to the method. */
UniquePtr<const std::vector<uint8_t> > map(GenerateGcMap());
@@ -4178,8 +4178,14 @@
return result;
}
-bool MethodVerifier::IsCandidateForCompilation(const DexFile::CodeItem* code_item,
+bool MethodVerifier::IsCandidateForCompilation(MethodReference& method_ref,
const uint32_t access_flags) {
+#ifdef ART_SEA_IR_MODE
+ bool use_sea = Runtime::Current()->IsSeaIRMode();
+ use_sea = use_sea && (std::string::npos != PrettyMethod(
+ method_ref.dex_method_index, *(method_ref.dex_file)).find("fibonacci"));
+ if (use_sea) return true;
+#endif
// Don't compile class initializers, ever.
if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
return false;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 6171943..d6bebc6 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -122,7 +122,7 @@
uint16_t registers_size, MethodVerifier* verifier);
RegisterLine* GetLine(size_t idx) {
- Table::iterator result = pc_to_register_line_.find(idx); // TODO: C++0x auto
+ auto result = pc_to_register_line_.find(idx);
if (result == pc_to_register_line_.end()) {
return NULL;
} else {
@@ -239,7 +239,7 @@
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
- static bool IsCandidateForCompilation(const DexFile::CodeItem* code_item,
+ static bool IsCandidateForCompilation(MethodReference& method_ref,
const uint32_t access_flags);
private:
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 8418928..25f840c 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -395,8 +395,7 @@
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
- typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
- It it = types.begin();
+ auto it = types.begin();
result << reg_type_cache_->GetFromId(*it).Dump();
for (++it; it != types.end(); ++it) {
result << ", ";
@@ -609,9 +608,8 @@
types.insert(refs.second);
}
if (kIsDebugBuild) {
- typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
- for (It it = types.begin(); it != types.end(); ++it) {
- CHECK(!reg_type_cache_->GetFromId(*it).IsUnresolvedMergedReference());
+ for (const auto& type : types) {
+ CHECK(!reg_type_cache_->GetFromId(type).IsUnresolvedMergedReference());
}
}
return types;
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 33f4195..865ba20 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -287,6 +287,7 @@
friend class RegTypeCache;
+ private:
DISALLOW_COPY_AND_ASSIGN(RegType);
};
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 24a626b..5affe47 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -208,9 +208,8 @@
result += GetRegisterType(i).Dump();
result += "],";
}
- typedef std::deque<uint32_t>::const_iterator It; // TODO: C++0x auto
- for (It it = monitors_.begin(), end = monitors_.end(); it != end; ++it) {
- result += StringPrintf("{%d},", *it);
+ for (const auto& monitor : monitors_) {
+ result += StringPrintf("{%d},", monitor);
}
return result;
}
diff --git a/tools/cpplint.py b/tools/cpplint.py
index 0b5fb93..30b5216 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -53,12 +53,8 @@
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
-# - Check that base classes have virtual destructors
-# put " // namespace" after } that closes a namespace, with
-# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
-# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
@@ -80,6 +76,7 @@
"""
import codecs
+import copy
import getopt
import math # for log
import os
@@ -139,6 +136,22 @@
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
+
+ root=subdir
+ The root directory used for deriving header guard CPP variable.
+ By default, the header guard CPP variable is calculated as the relative
+ path to the directory that contains .git, .hg, or .svn. When this flag
+ is specified, the relative path is calculated from the specified
+ directory. If the specified directory does not exist, this flag is
+ ignored.
+
+ Examples:
+ Assuming that src/.git exists, the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
+
+ No flag => CHROME_BROWSER_UI_BROWSER_H_
+ --root=chrome => BROWSER_UI_BROWSER_H_
+ --root=chrome/browser => UI_BROWSER_H_
"""
# We categorize each error message we print. Here are the categories.
@@ -161,6 +174,7 @@
'build/printf_format',
'build/storage_class',
'legal/copyright',
+ 'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
@@ -169,6 +183,7 @@
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
+ 'readability/namespace',
'readability/nolint',
'readability/streams',
'readability/todo',
@@ -189,13 +204,14 @@
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
- 'runtime/virtual',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
+ 'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
+ 'whitespace/forcolon',
'whitespace/indent',
'whitespace/labels',
'whitespace/line_length',
@@ -241,8 +257,9 @@
'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
- 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
- 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+ 'stdiostream.h', 'streambuf', 'streambuf.h', 'stream.h', 'strfile.h',
+ 'string', 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo',
+ 'valarray',
])
@@ -278,6 +295,34 @@
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+# Alternative tokens and their replacements. For full list, see section 2.5
+# Alternative tokens [lex.digraph] in the C++ standard.
+#
+# Digraphs (such as '%:') are not included here since it's a mess to
+# match those on a word boundary.
+_ALT_TOKEN_REPLACEMENT = {
+ 'and': '&&',
+ 'bitor': '|',
+ 'or': '||',
+ 'xor': '^',
+ 'compl': '~',
+ 'bitand': '&',
+ 'and_eq': '&=',
+ 'or_eq': '|=',
+ 'xor_eq': '^=',
+ 'not': '!',
+ 'not_eq': '!='
+ }
+
+# Compile regular expression that matches all the above keywords. The "[ =()]"
+# bit is meant to avoid matching these keywords outside of boolean expressions.
+#
+# False positives include C-style multi-line comments (http://go/nsiut )
+# and multi-line strings (http://go/beujw ), but those have always been
+# troublesome for cpplint.
+_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
+ r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
+
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
@@ -287,6 +332,17 @@
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
+# These constants define the current inline assembly state
+_NO_ASM = 0 # Outside of inline assembly block
+_INSIDE_ASM = 1 # Inside inline assembly block
+_END_ASM = 2 # Last line of inline assembly block
+_BLOCK_ASM = 3 # The whole block is an inline assembly block
+
+# Match start of assembly blocks
+_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
+ r'(?:\s+(volatile|__volatile__))?'
+ r'\s*[{(]')
+
_regexp_compile_cache = {}
@@ -297,6 +353,10 @@
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
+# The root directory used for deriving header guard CPP variable.
+# This is set by --root flag.
+_root = None
+
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
@@ -649,7 +709,6 @@
if not self.in_a_function:
return
# END android-added
-
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
@@ -736,7 +795,6 @@
return "art/" + fullname[len(prefix) + 1:]
# END android-changed
-
# Don't know what to do; header guard warnings may be wrong...
return fullname
@@ -825,6 +883,9 @@
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
+ elif _cpplint_state.output_format == 'eclipse':
+ sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
@@ -934,7 +995,7 @@
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
- 3) raw member contains all the lines without processing.
+ 3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
@@ -974,6 +1035,29 @@
return elided
+def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
+ """Find the position just after the matching endchar.
+
+ Args:
+ line: a CleansedLines line.
+ startpos: start searching at this position.
+ depth: nesting level at startpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ Index just after endchar.
+ """
+ for i in xrange(startpos, len(line)):
+ if line[i] == startchar:
+ depth += 1
+ elif line[i] == endchar:
+ depth -= 1
+ if depth == 0:
+ return i + 1
+ return -1
+
+
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [, finds the position that closes it.
@@ -1000,18 +1084,23 @@
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
- num_open = line.count(startchar) - line.count(endchar)
- while linenum < clean_lines.NumLines() and num_open > 0:
+ # Check first line
+ end_pos = FindEndOfExpressionInLine(line, pos, 0, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+ tail = line[pos:]
+ num_open = tail.count(startchar) - tail.count(endchar)
+ while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
- num_open += line.count(startchar) - line.count(endchar)
- # OK, now find the endchar that actually got us back to even
- endpos = len(line)
- while num_open >= 0:
- endpos = line.rfind(')', 0, endpos)
- num_open -= 1 # chopped off another )
- return (line, linenum, endpos + 1)
+ delta = line.count(startchar) - line.count(endchar)
+ if num_open + delta <= 0:
+ return (line, linenum,
+ FindEndOfExpressionInLine(line, 0, num_open, startchar, endchar))
+ num_open += delta
+ # Did not find endchar before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
@@ -1041,8 +1130,13 @@
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
+ filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
+
fileinfo = FileInfo(filename)
- return re.sub(r'[-./\s]', '_', fileinfo.RepositoryName()).upper() + '_'
+ file_path_from_root = fileinfo.RepositoryName()
+ if _root:
+ file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
+ return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
@@ -1206,6 +1300,7 @@
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
+ ('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
@@ -1266,17 +1361,55 @@
'Changing pointer instead of value (or unused value of operator*).')
-class _ClassInfo(object):
+class _BlockInfo(object):
+ """Stores information about a generic block of code."""
+
+ def __init__(self, seen_open_brace):
+ self.seen_open_brace = seen_open_brace
+ self.open_parentheses = 0
+ self.inline_asm = _NO_ASM
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ """Run checks that apply to text up to the opening brace.
+
+ This is mostly for checking the text after the class identifier
+ and the "{", usually where the base class is specified. For other
+ blocks, there isn't much to check, so we always pass.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Run checks that apply to text after the closing brace.
+
+ This is mostly used for checking end of namespace comments.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ pass
+
+
+class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
- def __init__(self, name, clean_lines, linenum):
+ def __init__(self, name, class_or_struct, clean_lines, linenum):
+ _BlockInfo.__init__(self, False)
self.name = name
- self.linenum = linenum
- self.seen_open_brace = False
+ self.starting_linenum = linenum
self.is_derived = False
- self.virtual_method_linenumber = None
- self.has_virtual_destructor = False
- self.brace_depth = 0
+ if class_or_struct == 'struct':
+ self.access = 'public'
+ else:
+ self.access = 'private'
# Try to find the end of the class. This will be confused by things like:
# class A {
@@ -1286,26 +1419,324 @@
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
- line = clean_lines.lines[i]
+ line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ # Look for a bare ':'
+ if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ self.is_derived = True
-class _ClassState(object):
- """Holds the current state of the parse relating to class declarations.
- It maintains a stack of _ClassInfos representing the parser's guess
- as to the current nesting of class declarations. The innermost class
- is at the top (back) of the stack. Typically, the stack will either
- be empty or have exactly one entry.
- """
+class _NamespaceInfo(_BlockInfo):
+ """Stores information about a namespace."""
+
+ def __init__(self, name, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name or ''
+ self.starting_linenum = linenum
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Check end of namespace comments."""
+ line = clean_lines.raw_lines[linenum]
+
+ # Check how many lines is enclosed in this namespace. Don't issue
+ # warning for missing namespace comments if there aren't enough
+ # lines. However, do apply checks if there is already an end of
+ # namespace comment and it's incorrect.
+ #
+ # TODO(unknown): We always want to check end of namespace comments
+ # if a namespace is large, but sometimes we also want to apply the
+ # check if a short namespace contained nontrivial things (something
+ # other than forward declarations). There is currently no logic on
+ # deciding what these nontrivial things are, so this check is
+ # triggered by namespace size only, which works most of the time.
+ if (linenum - self.starting_linenum < 10
+ and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
+ return
+
+ # Look for matching comment at end of namespace.
+ #
+ # Note that we accept C style "/* */" comments for terminating
+ # namespaces, so that code that terminate namespaces inside
+ # preprocessor macros can be cpplint clean. Example: http://go/nxpiz
+ #
+ # We also accept stuff like "// end of namespace <name>." with the
+ # period at the end.
+ #
+ # Besides these, we don't accept anything else, otherwise we might
+ # get false negatives when existing comment is a substring of the
+ # expected namespace. Example: http://go/ldkdc, http://cl/23548205
+ if self.name:
+ # Named namespace
+ if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
+ r'[\*/\.\\\s]*$'),
+ line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace %s"' %
+ self.name)
+ else:
+ # Anonymous namespace
+ if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
+ error(filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace"')
+
+
+class _PreprocessorInfo(object):
+ """Stores checkpoints of nesting stacks when #if/#else is seen."""
+
+ def __init__(self, stack_before_if):
+ # The entire nesting stack before #if
+ self.stack_before_if = stack_before_if
+
+ # The entire nesting stack up to #else
+ self.stack_before_else = []
+
+ # Whether we have already seen #else or #elif
+ self.seen_else = False
+
+
+class _NestingState(object):
+ """Holds states related to parsing braces."""
def __init__(self):
- self.classinfo_stack = []
+ # Stack for tracking all braces. An object is pushed whenever we
+ # see a "{", and popped when we see a "}". Only 3 types of
+ # objects are possible:
+ # - _ClassInfo: a class or struct.
+ # - _NamespaceInfo: a namespace.
+ # - _BlockInfo: some other type of block.
+ self.stack = []
- def CheckFinished(self, filename, error):
+ # Stack of _PreprocessorInfo objects.
+ self.pp_stack = []
+
+ def SeenOpenBrace(self):
+ """Check if we have seen the opening brace for the innermost block.
+
+ Returns:
+ True if we have seen the opening brace, False if the innermost
+ block is still expecting an opening brace.
+ """
+ return (not self.stack) or self.stack[-1].seen_open_brace
+
+ def InNamespaceBody(self):
+ """Check if we are currently one level inside a namespace body.
+
+ Returns:
+ True if top of the stack is a namespace block, False otherwise.
+ """
+ return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
+
+ def UpdatePreprocessor(self, line):
+ """Update preprocessor stack.
+
+ We need to handle preprocessors due to classes like this:
+ #ifdef SWIG
+ struct ResultDetailsPageElementExtensionPoint {
+ #else
+ struct ResultDetailsPageElementExtensionPoint : public Extension {
+ #endif
+ (see http://go/qwddn for original example)
+
+ We make the following assumptions (good enough for most files):
+ - Preprocessor condition evaluates to true from #if up to first
+ #else/#elif/#endif.
+
+ - Preprocessor condition evaluates to false from #else/#elif up
+ to #endif. We still perform lint checks on these lines, but
+ these do not affect nesting stack.
+
+ Args:
+ line: current line to check.
+ """
+ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ # Beginning of #if block, save the nesting stack here. The saved
+ # stack will allow us to restore the parsing state in the #else case.
+ self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
+ elif Match(r'^\s*#\s*(else|elif)\b', line):
+ # Beginning of #else block
+ if self.pp_stack:
+ if not self.pp_stack[-1].seen_else:
+ # This is the first #else or #elif block. Remember the
+ # whole nesting stack up to this point. This is what we
+ # keep after the #endif.
+ self.pp_stack[-1].seen_else = True
+ self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
+
+ # Restore the stack to how it was before the #if
+ self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
+ else:
+ # TODO(unknown): unexpected #else, issue warning?
+ pass
+ elif Match(r'^\s*#\s*endif\b', line):
+ # End of #if or #else blocks.
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
+
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Update pp_stack first
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and
+ inner_block.open_parentheses == 1 and
+ _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was _END_ASM,
+ # we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM and
+ inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume namespace declaration at the beginning of the line. Do
+ # this in a loop so that we catch same line declarations like this:
+ # namespace proto2 { namespace bridge { class MessageSet; } }
+ while True:
+ # Match start of namespace. The "\b\s*" below catches namespace
+ # declarations even if it weren't followed by a whitespace, this
+ # is so that we don't confuse our namespace checker. The
+ # missing spaces will be flagged by CheckSpacing.
+ namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
+ if not namespace_decl_match:
+ break
+
+ new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
+ self.stack.append(new_namespace)
+
+ line = namespace_decl_match.group(2)
+ if line.find('{') != -1:
+ new_namespace.seen_open_brace = True
+ line = line[line.find('{') + 1:]
+
+ # Look for a class declaration in whatever is left of the line
+ # after parsing namespaces. The regexp accounts for decorated classes
+ # such as in:
+ # class LOCKABLE API Object {
+ # };
+ #
+ # Templates with class arguments may confuse the parser, for example:
+ # template <class T
+ # class Comparator = less<T>,
+ # class Vector = vector<T> >
+ # class HeapQueue {
+ #
+ # Because this parser has no nesting state about templates, by the
+ # time it saw "class Comparator", it may think that it's a new class.
+ # Nested templates have a similar problem:
+ # template <
+ # typename ExportedType,
+ # typename TupleType,
+ # template <typename, typename> class ImplTemplate>
+ #
+ # To avoid these cases, we ignore classes that are followed by '=' or '>'
+ class_decl_match = Match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
+ '(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
+ '(([^=>]|<[^<>]*>)*)$', line)
+ if (class_decl_match and
+ (not self.stack or self.stack[-1].open_parentheses == 0)):
+ self.stack.append(_ClassInfo(
+ class_decl_match.group(4), class_decl_match.group(2),
+ clean_lines, linenum))
+ line = class_decl_match.group(5)
+
+ # If we have not yet seen the opening brace for the innermost block,
+ # run checks here.
+ if not self.SeenOpenBrace():
+ self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
+
+ # Update access control if we are inside a class/struct
+ if self.stack and isinstance(self.stack[-1], _ClassInfo):
+ access_match = Match(r'\s*(public|private|protected)\s*:', line)
+ if access_match:
+ self.stack[-1].access = access_match.group(1)
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+ # If namespace or class hasn't seen an opening brace yet, mark
+ # namespace/class head as complete. Push a new block onto the
+ # stack otherwise.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ else:
+ self.stack.append(_BlockInfo(True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+ # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
+ self.stack.pop()
+ line = matched.group(2)
+
+ def InnermostClass(self):
+ """Get class info on the top of the stack.
+
+ Returns:
+ A _ClassInfo object if we are inside a class, or None otherwise.
+ """
+ for i in range(len(self.stack), 0, -1):
+ classinfo = self.stack[i - 1]
+ if isinstance(classinfo, _ClassInfo):
+ return classinfo
+ return None
+
+ def CheckClassFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
@@ -1313,17 +1744,18 @@
filename: The name of the current file.
error: The function to call with any errors found.
"""
- if self.classinfo_stack:
- # Note: This test can result in false positives if #ifdef constructs
- # get in the way of brace matching. See the testBuildClass test in
- # cpplint_unittest.py for an example of this.
- error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
- 'Failed to find complete declaration of class %s' %
- self.classinfo_stack[0].name)
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpplint_unittest.py for an example of this.
+ for obj in self.stack:
+ if isinstance(obj, _ClassInfo):
+ error(filename, obj.starting_linenum, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
- class_state, error):
+ nesting_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
@@ -1336,8 +1768,6 @@
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- - classes with virtual methods need virtual destructors (compiler warning
- available, but not turned on yet.)
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
@@ -1347,8 +1777,8 @@
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
- class_state: A _ClassState instance which maintains information about
- the current stack of nested class declarations being parsed.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
@@ -1377,7 +1807,7 @@
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
- r'\s+(auto|register|static|extern|typedef)\b',
+ r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
@@ -1407,45 +1837,13 @@
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
- # Track class entry and exit, and attempt to find cases within the
- # class declaration that don't meet the C++ style
- # guidelines. Tracking is very dependent on the code matching Google
- # style guidelines, but it seems to perform well enough in testing
- # to be a worthwhile addition to the checks.
- classinfo_stack = class_state.classinfo_stack
- # Look for a class declaration. The regexp accounts for decorated classes
- # such as in:
- # class LOCKABLE API Object {
- # };
- class_decl_match = Match(
- r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
- '(class|struct)\s+([A-Z_]+\s+)*(\w+(::\w+)*)', line)
- if class_decl_match:
- classinfo_stack.append(_ClassInfo(
- class_decl_match.group(4), clean_lines, linenum))
-
- # Everything else in this function uses the top of the stack if it's
- # not empty.
- if not classinfo_stack:
+ # Everything else in this function operates on class declarations.
+ # Return early if the top of the nesting stack is not a class, or if
+ # the class head is not completed yet.
+ classinfo = nesting_state.InnermostClass()
+ if not classinfo or not classinfo.seen_open_brace:
return
- classinfo = classinfo_stack[-1]
-
- # If the opening brace hasn't been seen look for it and also
- # parent class declarations.
- if not classinfo.seen_open_brace:
- # If the line has a ';' in it, assume it's a forward declaration or
- # a single-line class declaration, which we won't process.
- if line.find(';') != -1:
- classinfo_stack.pop()
- return
- classinfo.seen_open_brace = (line.find('{') != -1)
- # Look for a bare ':'
- if Search('(^|[^:]):($|[^:])', line):
- classinfo.is_derived = True
- if not classinfo.seen_open_brace:
- return # Everything else in this function is for after open brace
-
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
@@ -1462,35 +1860,6 @@
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
- # Look for methods declared virtual.
- if Search(r'\bvirtual\b', line):
- classinfo.virtual_method_linenumber = linenum
- # Only look for a destructor declaration on the same line. It would
- # be extremely unlikely for the destructor declaration to occupy
- # more than one line.
- if Search(r'~%s\s*\(' % base_classname, line):
- classinfo.has_virtual_destructor = True
-
- # Look for class end.
- brace_depth = classinfo.brace_depth
- brace_depth = brace_depth + line.count('{') - line.count('}')
- if brace_depth <= 0:
- classinfo = classinfo_stack.pop()
- # Try to detect missing virtual destructor declarations.
- # For now, only warn if a non-derived class with virtual methods lacks
- # a virtual destructor. This is to make it less likely that people will
- # declare derived virtual destructors without declaring the base
- # destructor virtual.
- if ((classinfo.virtual_method_linenumber is not None) and
- (not classinfo.has_virtual_destructor) and
- (not classinfo.is_derived)): # Only warn for base classes
- error(filename, classinfo.linenum, 'runtime/virtual', 4,
- 'The class %s probably needs a virtual destructor due to '
- 'having virtual method(s), one declared at line %d.'
- % (classinfo.name, classinfo.virtual_method_linenumber))
- else:
- classinfo.brace_depth = brace_depth
-
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
@@ -1545,7 +1914,8 @@
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
- not Search(r'#\s*define|typedef', fncall)):
+ not Search(r'#\s*define|typedef', fncall) and
+ not Search(r'\w\s+\((\w+::)?\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
@@ -1678,8 +2048,165 @@
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
+def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for improper use of DISALLOW* macros.
-def CheckSpacing(filename, clean_lines, linenum, error):
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
+ r'DISALLOW_EVIL_CONSTRUCTORS|'
+ r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
+ if not matched:
+ return
+ if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
+ if nesting_state.stack[-1].access != 'private':
+ error(filename, linenum, 'readability/constructors', 3,
+ '%s must be in the private: section' % matched.group(1))
+
+ else:
+ # Found DISALLOW* macro outside a class declaration, or perhaps it
+ # was used inside a function when it should have been part of the
+ # class declaration. We could issue a warning here, but it
+ # probably resulted in a compiler error already.
+ pass
+
+
+def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
+ """Find the corresponding > to close a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_suffix: Remainder of the current line after the initial <.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_suffix
+ nesting_stack = ['<']
+ while True:
+ # Find the next operator that can tell us whether < is used as an
+ # opening bracket or as a less-than operator. We only want to
+ # warn on the latter case.
+ #
+ # We could also check all other operators and terminate the search
+ # early, e.g. if we got something like this "a<b+c", the "<" is
+ # most likely a less-than operator, but then we will get false
+ # positives for default arguments (e.g. http://go/prccd) and
+ # other template expressions (e.g. http://go/oxcjq).
+ match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(1)
+ line = match.group(2)
+
+ if nesting_stack[-1] == '<':
+ # Expecting closing angle bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator == '>':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma after a bracket, this is most likely a template
+ # argument. We have not seen a closing angle bracket yet, but
+ # it's probably a few lines later if we look for it, so just
+ # return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting closing parenthesis or closing bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator in (')', ']'):
+ # We don't bother checking for matching () or []. If we got
+ # something like (] or [), it would have been a syntax error.
+ nesting_stack.pop()
+
+ else:
+ # Scan the next line
+ linenum += 1
+ if linenum >= len(clean_lines.elided):
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all remaining lines and still no matching angle bracket.
+ # Most likely the input was incomplete, otherwise we should have
+ # seen a semicolon and returned early.
+ return True
+
+
+def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
+ """Find the corresponding < that started a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_prefix: Part of the current line before the initial >.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_prefix
+ nesting_stack = ['>']
+ while True:
+ # Find the previous operator
+ match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(2)
+ line = match.group(1)
+
+ if nesting_stack[-1] == '>':
+ # Expecting opening angle bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator == '<':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma before a bracket, this is most likely a
+ # template argument. The opening angle bracket is probably
+ # there if we look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting opening parenthesis or opening bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator in ('(', '['):
+ nesting_stack.pop()
+
+ else:
+ # Scan the previous line
+ linenum -= 1
+ if linenum < 0:
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all earlier lines and still no matching angle bracket.
+ return False
+
+
+def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
@@ -1692,6 +2219,8 @@
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
@@ -1701,7 +2230,16 @@
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
- if IsBlankLine(line):
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
+ #
+ # }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
@@ -1709,8 +2247,7 @@
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
- if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
- and prev_line[:prevbrace].find('namespace') == -1):
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
@@ -1742,12 +2279,7 @@
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
- # This doesn't ignore whitespace at the end of a namespace block
- # because that is too hard without pairing open/close braces;
- # however, a special exception is made for namespace closing
- # brackets which have a comment containing "namespace".
- #
- # Also, ignore blank lines at the end of a block in a long if-else
+ # Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
@@ -1759,7 +2291,6 @@
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
- and next_line.find('namespace') == -1
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
@@ -1820,26 +2351,59 @@
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
- # Alas, we can't test < or > because they're legitimately used sans spaces
- # (a->b, vector<int> a). The only time we can tell is a < with no >, and
- # only if it's not template params list spilling into the next line.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
- if not match:
- # Note that while it seems that the '<[^<]*' term in the following
- # regexp could be simplified to '<.*', which would indeed match
- # the same class of strings, the [^<] means that searching for the
- # regexp takes linear rather than quadratic time.
- if not Search(r'<[^<]*,\s*$', line): # template params spill
- match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
- # We allow no-spaces around << and >> when used like this: 10<<20, but
+ # We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
- match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
+ match = Search(r'(\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
+ if match and not (match.group(1).isdigit() and match.group(2).isdigit()):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <<')
+ elif not Match(r'#.*include', line):
+ # Avoid false positives on ->
+ reduced_line = line.replace('->', '')
+
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+ # technically we should flag if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
+ if (match and
+ not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
+
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
+ if (match and
+ not FindPreviousMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type<type<type>> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around %s' % match.group(1))
+ 'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
@@ -1913,16 +2477,23 @@
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
- 'Semicolon defining empty statement. Use { } instead.')
+ 'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
- 'use { } instead.')
+ 'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
- 'statement, use { } instead.')
+ 'statement, use {} instead.')
+
+ # In range-based for, we want spaces before and after the colon, but
+ # not around "::" tokens that might appear.
+ if (Search('for *\(.*[^:]:[^: ]', line) or
+ Search('for *\(.*[^: ]:[^:]', line)):
+ error(filename, linenum, 'whitespace/forcolon', 2,
+ 'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
@@ -1948,8 +2519,8 @@
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
- if (class_info.last_line - class_info.linenum <= 24 or
- linenum <= class_info.linenum):
+ if (class_info.last_line - class_info.starting_linenum <= 24 or
+ linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
@@ -1960,15 +2531,18 @@
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
+ # Also ignores cases where the previous line ends with a backslash as can be
+ # common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
- not Search(r'\b(class|struct)\b', prev_line)):
+ not Search(r'\b(class|struct)\b', prev_line) and
+ not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
- end_class_head = class_info.linenum
- for i in range(class_info.linenum, linenum):
+ end_class_head = class_info.starting_linenum
+ for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
@@ -2018,9 +2592,11 @@
# which is commonly used to control the lifetime of
# stack-allocated variables. We don't detect this perfectly: we
# just don't complain if the last non-whitespace character on the
- # previous non-blank line is ';', ':', '{', or '}'.
+ # previous non-blank line is ';', ':', '{', or '}', or if the previous
+ # line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if not Search(r'[;:}{]\s*$', prevline):
+ if (not Search(r'[;:}{]\s*$', prevline) and
+ not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
@@ -2074,6 +2650,33 @@
"You don't need a ; after a }")
+def CheckEmptyLoopBody(filename, clean_lines, linenum, error):
+ """Look for an empty loop body with only a single semicolon.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Search for loop keywords at the beginning of the line. Because only
+ # whitespaces are allowed before the keywords, this will also ignore most
+ # do-while-loops, since those lines should start with closing brace.
+ line = clean_lines.elided[linenum]
+ if Match(r'\s*(for|while)\s*\(', line):
+ # Find the end of the conditional expression
+ (end_line, end_linenum, end_pos) = CloseExpression(
+ clean_lines, linenum, line.find('('))
+
+ # Output warning if what follows the condition expression is a semicolon.
+ # No warning for all other cases, including whitespace or newline, since we
+ # have a separate check for semicolons preceded by whitespace.
+ if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
+ 'Empty loop bodies should use {} or continue')
+
+
def ReplaceableCheck(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
@@ -2142,6 +2745,38 @@
break
+def CheckAltTokens(filename, clean_lines, linenum, error):
+ """Check alternative keywords being used in boolean expressions.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Avoid preprocessor lines
+ if Match(r'^\s*#', line):
+ return
+
+ # Last ditch effort to avoid multi-line comments. This will not help
+ # if the comment started before the current line or ended after the
+ # current line, but it catches most of the false positives. At least,
+ # it provides a way to workaround this warning for people who use
+ # multi-line comments in preprocessor macros.
+ #
+ # TODO(unknown): remove this once cpplint has better support for
+ # multi-line comments.
+ if line.find('/*') >= 0 or line.find('*/') >= 0:
+ return
+
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ error(filename, linenum, 'readability/alt_tokens', 2,
+ 'Use operator %s instead of %s' % (
+ _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
+
+
def GetLineWidth(line):
"""Determines the width of the line in column positions.
@@ -2164,7 +2799,7 @@
return len(line)
-def CheckStyle(filename, clean_lines, linenum, file_extension, class_state,
+def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
@@ -2177,6 +2812,8 @@
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
@@ -2258,16 +2895,19 @@
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
- error(filename, linenum, 'whitespace/newline', 4,
+ error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
- CheckSpacing(filename, clean_lines, linenum, error)
+ CheckEmptyLoopBody(filename, clean_lines, linenum, error)
+ CheckAccess(filename, clean_lines, linenum, nesting_state, error)
+ CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
- if class_state and class_state.classinfo_stack:
- CheckSectionSpacing(filename, clean_lines,
- class_state.classinfo_stack[-1], linenum, error)
+ CheckAltTokens(filename, clean_lines, linenum, error)
+ classinfo = nesting_state.InnermostClass()
+ if classinfo:
+ CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
@@ -2564,9 +3204,11 @@
fnline))):
# We allow non-const references in a few standard places, like functions
- # called "swap()" or iostream operators like "<<" or ">>".
+ # called "swap()" or iostream operators like "<<" or ">>". We also filter
+ # out for loops, which lint otherwise mistakenly thinks are functions.
if not Search(
- r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
+ r'(for|swap|Swap|operator[<>][<>])\s*\(\s*'
+ r'(?:(?:typename\s*)?[\w:]|<.*>)+\s*&',
fnline):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
@@ -2578,7 +3220,7 @@
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
- r'(int|float|double|bool|char|u?int(8|16|32|64)_t)\([^)]', line) # TODO(enh): upstream change to handle all stdint types.
+ r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if match:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
@@ -2588,14 +3230,23 @@
if (match.group(1) is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Match(r'^\s*MockCallback<.*>', line))):
- error(filename, linenum, 'readability/casting', 4,
- 'Using deprecated casting style. '
- 'Use static_cast<%s>(...) instead' %
- match.group(2))
+ # Try a bit harder to catch gmock lines: the only place where
+ # something looks like an old-style cast is where we declare the
+ # return type of the mocked method, and the only time when we
+ # are missing context is if MOCK_METHOD was split across
+ # multiple lines (for example http://go/hrfhr ), so we only need
+ # to check the previous line for MOCK_METHOD.
+ if (linenum == 0 or
+ not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(\S+,\s*$',
+ clean_lines.elided[linenum - 1])):
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ match.group(2))
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
- r'\((int|float|double|bool|char|u?int(8|16|32|64))\)', error) # TODO(enh): upstream change to handle all stdint types.
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
@@ -2713,7 +3364,7 @@
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
- if match:
+ if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
@@ -2834,6 +3485,11 @@
'Using sizeof(type). Use sizeof(varname) instead if possible')
return True
+ # operator++(int) and operator--(int)
+ if (line[0:match.start(1) - 1].endswith(' operator++') or
+ line[0:match.start(1) - 1].endswith(' operator--')):
+ return False
+
remainder = line[match.end(0):]
# The close paren is for function pointers as arguments to a function.
@@ -3122,13 +3778,13 @@
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
- 'Omit template arguments from make_pair OR use pair directly OR'
- ' if appropriate, construct a pair directly')
+ 'For C++11-compatibility, omit template arguments from make_pair'
+ ' OR use pair directly OR if appropriate, construct a pair directly')
-def ProcessLine(filename, file_extension,
- clean_lines, line, include_state, function_state,
- class_state, error, extra_check_functions=[]):
+def ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions=[]):
"""Processes a single line in the file.
Args:
@@ -3139,8 +3795,8 @@
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
- class_state: A _ClassState instance which maintains information about
- the current stack of nested class declarations being parsed.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
@@ -3149,13 +3805,16 @@
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
+ nesting_state.Update(filename, clean_lines, line, error)
+ if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
+ return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
- CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
+ CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
- class_state, error)
+ nesting_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
@@ -3182,7 +3841,7 @@
include_state = _IncludeState()
function_state = _FunctionState()
- class_state = _ClassState()
+ nesting_state = _NestingState()
ResetNolintSuppressions()
@@ -3195,9 +3854,9 @@
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
- include_state, function_state, class_state, error,
+ include_state, function_state, nesting_state, error,
extra_check_functions)
- class_state.CheckFinished(filename, error)
+ nesting_state.CheckClassFinished(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
@@ -3312,7 +3971,8 @@
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'stdout', # TODO(enh): added --stdout
'counting=',
- 'filter='])
+ 'filter=',
+ 'root='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
@@ -3328,8 +3988,8 @@
elif opt == '--stdout': # TODO(enh): added --stdout
output_stream = sys.stdout # TODO(enh): added --stdout
elif opt == '--output':
- if not val in ('emacs', 'vs7'):
- PrintUsage('The only allowed output formats are emacs and vs7.')
+ if not val in ('emacs', 'vs7', 'eclipse'):
+ PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
@@ -3341,6 +4001,9 @@
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
+ elif opt == '--root':
+ global _root
+ _root = val
if not filenames:
PrintUsage('No files were specified.')
@@ -3349,7 +4012,6 @@
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
-
sys.stderr = output_stream # TODO(enh): added --stdout
return filenames