Merge "Clear (madvise) card table for CC"
diff --git a/Android.mk b/Android.mk
index 2647268..b2716cd 100644
--- a/Android.mk
+++ b/Android.mk
@@ -568,3 +568,11 @@
# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt
.PHONY: art-boot-image
art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)
+
+.PHONY: art-job-images
+art-job-images: \
+ $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
+ $(HOST_OUT_EXECUTABLES)/dex2oats \
+ $(HOST_OUT_EXECUTABLES)/dex2oatds \
+ $(HOST_OUT_EXECUTABLES)/profman
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 9e94b9d..8fdf6fc 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -260,7 +260,8 @@
OatWriter* const oat_writer = oat_writers[i].get();
ElfWriter* const elf_writer = elf_writers[i].get();
std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
- oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher);
+ oat_writer->Initialize(driver, writer.get(), cur_dex_files);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index ffeff76..fd1b135 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -203,7 +203,8 @@
}
linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
instruction_set_features_.get());
- oat_writer.PrepareLayout(compiler_driver_.get(), nullptr, dex_files, &patcher);
+ oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
+ oat_writer.PrepareLayout(&patcher);
size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer.GetOatSize() - rodata_size;
elf_writer->PrepareDynamicSection(rodata_size,
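For context, the old four-argument PrepareLayout() is split in two: Initialize() attaches the compiler driver, image writer, and dex files, and PrepareLayout() now takes only the relative patcher. A minimal sketch of the resulting call order, using only names visible in this change (error handling and the multi-oat loop elided):

    // Sketch, not ART code: the writer call sequence after this change.
    void WriteOatAndVdex(OatWriter* oat_writer,
                         OutputStream* vdex_out,
                         const CompilerDriver* driver,
                         ImageWriter* image_writer,
                         const std::vector<const DexFile*>& dex_files,
                         verifier::VerifierDeps* deps,
                         linker::MultiOatRelativePatcher* patcher) {
      // New: attach the driver, image writer and dex files up front...
      oat_writer->Initialize(driver, image_writer, dex_files);
      // ...so that the vdex sections can be written before oat layout.
      oat_writer->WriteVerifierDeps(vdex_out, deps);
      oat_writer->WriteQuickeningInfo(vdex_out);
      oat_writer->WriteVdexHeader(vdex_out);  // Finalizes the vdex.
      // Layout now needs only the relative patcher.
      oat_writer->PrepareLayout(patcher);
      // WriteRodata() / WriteCode() follow as before.
    }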
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 52134e8..6cbca7a 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -299,6 +299,7 @@
vdex_size_(0u),
vdex_dex_files_offset_(0u),
vdex_verifier_deps_offset_(0u),
+ vdex_quickening_info_offset_(0u),
oat_size_(0u),
bss_start_(0u),
bss_size_(0u),
@@ -314,6 +315,8 @@
size_dex_file_(0),
size_verifier_deps_(0),
size_verifier_deps_alignment_(0),
+ size_quickening_info_(0),
+ size_quickening_info_alignment_(0),
size_interpreter_to_interpreter_bridge_(0),
size_interpreter_to_compiled_code_bridge_(0),
size_jni_dlsym_lookup_(0),
@@ -519,15 +522,9 @@
return true;
}
-void OatWriter::PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher) {
+void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher) {
CHECK(write_state_ == WriteState::kPrepareLayout);
- compiler_driver_ = compiler;
- image_writer_ = image_writer;
- dex_files_ = &dex_files;
relative_patcher_ = relative_patcher;
SetMultiOatRelativePatcherAdjustment();
@@ -706,9 +703,10 @@
class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
public:
- InitCodeMethodVisitor(OatWriter* writer, size_t offset)
+ InitCodeMethodVisitor(OatWriter* writer, size_t offset, size_t quickening_info_offset)
: OatDexMethodVisitor(writer, offset),
- debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()) {
+ debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()),
+ current_quickening_info_offset_(quickening_info_offset) {
writer_->absolute_patch_locations_.reserve(
writer_->compiler_driver_->GetNonRelativeLinkerPatchCount());
}
@@ -726,6 +724,9 @@
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+ if (it.GetMethodCodeItem() != nullptr) {
+ current_quickening_info_offset_ += sizeof(uint32_t);
+ }
if (compiled_method != nullptr) {
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
@@ -771,15 +772,28 @@
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
- // If we don't have quick code, then we must have a vmap, as that is how the dex2dex
- // compiler records its transformations.
- DCHECK(!quick_code.empty() || vmap_table_offset != 0);
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
- if (vmap_table_offset != 0u && code_offset != 0u) {
- vmap_table_offset += code_offset;
- DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets";
+ if (!compiled_method->GetQuickCode().empty()) {
+ // If the code is compiled, we write the offset of the stack map relative
+ // to the code.
+ if (vmap_table_offset != 0u) {
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
+ } else {
+ if (kIsVdexEnabled) {
+ // We write the offset in the .vdex file.
+ DCHECK_EQ(vmap_table_offset, 0u);
+ vmap_table_offset = current_quickening_info_offset_;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ current_quickening_info_offset_ += map.size() * sizeof(map.front());
+ } else {
+ // We write the offset of the quickening info relative to the code.
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
}
uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
@@ -878,6 +892,9 @@
// Cache of compiler's --debuggable option.
const bool debuggable_;
+
+ // Offset in the vdex file for the quickening info.
+ uint32_t current_quickening_info_offset_;
};
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
@@ -893,21 +910,25 @@
if (compiled_method != nullptr) {
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
- DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
+ // If vdex is enabled, we only emit the stack map of compiled code. The quickening info will
+ // be in the vdex file.
+ if (!compiled_method->GetQuickCode().empty() || !kIsVdexEnabled) {
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- uint32_t map_size = map.size() * sizeof(map[0]);
- if (map_size != 0u) {
- size_t offset = dedupe_map_.GetOrCreate(
- map.data(),
- [this, map_size]() {
- uint32_t new_offset = offset_;
- offset_ += map_size;
- return new_offset;
- });
- // Code offset is not initialized yet, so set the map offset to 0u-offset.
- DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
- oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ uint32_t map_size = map.size() * sizeof(map[0]);
+ if (map_size != 0u) {
+ size_t offset = dedupe_map_.GetOrCreate(
+ map.data(),
+ [this, map_size]() {
+ uint32_t new_offset = offset_;
+ offset_ += map_size;
+ return new_offset;
+ });
+ // Code offset is not initialized yet, so set the map offset to 0u-offset.
+ DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+ oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ }
}
++method_offsets_index_;
}
@@ -1372,7 +1393,10 @@
<< compiled_method->GetVmapTable().size() << " " << map_offset << " "
<< dex_file_->PrettyMethod(it.GetMemberIndex());
- if (map_offset != 0u) {
+ // If vdex is enabled, only emit the map for compiled code. The quickening info
+ // is emitted in the vdex already.
+ if (map_offset != 0u &&
+ !(kIsVdexEnabled && compiled_method->GetQuickCode().empty())) {
// Transform map_offset to actual oat data offset.
map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
DCHECK_NE(map_offset, 0u);
@@ -1539,21 +1563,18 @@
}
size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
- #define VISIT(VisitorType) \
- do { \
- VisitorType visitor(this, offset); \
- bool success = VisitDexMethods(&visitor); \
- DCHECK(success); \
- offset = visitor.GetOffset(); \
- } while (false)
+ InitCodeMethodVisitor code_visitor(this, offset, vdex_quickening_info_offset_);
+ bool success = VisitDexMethods(&code_visitor);
+ DCHECK(success);
+ offset = code_visitor.GetOffset();
- VISIT(InitCodeMethodVisitor);
if (HasImage()) {
- VISIT(InitImageMethodVisitor);
+ InitImageMethodVisitor image_visitor(this, offset);
+ success = VisitDexMethods(&image_visitor);
+ DCHECK(success);
+ offset = image_visitor.GetOffset();
}
- #undef VISIT
-
return offset;
}
@@ -1626,6 +1647,90 @@
return true;
}
+class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
+ public:
+ WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out, uint32_t offset)
+ : DexMethodVisitor(writer, offset),
+ out_(out),
+ written_bytes_(0u) {}
+
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
+ const ClassDataItemIterator& it) {
+ if (it.GetMethodCodeItem() == nullptr) {
+ // No CodeItem. Native or abstract method.
+ return true;
+ }
+
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+
+ uint32_t length = 0;
+ const uint8_t* data = nullptr;
+ // VMap only contains quickening info if this method is not compiled.
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().empty()) {
+ ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
+ data = map.data();
+ length = map.size() * sizeof(map.front());
+ }
+
+ if (!out_->WriteFully(&length, sizeof(length)) ||
+ !out_->WriteFully(data, length)) {
+ PLOG(ERROR) << "Failed to write quickening info for "
+ << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
+ return false;
+ }
+ offset_ += sizeof(length) + length;
+ written_bytes_ += sizeof(length) + length;
+ return true;
+ }
+
+ size_t GetNumberOfWrittenBytes() const {
+ return written_bytes_;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t written_bytes_;
+};
+
+bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
+ if (!kIsVdexEnabled) {
+ return true;
+ }
+
+ size_t initial_offset = vdex_size_;
+ size_t start_offset = RoundUp(initial_offset, 4u);
+
+ vdex_size_ = start_offset;
+ vdex_quickening_info_offset_ = vdex_size_;
+ size_quickening_info_alignment_ = start_offset - initial_offset;
+
+ off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(start_offset)) {
+ PLOG(ERROR) << "Failed to seek to quickening info section. Actual: " << actual_offset
+ << " Expected: " << start_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ WriteQuickeningInfoMethodVisitor visitor(this, vdex_out, start_offset);
+ if (!VisitDexMethods(&visitor)) {
+ PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ size_quickening_info_ = visitor.GetNumberOfWrittenBytes();
+ vdex_size_ += size_quickening_info_;
+ return true;
+}
+
bool OatWriter::WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps) {
if (!kIsVdexEnabled) {
return true;
@@ -1717,6 +1822,8 @@
DO_STAT(size_dex_file_);
DO_STAT(size_verifier_deps_);
DO_STAT(size_verifier_deps_alignment_);
+ DO_STAT(size_quickening_info_);
+ DO_STAT(size_quickening_info_alignment_);
DO_STAT(size_interpreter_to_interpreter_bridge_);
DO_STAT(size_interpreter_to_compiled_code_bridge_);
DO_STAT(size_jni_dlsym_lookup_);
@@ -2434,9 +2541,11 @@
DCHECK_NE(vdex_verifier_deps_offset_, 0u);
size_t dex_section_size = vdex_verifier_deps_offset_ - vdex_dex_files_offset_;
- size_t verifier_deps_section_size = vdex_size_ - vdex_verifier_deps_offset_;
+ size_t verifier_deps_section_size = vdex_quickening_info_offset_ - vdex_verifier_deps_offset_;
+ size_t quickening_info_section_size = vdex_size_ - vdex_quickening_info_offset_;
- VdexFile::Header vdex_header(dex_section_size, verifier_deps_section_size);
+ VdexFile::Header vdex_header(
+ dex_section_size, verifier_deps_section_size, quickening_info_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
PLOG(ERROR) << "Failed to write vdex header. File: " << vdex_out->GetLocation();
return false;
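WriteQuickeningInfo() emits, for every method that has a code item, a uint32_t length followed by length bytes of quickening data (length is 0 for methods that were compiled or have nothing to quicken); the section starts 4-byte aligned and its size is recorded in the new three-field VdexFile::Header. A hedged sketch of a reader for that record stream (ParseQuickeningInfo is a hypothetical helper, not ART API):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct QuickeningRecord {
      const uint8_t* data;
      uint32_t length;
    };

    // Walks the records in emission order, mirroring the cursor
    // arithmetic of WriteQuickeningInfoMethodVisitor above.
    std::vector<QuickeningRecord> ParseQuickeningInfo(const uint8_t* begin,
                                                      const uint8_t* end) {
      std::vector<QuickeningRecord> records;
      const uint8_t* ptr = begin;
      while (ptr + sizeof(uint32_t) <= end) {
        uint32_t length;
        std::memcpy(&length, ptr, sizeof(length));  // May be unaligned.
        ptr += sizeof(length);
        records.push_back({ptr, length});
        ptr += length;
      }
      return records;
    }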
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 1cc193b..3d08ad3 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -118,6 +118,10 @@
// - AddRawDexFileSource().
// Then the user must call in order
// - WriteAndOpenDexFiles()
+ // - Initialize()
+ // - WriteVerifierDeps()
+ // - WriteQuickeningInfo()
+ // - WriteVdexHeader()
// - PrepareLayout(),
// - WriteRodata(),
// - WriteCode(),
@@ -154,14 +158,20 @@
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+ bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
bool WriteVdexHeader(OutputStream* vdex_out);
+ // Initialize the writer with the given parameters.
+ void Initialize(const CompilerDriver* compiler,
+ ImageWriter* image_writer,
+ const std::vector<const DexFile*>& dex_files) {
+ compiler_driver_ = compiler;
+ image_writer_ = image_writer;
+ dex_files_ = &dex_files;
+ }
// Prepare layout of remaining data.
- void PrepareLayout(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files,
- linker::MultiOatRelativePatcher* relative_patcher);
+ void PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher);
// Write the rest of .rodata section (ClassOffsets[], OatClass[], maps).
bool WriteRodata(OutputStream* out);
// Write the code to the .text section.
@@ -239,6 +249,7 @@
class InitImageMethodVisitor;
class WriteCodeMethodVisitor;
class WriteMapMethodVisitor;
+ class WriteQuickeningInfoMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
// with a given DexMethodVisitor.
@@ -325,6 +336,9 @@
// Offset of section holding VerifierDeps inside Vdex.
size_t vdex_verifier_deps_offset_;
+ // Offset of section holding quickening info inside Vdex.
+ size_t vdex_quickening_info_offset_;
+
// Size required for Oat data structures.
size_t oat_size_;
@@ -368,6 +382,8 @@
uint32_t size_dex_file_;
uint32_t size_verifier_deps_;
uint32_t size_verifier_deps_alignment_;
+ uint32_t size_quickening_info_;
+ uint32_t size_quickening_info_alignment_;
uint32_t size_interpreter_to_interpreter_bridge_;
uint32_t size_interpreter_to_compiled_code_bridge_;
uint32_t size_jni_dlsym_lookup_;
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index d938a70..fd1db59 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -311,8 +311,7 @@
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(string);
} else {
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // desired_load_kind = HLoadString::LoadKind::kBssEntry;
+ desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 61b7f08..1e71d06 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2830,7 +2830,7 @@
void Thumb2Assembler::clrex(Condition cond) {
CheckCondition(cond);
- int32_t encoding = B31 | B30 | B29 | B27 | B28 | B25 | B24 | B23 |
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 | B24 | B23 |
B21 | B20 |
0xf << 16 |
B15 |
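The fix drops a stray B27 from the CLREX encoding. A quick arithmetic check (the reference value is an assumption taken from the ARM ARM's CLREX T1 encoding, 0xF3BF8F2F; the bits after B15 are cut off in this hunk, so the trailing 0xF2F below is filled in from that documented encoding rather than from the diff):

    #include <cstdint>

    constexpr uint32_t B(int n) { return 1u << n; }
    // Bits as assembled by Thumb2Assembler::clrex() after the fix.
    constexpr uint32_t kClrexT1 =
        B(31) | B(30) | B(29) | B(28) | B(25) | B(24) | B(23) |
        B(21) | B(20) | (0xFu << 16) | B(15) | 0xF2Fu;
    static_assert(kClrexT1 == 0xF3BF8F2Fu, "CLREX (T1)");
    // The extra B27 made the old value 0xFBBF8F2F, which is not CLREX.
    static_assert((kClrexT1 | B(27)) == 0xFBBF8F2Fu, "old, broken value");

This is what the new clrex test in assembler_thumb2_test.cc exercises: the assembled bytes must disassemble back to "clrex".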
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index d0799d6..30e8f4e 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -207,6 +207,13 @@
DriverStr(expected, "strexd");
}
+TEST_F(AssemblerThumb2Test, clrex) {
+ __ clrex();
+
+ const char* expected = "clrex\n";
+ DriverStr(expected, "clrex");
+}
+
TEST_F(AssemblerThumb2Test, LdrdStrd) {
__ ldrd(arm::R0, arm::Address(arm::R2, 8));
__ ldrd(arm::R0, arm::Address(arm::R12));
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0ce1362..8bbe685 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1777,6 +1777,14 @@
}
}
+ // Initialize the writers with the compiler driver, image writer, and their
+ // dex files. The writers were created before those objects were available.
+ for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
+ std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
+ std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
+ oat_writer->Initialize(driver_.get(), image_writer_.get(), dex_files);
+ }
+
{
TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
DCHECK(IsBootImage() || oat_files_.size() == 1u);
@@ -1791,6 +1799,11 @@
return false;
}
+ if (!oat_writers_[i]->WriteQuickeningInfo(vdex_out.get())) {
+ LOG(ERROR) << "Failed to write quickening info into VDEX " << vdex_file->GetPath();
+ return false;
+ }
+
// VDEX finalized, seek back to the beginning and write the header.
if (!oat_writers_[i]->WriteVdexHeader(vdex_out.get())) {
LOG(ERROR) << "Failed to write vdex header into VDEX " << vdex_file->GetPath();
@@ -1799,15 +1812,14 @@
}
}
- linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
+ linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i];
std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i];
- std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
- oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files, &patcher);
+ oat_writer->PrepareLayout(&patcher);
size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
size_t text_size = oat_writer->GetOatSize() - rodata_size;
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 15b6e17..30de28e 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1581,10 +1581,15 @@
/*
* Dumps the requested sections of the file.
*/
-static void processDexFile(const char* fileName, const DexFile* pDexFile) {
+static void processDexFile(const char* fileName,
+ const DexFile* pDexFile, size_t i, size_t n) {
if (gOptions.verbose) {
- fprintf(gOutFile, "Opened '%s', DEX version '%.3s'\n",
- fileName, pDexFile->GetHeader().magic_ + 4);
+ fputs("Opened '", gOutFile);
+ fputs(fileName, gOutFile);
+ if (n > 1) {
+ fprintf(gOutFile, ":%s", DexFile::GetMultiDexClassesDexName(i).c_str());
+ }
+ fprintf(gOutFile, "', DEX version '%.3s'\n", pDexFile->GetHeader().magic_ + 4);
}
// Headers.
@@ -1642,8 +1647,8 @@
if (gOptions.checksumOnly) {
fprintf(gOutFile, "Checksum verified\n");
} else {
- for (size_t i = 0; i < dex_files.size(); i++) {
- processDexFile(fileName, dex_files[i].get());
+ for (size_t i = 0, n = dex_files.size(); i < n; i++) {
+ processDexFile(fileName, dex_files[i].get(), i, n);
}
}
return 0;
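processDexFile() now appends the multidex entry name when an archive holds more than one dex file. A hedged sketch of the naming convention assumed here for DexFile::GetMultiDexClassesDexName (the usual Android convention, stated as an assumption rather than taken from this diff):

    #include <string>

    // Entry 0 is "classes.dex"; entry i (i > 0) is "classes<i+1>.dex".
    std::string MultiDexClassesDexName(size_t index) {
      return index == 0 ? "classes.dex"
                        : "classes" + std::to_string(index + 1) + ".dex";
    }

With three dex files in app.apk, verbose output for the second entry would then read something like: Opened 'app.apk:classes2.dex', DEX version '037'.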
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c7bf231..4e81d50 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -64,6 +64,7 @@
#include "string_reference.h"
#include "thread_list.h"
#include "type_lookup_table.h"
+#include "vdex_file.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -1029,13 +1030,19 @@
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
}
- uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
+ uint32_t vmap_table_offset = method_header == nullptr ? 0 : method_header->vmap_table_offset_;
vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
- if (vmap_table_offset > oat_file_.Size()) {
+
+ size_t vmap_table_offset_limit =
+ (kIsVdexEnabled && IsMethodGeneratedByDexToDexCompiler(oat_method, code_item))
+ ? oat_file_.GetVdexFile()->Size()
+ : method_header->GetCode() - oat_file_.Begin();
+ if (vmap_table_offset >= vmap_table_offset_limit) {
vios->Stream() << StringPrintf("WARNING: "
"vmap table offset 0x%08x is past end of file 0x%08zx. "
"vmap table offset was loaded from offset 0x%08x.\n",
- vmap_table_offset, oat_file_.Size(),
+ vmap_table_offset,
+ vmap_table_offset_limit,
oat_method.GetVmapTableOffsetOffset());
success = false;
} else if (options_.dump_vmap_) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 3065f68..c550a1b 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -468,7 +468,18 @@
if (!found || (oat_method.GetQuickCode() != nullptr)) {
return nullptr;
}
- return oat_method.GetVmapTable();
+ if (kIsVdexEnabled) {
+ const OatQuickMethodHeader* header = oat_method.GetOatQuickMethodHeader();
+ // OatMethod without a header: no quickening table.
+ if (header == nullptr) {
+ return nullptr;
+ }
+ // The table is in the .vdex file.
+ const OatFile::OatDexFile* oat_dex_file = GetDexCache()->GetDexFile()->GetOatDexFile();
+ return oat_dex_file->GetOatFile()->DexBegin() + header->vmap_table_offset_;
+ } else {
+ return oat_method.GetVmapTable();
+ }
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index a1ce30b..5399dc5 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -277,7 +277,7 @@
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
java_object);
return false;
}
@@ -632,17 +632,17 @@
bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
- found_kind = GetIndirectRefKind(obj);
+ found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
found_kind = kLocal;
}
} else {
- found_kind = GetIndirectRefKind(obj);
+ found_kind = IndirectReferenceTable::GetIndirectRefKind(obj);
}
if (obj != nullptr && found_kind != expected_kind) {
AbortF("expected reference of kind %s but found %s: %p",
- ToStr<IndirectRefKind>(expected_kind).c_str(),
- ToStr<IndirectRefKind>(GetIndirectRefKind(obj)).c_str(),
+ GetIndirectRefKindString(expected_kind),
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(obj)),
obj);
return false;
}
@@ -773,7 +773,7 @@
// Either java_object is invalid or is a cleared weak.
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
bool okay;
- if (GetIndirectRefKind(ref) != kWeakGlobal) {
+ if (IndirectReferenceTable::GetIndirectRefKind(ref) != kWeakGlobal) {
okay = false;
} else {
obj = soa.Vm()->DecodeWeakGlobal(soa.Self(), ref);
@@ -781,8 +781,10 @@
}
if (!okay) {
AbortF("%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
- java_object, obj.Ptr());
+ what,
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
+ java_object,
+ obj.Ptr());
return false;
}
}
@@ -790,8 +792,10 @@
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj.Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
- java_object, obj.Ptr());
+ what,
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_object)),
+ java_object,
+ obj.Ptr());
return false;
}
@@ -1116,8 +1120,9 @@
if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a.Ptr()))) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("jarray is an invalid %s: %p (%p)",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
- java_array, a.Ptr());
+ GetIndirectRefKindString(IndirectReferenceTable::GetIndirectRefKind(java_array)),
+ java_array,
+ a.Ptr());
return false;
} else if (!a->IsArrayInstance()) {
AbortF("jarray argument has non-array type: %s", a->PrettyTypeOf().c_str());
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cea8377..439849b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1180,8 +1180,7 @@
<< resolved_types << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
+ method->SetDexCacheResolvedTypes(method->GetDexCache()->GetResolvedTypes(),
kRuntimePointerSize);
}
}
@@ -1197,8 +1196,7 @@
<< resolved_methods << " is not in image starting at "
<< reinterpret_cast<void*>(header_.GetImageBegin());
if (!is_copied || in_image_space) {
- // Go through the array so that we don't need to do a slow map lookup.
- method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
+ method->SetDexCacheResolvedMethods(method->GetDexCache()->GetResolvedMethods(),
kRuntimePointerSize);
}
}
@@ -1283,7 +1281,7 @@
}
// Only add the classes to the class loader after the points where we can return false.
for (size_t i = 0; i < num_dex_caches; i++) {
- ObjPtr<mirror::DexCache> const dex_cache = dex_caches->Get(i);
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
const DexFile* const dex_file = dex_cache->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) {
@@ -1333,10 +1331,6 @@
DCHECK(types[j].IsNull());
}
std::copy_n(image_resolved_types, num_types, types);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- // This leaves random garbage at the start of the dex cache array, but nobody should ever
- // read from it again.
- *reinterpret_cast<GcRoot<mirror::Class>**>(image_resolved_types) = types;
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1347,8 +1341,6 @@
DCHECK(methods[j] == nullptr);
}
std::copy_n(image_resolved_methods, num_methods, methods);
- // Store a pointer to the new location for fast ArtMethod patching without requiring map.
- *reinterpret_cast<ArtMethod***>(image_resolved_methods) = methods;
dex_cache->SetResolvedMethods(methods);
}
if (num_fields != 0u) {
@@ -1391,7 +1383,11 @@
/*allow_failure*/true);
CHECK(existing_dex_cache == nullptr);
StackHandleScope<1> hs3(self);
- RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
+ Handle<mirror::DexCache> h_dex_cache = hs3.NewHandle(dex_cache);
+ RegisterDexFileLocked(*dex_file, h_dex_cache);
+ if (kIsDebugBuild) {
+ dex_cache.Assign(h_dex_cache.Get()); // Update dex_cache, used below in debug build.
+ }
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
@@ -1781,6 +1777,12 @@
<< reinterpret_cast<const void*>(section_end);
}
}
+ if (!oat_file->GetBssGcRoots().empty()) {
+ // Insert oat file to class table for visiting .bss GC roots.
+ class_table->InsertOatFile(oat_file);
+ }
+ } else {
+ DCHECK(oat_file->GetBssGcRoots().empty());
}
if (added_class_table) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -3242,6 +3244,10 @@
WriterMutexLock mu(self, dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
+ // Another thread managed to initialize the dex cache faster, so use that DexCache.
+ // If this thread encountered OOME, ignore it.
+ DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
+ self->ClearException();
return dex_cache.Ptr();
}
if (h_dex_cache.Get() == nullptr) {
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 97c0abd..b44104e 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -170,14 +170,27 @@
const DexFile* dex_file = ObjPtr<mirror::DexCache>::DownCast(obj)->GetDexFile();
if (dex_file != nullptr && dex_file->GetOatDexFile() != nullptr) {
const OatFile* oat_file = dex_file->GetOatDexFile()->GetOatFile();
- if (!oat_file->GetBssGcRoots().empty() && !ContainsElement(oat_files_, oat_file)) {
- oat_files_.push_back(oat_file);
+ if (!oat_file->GetBssGcRoots().empty()) {
+ InsertOatFileLocked(oat_file); // Ignore return value.
}
}
}
return true;
}
+bool ClassTable::InsertOatFile(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), lock_);
+ return InsertOatFileLocked(oat_file);
+}
+
+bool ClassTable::InsertOatFileLocked(const OatFile* oat_file) {
+ if (ContainsElement(oat_files_, oat_file)) {
+ return false;
+ }
+ oat_files_.push_back(oat_file);
+ return true;
+}
+
size_t ClassTable::WriteToMemory(uint8_t* ptr) const {
ReaderMutexLock mu(Thread::Current(), lock_);
ClassSet combined;
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 1344990..bc9eaf4 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -141,6 +141,11 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFile(const OatFile* oat_file)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
REQUIRES(!lock_)
@@ -168,6 +173,11 @@
private:
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
+ // Return true if we inserted the oat file, false if it already exists.
+ bool InsertOatFileLocked(const OatFile* oat_file)
+ REQUIRES(lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
// We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 4d47b83..d438418 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -75,6 +75,10 @@
!dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
+ DCHECK(
+ !class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ << "Oat file with .bss GC roots was not registered in class table: "
+ << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
// Note that we emit the barrier before the compiled code stores the string as GC root.
// This is OK as there is no suspend point in between.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 330c742..670dadc 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,13 +15,18 @@
*/
#include "art_method-inl.h"
+#include "base/casts.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "indirect_reference_table.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
namespace art {
+static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
+static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
+
template <bool kDynamicFast>
static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
@@ -45,7 +50,7 @@
extern uint32_t JniMethodFastStart(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
DCHECK(env != nullptr);
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
if (kIsDebugBuild) {
@@ -60,7 +65,7 @@
extern uint32_t JniMethodStart(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
DCHECK(env != nullptr);
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
if (!native_method->IsFastNative()) {
@@ -117,7 +122,7 @@
env->CheckNoHeldMonitors();
}
env->locals.SetSegmentState(env->local_ref_cookie);
- env->local_ref_cookie = saved_local_ref_cookie;
+ env->local_ref_cookie = bit_cast<IRTSegmentState>(saved_local_ref_cookie);
self->PopHandleScope();
}
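local_ref_cookie changes type from a raw uint32_t to IRTSegmentState, while the JNI transition ABI still shuttles the saved cookie through a uint32_t — hence the bit_cast calls and the two static_asserts guarding size and triviality. A self-contained sketch of the same round-trip (the struct is a stand-in for art::IRTSegmentState, and bit_cast is a minimal memcpy-based version in the spirit of base/casts.h):

    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    struct IRTSegmentState {  // Stand-in: trivially copyable, 32 bits.
      uint32_t top_index;
    };
    static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "size");
    static_assert(std::is_trivial<IRTSegmentState>::value, "trivial");

    template <typename Dst, typename Src>
    Dst bit_cast(const Src& src) {
      static_assert(sizeof(Dst) == sizeof(Src), "sizes must match");
      Dst dst;
      std::memcpy(&dst, &src, sizeof(dst));
      return dst;
    }

    // Save on method entry, restore on exit, as the entrypoints do.
    uint32_t SaveCookie(IRTSegmentState cookie) {
      return bit_cast<uint32_t>(cookie);
    }
    IRTSegmentState RestoreCookie(uint32_t saved) {
      return bit_cast<IRTSegmentState>(saved);
    }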
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e357fa6..9c634fa 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -43,15 +43,15 @@
iref));
return false;
}
- const int topIndex = segment_state_.parts.topIndex;
- int idx = ExtractIndex(iref);
- if (UNLIKELY(idx >= topIndex)) {
+ const uint32_t top_index = segment_state_.top_index;
+ uint32_t idx = ExtractIndex(iref);
+ if (UNLIKELY(idx >= top_index)) {
std::string msg = StringPrintf(
"JNI ERROR (app bug): accessed stale %s %p (index %d in a table of size %d)",
GetIndirectRefKindString(kind_),
iref,
idx,
- topIndex);
+ top_index);
AbortIfNoCheckJNI(msg);
return false;
}
@@ -68,7 +68,9 @@
}
// Make sure that the entry at "idx" is correctly paired with "iref".
-inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
+inline bool IndirectReferenceTable::CheckEntry(const char* what,
+ IndirectRef iref,
+ uint32_t idx) const {
IndirectRef checkRef = ToIndirectRef(idx);
if (UNLIKELY(checkRef != iref)) {
std::string msg = StringPrintf(
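The table index widens beyond the old 16-bit field, and stale references are caught with a serial number packed into the IndirectRef next to the 2-bit kind (see the ConstexprChecks() round-trips added to indirect_reference_table.cc below). An illustrative packing; the field widths here, in particular the 3 serial bits, are assumptions for the sketch, not values from this change:

    #include <cstdint>

    constexpr uintptr_t kKindBits = 2;                     // Two low bits.
    constexpr uintptr_t kKindMask = (1u << kKindBits) - 1;
    constexpr uintptr_t kSerialBits = 3;                   // Assumed width.

    constexpr uintptr_t EncodeRef(uintptr_t index, uintptr_t serial,
                                  uintptr_t kind) {
      return (index << (kKindBits + kSerialBits)) | (serial << kKindBits) | kind;
    }
    constexpr uintptr_t DecodeIndex(uintptr_t ref) {
      return ref >> (kKindBits + kSerialBits);
    }
    constexpr uintptr_t DecodeSerial(uintptr_t ref) {
      return (ref >> kKindBits) & ((1u << kSerialBits) - 1);
    }
    constexpr uintptr_t DecodeKind(uintptr_t ref) { return ref & kKindMask; }

    // Round-trips of the kind the new ConstexprChecks() verifies.
    static_assert(DecodeIndex(EncodeRef(3, 2, 1)) == 3, "index");
    static_assert(DecodeSerial(EncodeRef(3, 2, 1)) == 2, "serial");
    static_assert(DecodeKind(EncodeRef(3, 2, 1)) == 1, "kind");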
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 7389c73..bc0f73a 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -32,6 +32,7 @@
namespace art {
static constexpr bool kDumpStackOnNonLocalReference = false;
+static constexpr bool kDebugIRT = false;
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
switch (kind) {
@@ -60,9 +61,13 @@
IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
IndirectRefKind desired_kind,
+ ResizableCapacity resizable,
std::string* error_msg)
- : kind_(desired_kind),
- max_entries_(max_count) {
+ : segment_state_(kIRTFirstSegment),
+ kind_(desired_kind),
+ max_entries_(max_count),
+ current_num_holes_(0),
+ resizable_(resizable) {
CHECK(error_msg != nullptr);
CHECK_NE(desired_kind, kHandleScopeOrInvalid);
@@ -78,60 +83,209 @@
} else {
table_ = nullptr;
}
- segment_state_.all = IRT_FIRST_SEGMENT;
+ segment_state_ = kIRTFirstSegment;
}
IndirectReferenceTable::~IndirectReferenceTable() {
}
+void IndirectReferenceTable::ConstexprChecks() {
+ // Use this for some assertions. They can't be put into the header as C++ wants the class
+ // to be complete.
+
+ // Check kind.
+ static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
+ "Kind encoding error");
+ static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
+ "Kind encoding error");
+
+ // Check serial.
+ static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
+ static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");
+
+ // Table index.
+ static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
+ static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
+}
+
bool IndirectReferenceTable::IsValid() const {
return table_mem_map_.get() != nullptr;
}
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, ObjPtr<mirror::Object> obj) {
- IRTSegmentState prevState;
- prevState.all = cookie;
- size_t topIndex = segment_state_.parts.topIndex;
+// Holes:
+//
+// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
+// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
+// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
+// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
+// scans when there are no holes, the number of known holes should be tracked.
+//
+// A previous implementation stored the top index and the number of holes as the segment state.
+// This constrains the maximum number of references to 16 bits. We want to relax this, as it
+// is easy to require more references (e.g., to list all classes in large applications). Thus,
+// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
+//
+// Thus, hole count is a local property of the current segment, and needs to be recovered when
+// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
+// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
+// hole count is correct.
+//
+// To be able to detect segment changes, we require an additional local field that can describe
+// the known segment. This is last_known_previous_state_. The requirement will become clear with
+// the following (some non-trivial) cases that have to be supported:
+//
+// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+// reference
+// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+// reference
+//
+// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
+// segment changes above. The condition is simply that the last known state is greater than or
+// equal to the current previous state, and smaller than the current state (top index). The
+// condition is conservative as it adds O(1) overhead to operations on an empty segment.
+
+static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
+ size_t count = 0;
+ for (size_t index = from; index != to; ++index) {
+ if (table[index].GetReference()->IsNull()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
+ if (last_known_previous_state_.top_index >= segment_state_.top_index ||
+ last_known_previous_state_.top_index < prev_state.top_index) {
+ const size_t top_index = segment_state_.top_index;
+ size_t count = CountNullEntries(table_, prev_state.top_index, top_index);
+
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Recovered holes: "
+ << " Current prev=" << prev_state.top_index
+ << " Current top_index=" << top_index
+ << " Old num_holes=" << current_num_holes_
+ << " New num_holes=" << count;
+ }
+
+ current_num_holes_ = count;
+ last_known_previous_state_ = prev_state;
+ } else if (kDebugIRT) {
+ LOG(INFO) << "No need to recover holes";
+ }
+}
+
+ALWAYS_INLINE
+static inline void CheckHoleCount(IrtEntry* table,
+ size_t exp_num_holes,
+ IRTSegmentState prev_state,
+ IRTSegmentState cur_state) {
+ if (kIsDebugBuild) {
+ size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
+ CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
+ << " topIndex=" << cur_state.top_index;
+ }
+}
+
+bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
+ CHECK_GT(new_size, max_entries_);
+
+ const size_t table_bytes = new_size * sizeof(IrtEntry);
+ std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
+ nullptr,
+ table_bytes,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ error_msg));
+ if (new_map == nullptr) {
+ return false;
+ }
+
+ memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+ table_mem_map_ = std::move(new_map);
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ max_entries_ = new_size;
+
+ return true;
+}
+
+IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
+ ObjPtr<mirror::Object> obj) {
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ size_t top_index = segment_state_.top_index;
CHECK(obj != nullptr);
VerifyObject(obj);
DCHECK(table_ != nullptr);
- DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
- if (topIndex == max_entries_) {
- LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
- << "(max=" << max_entries_ << ")\n"
- << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ if (top_index == max_entries_) {
+ if (resizable_ == ResizableCapacity::kNo) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")\n"
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ UNREACHABLE();
+ }
+
+ // Try to double space.
+ std::string error_msg;
+ if (!Resize(max_entries_ * 2, &error_msg)) {
+ LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
+ << "(max=" << max_entries_ << ")" << std::endl
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this)
+ << " Resizing failed: " << error_msg;
+ UNREACHABLE();
+ }
}
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
// We know there's enough room in the table. Now we just need to find
// the right spot. If there's a hole, find it and fill it; otherwise,
// add to the end of the list.
IndirectRef result;
- int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
size_t index;
- if (numHoles > 0) {
- DCHECK_GT(topIndex, 1U);
+ if (current_num_holes_ > 0) {
+ DCHECK_GT(top_index, 1U);
// Find the first hole; likely to be near the end of the list.
- IrtEntry* pScan = &table_[topIndex - 1];
- DCHECK(!pScan->GetReference()->IsNull());
- --pScan;
- while (!pScan->GetReference()->IsNull()) {
- DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
- --pScan;
+ IrtEntry* p_scan = &table_[top_index - 1];
+ DCHECK(!p_scan->GetReference()->IsNull());
+ --p_scan;
+ while (!p_scan->GetReference()->IsNull()) {
+ DCHECK_GE(p_scan, table_ + previous_state.top_index);
+ --p_scan;
}
- index = pScan - table_;
- segment_state_.parts.numHoles--;
+ index = p_scan - table_;
+ current_num_holes_--;
} else {
// Add to the end.
- index = topIndex++;
- segment_state_.parts.topIndex = topIndex;
+ index = top_index++;
+ segment_state_.top_index = top_index;
}
table_[index].Add(obj);
result = ToIndirectRef(index);
- if ((false)) {
- LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
- << " holes=" << segment_state_.parts.numHoles;
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
+ << " holes=" << current_num_holes_;
}
DCHECK(result != nullptr);
@@ -156,14 +310,18 @@
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
-bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
- IRTSegmentState prevState;
- prevState.all = cookie;
- int topIndex = segment_state_.parts.topIndex;
- int bottomIndex = prevState.parts.topIndex;
+bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
+ << " top_index=" << segment_state_.top_index
+ << " last_known_prev_top_index=" << last_known_previous_state_.top_index
+ << " holes=" << current_num_holes_;
+ }
+
+ const uint32_t top_index = segment_state_.top_index;
+ const uint32_t bottom_index = previous_state.top_index;
DCHECK(table_ != nullptr);
- DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
auto* self = Thread::Current();
@@ -180,21 +338,24 @@
return true;
}
}
- const int idx = ExtractIndex(iref);
- if (idx < bottomIndex) {
+ const uint32_t idx = ExtractIndex(iref);
+ if (idx < bottom_index) {
// Wrong segment.
LOG(WARNING) << "Attempt to remove index outside index area (" << idx
- << " vs " << bottomIndex << "-" << topIndex << ")";
+ << " vs " << bottom_index << "-" << top_index << ")";
return false;
}
- if (idx >= topIndex) {
+ if (idx >= top_index) {
// Bad --- stale reference?
LOG(WARNING) << "Attempt to remove invalid index " << idx
- << " (bottom=" << bottomIndex << " top=" << topIndex << ")";
+ << " (bottom=" << bottom_index << " top=" << top_index << ")";
return false;
}
- if (idx == topIndex - 1) {
+ RecoverHoles(previous_state);
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+
+ if (idx == top_index - 1) {
// Top-most entry. Scan up and consume holes.
if (!CheckEntry("remove", iref, idx)) {
@@ -202,28 +363,30 @@
}
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
- int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
- if (numHoles != 0) {
- while (--topIndex > bottomIndex && numHoles != 0) {
- if ((false)) {
- LOG(INFO) << "+++ checking for hole at " << topIndex - 1
- << " (cookie=" << cookie << ") val="
- << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
+ if (current_num_holes_ != 0) {
+ uint32_t collapse_top_index = top_index;
+ while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
+ if (kDebugIRT) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
+ << " (previous_state=" << bottom_index << ") val="
+ << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
}
- if (!table_[topIndex - 1].GetReference()->IsNull()) {
+ if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
break;
}
- if ((false)) {
- LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
}
- numHoles--;
+ current_num_holes_--;
}
- segment_state_.parts.numHoles = numHoles + prevState.parts.numHoles;
- segment_state_.parts.topIndex = topIndex;
+ segment_state_.top_index = collapse_top_index;
+
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
} else {
- segment_state_.parts.topIndex = topIndex-1;
- if ((false)) {
- LOG(INFO) << "+++ ate last entry " << topIndex - 1;
+ segment_state_.top_index = top_index - 1;
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ ate last entry " << top_index - 1;
}
}
} else {
@@ -238,9 +401,10 @@
}
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
- segment_state_.parts.numHoles++;
- if ((false)) {
- LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
+ current_num_holes_++;
+ CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
+ if (kDebugIRT) {
+ LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
}
}
@@ -278,4 +442,14 @@
ReferenceTable::Dump(os, entries);
}
+void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
+ if (kDebugIRT) {
+ LOG(INFO) << "Setting segment state: "
+ << segment_state_.top_index
+ << " -> "
+ << new_state.top_index;
+ }
+ segment_state_ = new_state;
+}
+
} // namespace art
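To make the hole-recovery scheme above concrete, here is a toy model (not ART code) of the same bookkeeping: the stack-carried state is only the top index, holes are counted per segment, and the count is recomputed lazily by scanning for empty slots whenever the conservative segment-change test fires:

    #include <array>
    #include <cstdint>

    struct SegmentState { uint32_t top_index; };

    class ToyTable {
     public:
      // Value 0 encodes an empty slot (a hole). Toy: fixed capacity,
      // no overflow handling, no resizing.
      uint32_t Add(SegmentState prev, uint32_t value) {
        RecoverHoles(prev);
        if (holes_ > 0) {
          // Fill the highest hole in the current segment.
          for (uint32_t i = top_.top_index; i-- > prev.top_index; ) {
            if (slots_[i] == 0) { slots_[i] = value; --holes_; return i; }
          }
        }
        slots_[top_.top_index] = value;  // Append at the end.
        return top_.top_index++;
      }

      void Remove(SegmentState prev, uint32_t index) {
        RecoverHoles(prev);
        slots_[index] = 0;
        if (index == top_.top_index - 1) {
          // Removing the top entry also consumes holes directly below it.
          --top_.top_index;
          while (top_.top_index > prev.top_index &&
                 slots_[top_.top_index - 1] == 0) {
            --top_.top_index;
            --holes_;
          }
        } else {
          ++holes_;  // Left a hole in the middle of the segment.
        }
      }

      SegmentState PushFrame() { return top_; }        // New bottom = old top.
      void PopFrame(SegmentState prev) { top_ = prev; }

     private:
      void RecoverHoles(SegmentState prev) {
        // Conservative segment-change detection, as described above:
        // recount when the last known bottom lies outside [prev, top).
        if (last_known_prev_.top_index >= top_.top_index ||
            last_known_prev_.top_index < prev.top_index) {
          holes_ = 0;
          for (uint32_t i = prev.top_index; i < top_.top_index; ++i) {
            if (slots_[i] == 0) ++holes_;
          }
          last_known_prev_ = prev;
        }
      }

      std::array<uint32_t, 64> slots_{};
      SegmentState top_{0};
      SegmentState last_known_prev_{0};
      uint32_t holes_ = 0;
    };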
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 363280a..7e452a2 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -20,8 +20,10 @@
#include <stdint.h>
#include <iosfwd>
+#include <limits>
#include <string>
+#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -40,165 +42,118 @@
class MemMap;
-/*
- * Maintain a table of indirect references. Used for local/global JNI
- * references.
- *
- * The table contains object references that are part of the GC root set.
- * When an object is added we return an IndirectRef that is not a valid
- * pointer but can be used to find the original value in O(1) time.
- * Conversions to and from indirect references are performed on upcalls
- * and downcalls, so they need to be very fast.
- *
- * To be efficient for JNI local variable storage, we need to provide
- * operations that allow us to operate on segments of the table, where
- * segments are pushed and popped as if on a stack. For example, deletion
- * of an entry should only succeed if it appears in the current segment,
- * and we want to be able to strip off the current segment quickly when
- * a method returns. Additions to the table must be made in the current
- * segment even if space is available in an earlier area.
- *
- * A new segment is created when we call into native code from interpreted
- * code, or when we handle the JNI PushLocalFrame function.
- *
- * The GC must be able to scan the entire table quickly.
- *
- * In summary, these must be very fast:
- * - adding or removing a segment
- * - adding references to a new segment
- * - converting an indirect reference back to an Object
- * These can be a little slower, but must still be pretty quick:
- * - adding references to a "mature" segment
- * - removing individual references
- * - scanning the entire table straight through
- *
- * If there's more than one segment, we don't guarantee that the table
- * will fill completely before we fail due to lack of space. We do ensure
- * that the current segment will pack tightly, which should satisfy JNI
- * requirements (e.g. EnsureLocalCapacity).
- *
- * To make everything fit nicely in 32-bit integers, the maximum size of
- * the table is capped at 64K.
- *
- * Only SynchronizedGet is synchronized.
- */
+// Maintain a table of indirect references. Used for local/global JNI references.
+//
+// The table contains object references, where the strong (local/global) references are part of the
+// GC root set (but not the weak global references). When an object is added we return an
+// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
+// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
+// to be very fast.
+//
+// To be efficient for JNI local variable storage, we need to provide operations that allow us to
+// operate on segments of the table, where segments are pushed and popped as if on a stack. For
+// example, deletion of an entry should only succeed if it appears in the current segment, and we
+// want to be able to strip off the current segment quickly when a method returns. Additions to the
+// table must be made in the current segment even if space is available in an earlier area.
+//
+// A new segment is created when we call into native code from interpreted code, or when we handle
+// the JNI PushLocalFrame function.
+//
+// The GC must be able to scan the entire table quickly.
+//
+// In summary, these must be very fast:
+// - adding or removing a segment
+// - adding references to a new segment
+// - converting an indirect reference back to an Object
+// These can be a little slower, but must still be pretty quick:
+// - adding references to a "mature" segment
+// - removing individual references
+// - scanning the entire table straight through
+//
+// If there's more than one segment, we don't guarantee that the table will fill completely before
+// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
+// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
+//
+// Only SynchronizedGet is synchronized.
-/*
- * Indirect reference definition. This must be interchangeable with JNI's
- * jobject, and it's convenient to let null be null, so we use void*.
- *
- * We need a 16-bit table index and a 2-bit reference type (global, local,
- * weak global). Real object pointers will have zeroes in the low 2 or 3
- * bits (4- or 8-byte alignment), so it's useful to put the ref type
- * in the low bits and reserve zero as an invalid value.
- *
- * The remaining 14 bits can be used to detect stale indirect references.
- * For example, if objects don't move, we can use a hash of the original
- * Object* to make sure the entry hasn't been re-used. (If the Object*
- * we find there doesn't match because of heap movement, we could do a
- * secondary check on the preserved hash value; this implies that creating
- * a global/local ref queries the hash value and forces it to be saved.)
- *
- * A more rigorous approach would be to put a serial number in the extra
- * bits, and keep a copy of the serial number in a parallel table. This is
- * easier when objects can move, but requires 2x the memory and additional
- * memory accesses on add/get. It will catch additional problems, e.g.:
- * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
- * iref1. A pattern based on object bits will miss this.
- */
+// Indirect reference definition. This must be interchangeable with JNI's jobject, and it's
+// convenient to let null be null, so we use void*.
+//
+// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
+// global). We also reserve some bits to be used to detect stale indirect references: we put a
+// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
+// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
+// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
+// lookup iref1. A pattern based on object bits will miss this.
typedef void* IndirectRef;
-/*
- * Indirect reference kind, used as the two low bits of IndirectRef.
- *
- * For convenience these match up with enum jobjectRefType from jni.h.
- */
+// Indirect reference kind, used as the two low bits of IndirectRef.
+//
+// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
- kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
- kLocal = 1, // <<local reference>>
- kGlobal = 2, // <<global reference>>
- kWeakGlobal = 3 // <<weak global reference>>
+ kHandleScopeOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
+ kLocal = 1, // <<local reference>>
+ kGlobal = 2, // <<global reference>>
+ kWeakGlobal = 3, // <<weak global reference>>
+ kLastKind = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
-/*
- * Determine what kind of indirect reference this is.
- */
-static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
- return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
-}
+// Table definition.
+//
+// For the global reference table, the expected common operations are adding a new entry and
+// removing a recently-added entry (usually the most-recently-added entry). For JNI local
+// references, the common operations are adding a new entry and removing an entire table segment.
+//
+// If we delete entries from the middle of the list, we will be left with "holes". We track the
+// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
+// or go slot-hunting.
+//
+// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
+// deletion of an entry may reduce "top_index" by more than one.
+//
+// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
+// "segment". The top is managed internally, and the bottom is passed in as a function argument.
+// When we call a native method or push a local frame, the current top index gets pushed on, and
+// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
+// index, and the value stored in the previous frame becomes the new bottom.
+//
+// The number of holes is cached locally for the current segment; otherwise we would have to pass
+// both the bottom index and the hole count, which restricts us to 16 bits for the top index. The
+// value is cached within the table. To avoid extra code in generated JNI transitions, which
+// implicitly form segments, the code for adding and removing references needs to detect segment
+// changes. Helper fields are used for this detection.
+//
+// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
+// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
+// determining the type and deleting the reference are more expensive because the table must be
+// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
+// the table when expanding it (so realloc() is out), and tricks like serial number checking to
+// detect stale references aren't possible (though we may be able to get similar benefits with other
+// approaches).
+//
+// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
+// delete; must invalidate after segment pop (which could increase the cost/complexity of method
+// call/return). Might be worth only using it for JNI globals.
+//
+// TODO: may want completely different add/remove algorithms for global and local refs to improve
+// performance. A large circular buffer might reduce the amortized cost of adding global
+// references.
-/* use as initial value for "cookie", and when table has only one segment */
-static const uint32_t IRT_FIRST_SEGMENT = 0;
-
-/*
- * Table definition.
- *
- * For the global reference table, the expected common operations are
- * adding a new entry and removing a recently-added entry (usually the
- * most-recently-added entry). For JNI local references, the common
- * operations are adding a new entry and removing an entire table segment.
- *
- * If "alloc_entries_" is not equal to "max_entries_", the table may expand
- * when entries are added, which means the memory may move. If you want
- * to keep pointers into "table" rather than offsets, you must use a
- * fixed-size table.
- *
- * If we delete entries from the middle of the list, we will be left with
- * "holes". We track the number of holes so that, when adding new elements,
- * we can quickly decide to do a trivial append or go slot-hunting.
- *
- * When the top-most entry is removed, any holes immediately below it are
- * also removed. Thus, deletion of an entry may reduce "topIndex" by more
- * than one.
- *
- * To get the desired behavior for JNI locals, we need to know the bottom
- * and top of the current "segment". The top is managed internally, and
- * the bottom is passed in as a function argument. When we call a native method or
- * push a local frame, the current top index gets pushed on, and serves
- * as the new bottom. When we pop a frame off, the value from the stack
- * becomes the new top index, and the value stored in the previous frame
- * becomes the new bottom.
- *
- * To avoid having to re-scan the table after a pop, we want to push the
- * number of holes in the table onto the stack. Because of our 64K-entry
- * cap, we can combine the two into a single unsigned 32-bit value.
- * Instead of a "bottom" argument we take a "cookie", which includes the
- * bottom index and the count of holes below the bottom.
- *
- * Common alternative implementation: make IndirectRef a pointer to the
- * actual reference slot. Instead of getting a table and doing a lookup,
- * the lookup can be done instantly. Operations like determining the
- * type and deleting the reference are more expensive because the table
- * must be hunted for (i.e. you have to do a pointer comparison to see
- * which table it's in), you can't move the table when expanding it (so
- * realloc() is out), and tricks like serial number checking to detect
- * stale references aren't possible (though we may be able to get similar
- * benefits with other approaches).
- *
- * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
- * add immediately follows a delete; must invalidate after segment pop
- * (which could increase the cost/complexity of method call/return).
- * Might be worth only using it for JNI globals.
- *
- * TODO: may want completely different add/remove algorithms for global
- * and local refs to improve performance. A large circular buffer might
- * reduce the amortized cost of adding global references.
- *
- */
-union IRTSegmentState {
- uint32_t all;
- struct {
- uint32_t topIndex:16; /* index of first unused entry */
- uint32_t numHoles:16; /* #of holes in entire table */
- } parts;
+// The state of the current segment. We only store the top index; splitting the word into an
+// index and a hole count would restrict the range of each too much.
+struct IRTSegmentState {
+ uint32_t top_index;
};
+// Use as initial value for "cookie", and when table has only one segment.
+static constexpr IRTSegmentState kIRTFirstSegment = { 0 };
+
// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// Contains multiple entries but only one active one; this helps us detect use-after-free errors
// since the serial stored in the indirect ref won't match.
-static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
+static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
+
class IrtEntry {
public:
void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
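The bit layout described above (table index in the high bits, a small serial number below it, and the reference kind in the low bits) can be sketched standalone. This is a minimal sketch assuming 2 kind bits and 3 serial bits; the real header derives both widths with MinimumBitsToStore, and all names here are illustrative:

#include <cassert>
#include <cstdint>

constexpr unsigned kKindBitsSketch = 2;    // assumed; real: MinimumBitsToStore(kLastKind)
constexpr unsigned kSerialBitsSketch = 3;  // assumed; real: MinimumBitsToStore(kIRTPrevCount)

// Pack index/serial/kind into one pointer-sized word, kind in the low bits.
constexpr uintptr_t Encode(uint32_t index, uint32_t serial, uint32_t kind) {
  return (static_cast<uintptr_t>(index) << (kKindBitsSketch + kSerialBitsSketch)) |
         (static_cast<uintptr_t>(serial) << kKindBitsSketch) |
         kind;
}
constexpr uint32_t DecodeIndex(uintptr_t ref) {
  return static_cast<uint32_t>(ref >> (kKindBitsSketch + kSerialBitsSketch));
}
constexpr uint32_t DecodeSerial(uintptr_t ref) {
  return static_cast<uint32_t>(ref >> kKindBitsSketch) & ((1u << kSerialBitsSketch) - 1);
}
constexpr uint32_t DecodeKind(uintptr_t ref) {
  return static_cast<uint32_t>(ref) & ((1u << kKindBitsSketch) - 1);
}

int main() {
  uintptr_t ref = Encode(/*index=*/1234, /*serial=*/5, /*kind=*/2);  // 2 == kGlobal
  assert(DecodeIndex(ref) == 1234);
  assert(DecodeSerial(ref) == 5);
  assert(DecodeKind(ref) == 2);
}

Note that an all-zero word decodes to kind 0, i.e. kHandleScopeOrInvalid, which is why zero is reserved as the invalid kind and a null jobject can stay null.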
@@ -208,6 +163,11 @@
return &references_[serial_];
}
+ const GcRoot<mirror::Object>* GetReference() const {
+ DCHECK_LT(serial_, kIRTPrevCount);
+ return &references_[serial_];
+ }
+
uint32_t GetSerial() const {
return serial_;
}
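To make the use-after-free detection concrete: each entry keeps a small ring of reference slots plus a serial that selects the active slot, and that serial is also baked into the IndirectRef handed out. A toy version follows (illustrative only; the slot count and names are assumptions, not ART's implementation):

#include <cassert>
#include <cstdint>

constexpr uint32_t kPrevCountSketch = 3;  // assumed slot count

struct ToyEntry {
  uint32_t serial = 0;
  const void* refs[kPrevCountSketch] = {};

  // Rotate to a fresh slot and return the serial to encode into the iref.
  uint32_t Add(const void* obj) {
    serial = (serial + 1) % kPrevCountSketch;
    refs[serial] = obj;
    return serial;
  }
  bool Matches(uint32_t iref_serial) const { return iref_serial == serial; }
};

int main() {
  ToyEntry e;
  int a, b;
  uint32_t s1 = e.Add(&a);  // create iref1 for one object
  uint32_t s2 = e.Add(&b);  // slot reused for a new object
  assert(!e.Matches(s1));   // the stale iref1 is now detectable
  assert(e.Matches(s2));
}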
@@ -220,6 +180,7 @@
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
"Unexpected sizeof(IrtEntry)");
+static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
class IrtIterator {
public:
@@ -257,14 +218,20 @@
class IndirectReferenceTable {
public:
- /*
- * WARNING: Construction of the IndirectReferenceTable may fail.
- * error_msg must not be null. If error_msg is set by the constructor, then
- * construction has failed and the IndirectReferenceTable will be in an
- * invalid state. Use IsValid to check whether the object is in an invalid
- * state.
- */
- IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);
+ enum class ResizableCapacity {
+ kNo,
+ kYes
+ };
+
+ // WARNING: Construction of the IndirectReferenceTable may fail.
+ // error_msg must not be null. If error_msg is set by the constructor, then
+ // construction has failed and the IndirectReferenceTable will be in an
+  // invalid state. Use IsValid to check whether the object is in a valid
+ // state.
+ IndirectReferenceTable(size_t max_count,
+ IndirectRefKind kind,
+ ResizableCapacity resizable,
+ std::string* error_msg);
~IndirectReferenceTable();
@@ -277,20 +244,14 @@
*/
bool IsValid() const;
- /*
- * Add a new entry. "obj" must be a valid non-nullptr object reference.
- *
- * Returns nullptr if the table is full (max entries reached, or alloc
- * failed during expansion).
- */
- IndirectRef Add(uint32_t cookie, ObjPtr<mirror::Object> obj)
+ // Add a new entry. "obj" must be a valid non-null object reference. This function will
+ // abort if the table is full (max entries reached, or expansion failed).
+ IndirectRef Add(IRTSegmentState previous_state, ObjPtr<mirror::Object> obj)
REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Given an IndirectRef in the table, return the Object it refers to.
- *
- * Returns kInvalidIndirectRefObject if iref is invalid.
- */
+ // Given an IndirectRef in the table, return the Object it refers to.
+ //
+ // This function may abort under error conditions.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
ALWAYS_INLINE;
@@ -302,34 +263,26 @@
return Get<kReadBarrierOption>(iref);
}
- /*
- * Update an existing entry.
- *
- * Updates an existing indirect reference to point to a new object.
- */
+ // Updates an existing indirect reference to point to a new object.
void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Remove an existing entry.
- *
- * If the entry is not between the current top index and the bottom index
- * specified by the cookie, we don't remove anything. This is the behavior
- * required by JNI's DeleteLocalRef function.
- *
- * Returns "false" if nothing was removed.
- */
- bool Remove(uint32_t cookie, IndirectRef iref);
+ // Remove an existing entry.
+ //
+ // If the entry is not between the current top index and the bottom index
+ // specified by the cookie, we don't remove anything. This is the behavior
+ // required by JNI's DeleteLocalRef function.
+ //
+ // Returns "false" if nothing was removed.
+ bool Remove(IRTSegmentState previous_state, IndirectRef iref);
void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
- /*
- * Return the #of entries in the entire table. This includes holes, and
- * so may be larger than the actual number of "live" entries.
- */
+ // Return the #of entries in the entire table. This includes holes, and
+ // so may be larger than the actual number of "live" entries.
size_t Capacity() const {
- return segment_state_.parts.topIndex;
+ return segment_state_.top_index;
}
// Note IrtIterator does not have a read barrier as it's used to visit roots.
@@ -344,13 +297,11 @@
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t GetSegmentState() const {
- return segment_state_.all;
+ IRTSegmentState GetSegmentState() const {
+ return segment_state_;
}
- void SetSegmentState(uint32_t new_state) {
- segment_state_.all = new_state;
- }
+ void SetSegmentState(IRTSegmentState new_state);
static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
// Note: Currently segment_state_ is at offset 0. We're testing the expected value in
@@ -362,32 +313,74 @@
// Release pages past the end of the table that may have previously held references.
void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
- private:
- // Extract the table index from an indirect reference.
- static uint32_t ExtractIndex(IndirectRef iref) {
- uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
- return (uref >> 2) & 0xffff;
+ // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
+ ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
+ return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
}
- /*
- * The object pointer itself is subject to relocation in some GC
- * implementations, so we shouldn't really be using it here.
- */
- IndirectRef ToIndirectRef(uint32_t tableIndex) const {
- DCHECK_LT(tableIndex, 65536U);
- uint32_t serialChunk = table_[tableIndex].GetSerial();
- uintptr_t uref = (serialChunk << 20) | (tableIndex << 2) | kind_;
- return reinterpret_cast<IndirectRef>(uref);
+ private:
+ static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
+ static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;
+
+ static constexpr size_t kKindBits = MinimumBitsToStore(
+ static_cast<uint32_t>(IndirectRefKind::kLastKind));
+ static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;
+
+ static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
+ static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
+ DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
+ return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
}
+ static constexpr uint32_t DecodeIndex(uintptr_t uref) {
+ return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
+ }
+
+ static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
+ return static_cast<uintptr_t>(kind);
+ }
+ static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
+ return static_cast<IndirectRefKind>(uref & kKindMask);
+ }
+
+ static constexpr uintptr_t EncodeSerial(uint32_t serial) {
+ DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
+ return serial << kKindBits;
+ }
+ static constexpr uint32_t DecodeSerial(uintptr_t uref) {
+ return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
+ }
+
+ constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
+ DCHECK_LT(table_index, max_entries_);
+ return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
+ }
+
+ static void ConstexprChecks();
+
+ // Extract the table index from an indirect reference.
+ ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
+ return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
+ }
+
+ IndirectRef ToIndirectRef(uint32_t table_index) const {
+ DCHECK_LT(table_index, max_entries_);
+ uint32_t serial = table_[table_index].GetSerial();
+ return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
+ }
+
+  // Resize the backing table. Currently the new size must be larger than the current size.
+ bool Resize(size_t new_size, std::string* error_msg);
+
+ void RecoverHoles(IRTSegmentState from);
// Abort if check_jni is not enabled. Otherwise, just log as an error.
static void AbortIfNoCheckJNI(const std::string& msg);
/* extra debugging checks */
bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
- bool CheckEntry(const char*, IndirectRef, int) const;
+ bool CheckEntry(const char*, IndirectRef, uint32_t) const;
- /* semi-public - read/write by jni down calls */
+  // semi-public - read/write by jni down calls.
IRTSegmentState segment_state_;
// Mem map where we store the indirect refs.
@@ -395,10 +388,21 @@
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
IrtEntry* table_;
- /* bit mask, ORed into all irefs */
+ // bit mask, ORed into all irefs.
const IndirectRefKind kind_;
- /* max #of entries allowed */
- const size_t max_entries_;
+
+ // max #of entries allowed (modulo resizing).
+ size_t max_entries_;
+
+  // State used to retain the old behavior when holes are present. A description of the
+  // algorithm is in the .cc file.
+ // TODO: Consider other data structures for compact tables, e.g., free lists.
+ size_t current_num_holes_;
+ IRTSegmentState last_known_previous_state_;
+
+ // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
+ // responsibility to ensure thread-safety.
+ ResizableCapacity resizable_;
};
} // namespace art
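The segment and hole bookkeeping described in the header comments can be modeled in a few lines. The following toy sketch (not ART code) shows why removing the top entry may lower the top index by more than one, while a mid-table removal merely records a hole for later slot-hunting:

#include <cassert>
#include <cstddef>
#include <vector>

struct ToyTable {
  std::vector<bool> live;  // live[i]: slot i holds a reference
  size_t top = 0;          // index of the first unused slot
  size_t holes = 0;        // dead slots below top

  size_t Add() {
    if (holes > 0) {  // go slot-hunting
      for (size_t i = 0; i < top; ++i) {
        if (!live[i]) {
          live[i] = true;
          --holes;
          return i;
        }
      }
    }
    live.push_back(true);  // trivial append
    return top++;
  }

  void Remove(size_t i) {
    assert(i < top && live[i]);
    live[i] = false;
    if (i + 1 == top) {
      --top;  // the entry itself...
      while (top > 0 && !live[top - 1]) {
        --top;  // ...plus any holes immediately below it
        --holes;
      }
      live.resize(top);
    } else {
      ++holes;  // mid-table removal leaves a hole
    }
  }
};

int main() {
  ToyTable t;
  size_t a = t.Add(), b = t.Add(), c = t.Add();
  t.Remove(b);  // hole in the middle: top stays at 3
  assert(t.top == 3 && t.holes == 1);
  t.Remove(c);  // removing the top entry also pops the hole below it
  assert(t.top == 1 && t.holes == 0);
  (void)a;
}

The real table has the extra complication that only the current segment's hole count is cached; when a segment boundary is crossed it has to be reconstructed, which is what RecoverHoles and last_known_previous_state_ above are for.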
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index d7026de..722b411 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -50,7 +50,10 @@
ScopedObjectAccess soa(Thread::Current());
static const size_t kTableMax = 20;
std::string error_msg;
- IndirectReferenceTable irt(kTableMax, kGlobal, &error_msg);
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
ASSERT_TRUE(irt.IsValid()) << error_msg;
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
@@ -65,7 +68,7 @@
Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
ASSERT_TRUE(obj3.Get() != nullptr);
- const uint32_t cookie = IRT_FIRST_SEGMENT;
+ const IRTSegmentState cookie = kIRTFirstSegment;
CheckDump(&irt, 0, 0);
@@ -257,4 +260,250 @@
CheckDump(&irt, 0, 0);
}
+TEST_F(IndirectReferenceTableTest, Holes) {
+ // Test the explicitly named cases from the IRT implementation:
+ //
+ // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
+ // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+ // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+ // reference
+ // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
+ // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+ // reference
+
+ ScopedObjectAccess soa(Thread::Current());
+ static const size_t kTableMax = 10;
+
+ mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<5> hs(soa.Self());
+ ASSERT_TRUE(c != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+ Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj1.Get() != nullptr);
+ Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj2.Get() != nullptr);
+ Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj3.Get() != nullptr);
+ Handle<mirror::Object> obj4 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj4.Get() != nullptr);
+
+ std::string error_msg;
+
+ // 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+ IndirectRef iref1 = irt.Add(cookie0, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie0, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie0, iref1));
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+ // Must not have filled the previous hole.
+ EXPECT_EQ(irt.Capacity(), 4u);
+ EXPECT_TRUE(irt.Get(iref1) == nullptr);
+ CheckDump(&irt, 3, 3);
+
+ UNUSED(iref0, iref1, iref2, iref3);
+ }
+
+ // 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+ // Pop segment.
+ irt.SetSegmentState(cookie1);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref2) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+
+ // 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
+ // reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+ // New segment.
+ const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+ IndirectRef iref3 = irt.Add(cookie2, obj3.Get());
+
+ // Pop segment.
+ irt.SetSegmentState(cookie2);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 3u);
+ EXPECT_TRUE(irt.Get(iref1) == nullptr);
+ CheckDump(&irt, 3, 3);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+
+ // 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ EXPECT_TRUE(irt.Remove(cookie1, iref1));
+
+ // Emptied segment, push new one.
+ const IRTSegmentState cookie2 = irt.GetSegmentState();
+
+ IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+ IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref3));
+
+ // Pop segment.
+ UNUSED(cookie2);
+ irt.SetSegmentState(cookie1);
+
+ IndirectRef iref5 = irt.Add(cookie1, obj4.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref3) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4, iref5);
+ }
+
+ // 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
+ // reference
+ {
+ IndirectReferenceTable irt(kTableMax,
+ kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ const IRTSegmentState cookie0 = kIRTFirstSegment;
+
+ CheckDump(&irt, 0, 0);
+
+ IndirectRef iref0 = irt.Add(cookie0, obj0.Get());
+
+ // New segment.
+ const IRTSegmentState cookie1 = irt.GetSegmentState();
+
+ IndirectRef iref1 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref2 = irt.Add(cookie1, obj1.Get());
+ IndirectRef iref3 = irt.Add(cookie1, obj2.Get());
+
+ EXPECT_TRUE(irt.Remove(cookie1, iref2));
+
+ // Pop segment.
+ irt.SetSegmentState(cookie1);
+
+ // Push segment.
+ const IRTSegmentState cookie1_second = irt.GetSegmentState();
+ UNUSED(cookie1_second);
+
+ IndirectRef iref4 = irt.Add(cookie1, obj3.Get());
+
+ EXPECT_EQ(irt.Capacity(), 2u);
+ EXPECT_TRUE(irt.Get(iref3) == nullptr);
+ CheckDump(&irt, 2, 2);
+
+ UNUSED(iref0, iref1, iref2, iref3, iref4);
+ }
+}
+
+TEST_F(IndirectReferenceTableTest, Resize) {
+ ScopedObjectAccess soa(Thread::Current());
+ static const size_t kTableMax = 512;
+
+ mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<1> hs(soa.Self());
+ ASSERT_TRUE(c != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+
+ std::string error_msg;
+ IndirectReferenceTable irt(kTableMax,
+ kLocal,
+ IndirectReferenceTable::ResizableCapacity::kYes,
+ &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
+
+ CheckDump(&irt, 0, 0);
+ const IRTSegmentState cookie = kIRTFirstSegment;
+
+ for (size_t i = 0; i != kTableMax + 1; ++i) {
+ irt.Add(cookie, obj0.Get());
+ }
+
+ EXPECT_EQ(irt.Capacity(), kTableMax + 1);
+}
+
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b71236b..72722dd 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -795,7 +795,7 @@
self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
if (!PerformArgumentConversions<is_range>(self, callsite_type, target_type,
shadow_frame, vregC, first_dest_reg,
- arg, new_shadow_frame, result)) {
+ arg, new_shadow_frame)) {
DCHECK(self->IsExceptionPending());
result->SetL(0);
return false;
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 9b4327f..a1ed470 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -422,10 +422,13 @@
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_(kGlobalsMax, kGlobal, error_msg),
+ globals_(kGlobalsMax, kGlobal, IndirectReferenceTable::ResizableCapacity::kNo, error_msg),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_(kWeakGlobalsMax, kWeakGlobal, error_msg),
+ weak_globals_(kWeakGlobalsMax,
+ kWeakGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ error_msg),
allow_accessing_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition",
(CHECK(Locks::jni_weak_globals_lock_ != nullptr),
@@ -551,7 +554,7 @@
return nullptr;
}
WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+ IndirectRef ref = globals_.Add(kIRTFirstSegment, obj);
return reinterpret_cast<jobject>(ref);
}
@@ -563,7 +566,7 @@
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
- IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+ IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
return reinterpret_cast<jweak>(ref);
}
@@ -572,7 +575,7 @@
return;
}
WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ if (!globals_.Remove(kIRTFirstSegment, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
<< "failed to find entry";
}
@@ -583,7 +586,7 @@
return;
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
- if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ if (!weak_globals_.Remove(kIRTFirstSegment, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
}
@@ -680,7 +683,7 @@
// This only applies in the case where MayAccessWeakGlobals goes from false to true. In the other
// case, it may be racy; this is benign since DecodeWeakGlobalLocked does the correct thing
// if MayAccessWeakGlobals is false.
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
@@ -699,7 +702,7 @@
}
ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref) {
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
DCHECK(Runtime::Current()->IsShuttingDown(self));
if (self != nullptr) {
return DecodeWeakGlobal(self, ref);
@@ -712,7 +715,7 @@
}
bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
- DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 8eca8fc..342e0d2 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -68,8 +68,8 @@
JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in, std::string* error_msg)
: self(self_in),
vm(vm_in),
- local_ref_cookie(IRT_FIRST_SEGMENT),
- locals(kLocalsInitial, kLocal, error_msg),
+ local_ref_cookie(kIRTFirstSegment),
+ locals(kLocalsInitial, kLocal, IndirectReferenceTable::ResizableCapacity::kYes, error_msg),
check_jni(false),
runtime_deleted(false),
critical(0),
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index e89debb..5cca0ae 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -64,7 +64,7 @@
JavaVMExt* const vm;
// Cookie used when using the local indirect reference table.
- uint32_t local_ref_cookie;
+ IRTSegmentState local_ref_cookie;
// JNI local references.
IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
@@ -72,7 +72,7 @@
// Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
// TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
// to a native method.
- std::vector<uint32_t> stacked_local_ref_cookies;
+ std::vector<IRTSegmentState> stacked_local_ref_cookies;
// Frequently-accessed fields cached from JavaVM.
bool check_jni;
@@ -131,7 +131,7 @@
private:
JNIEnvExt* const env_;
- uint32_t saved_local_ref_cookie_;
+ IRTSegmentState saved_local_ref_cookie_;
DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
};
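With the cookie now a struct, the pairing between PushLocalFrame/PopLocalFrame and stacked_local_ref_cookies looks roughly like this simplified sketch (assumed shape, not the real JNI implementation):

#include <cstdint>
#include <vector>

struct SegmentStateSketch { uint32_t top_index; };

struct JniEnvSketch {
  SegmentStateSketch local_ref_cookie{0};  // bottom of the current segment
  std::vector<SegmentStateSketch> stacked_local_ref_cookies;

  void PushLocalFrame(SegmentStateSketch current_top) {
    stacked_local_ref_cookies.push_back(local_ref_cookie);
    local_ref_cookie = current_top;  // current top becomes the new bottom
  }
  void PopLocalFrame() {
    local_ref_cookie = stacked_local_ref_cookies.back();
    stacked_local_ref_cookies.pop_back();
  }
};

int main() {
  JniEnvSketch env;
  env.PushLocalFrame({4});  // e.g. entering a native method with 4 locals live
  env.PushLocalFrame({7});  // nested frame
  env.PopLocalFrame();      // bottom is back to 4
  env.PopLocalFrame();      // back to the base segment (bottom 0)
  return static_cast<int>(env.local_ref_cookie.top_index);
}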
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 3839e08..0217a67 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2374,7 +2374,7 @@
// Do we definitely know what kind of reference this is?
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
switch (kind) {
case kLocal:
return JNILocalRefType;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 9479a18..e990935 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2308,21 +2308,25 @@
// by modifying memory.
// The parameters don't really matter here.
std::string error_msg;
- IndirectReferenceTable irt(5, IndirectRefKind::kGlobal, &error_msg);
+ IndirectReferenceTable irt(5,
+ IndirectRefKind::kGlobal,
+ IndirectReferenceTable::ResizableCapacity::kNo,
+ &error_msg);
ASSERT_TRUE(irt.IsValid()) << error_msg;
- uint32_t old_state = irt.GetSegmentState();
+ IRTSegmentState old_state = irt.GetSegmentState();
// Write some new state directly. We invert parts of old_state to ensure a new value.
- uint32_t new_state = old_state ^ 0x07705005;
- ASSERT_NE(old_state, new_state);
+ IRTSegmentState new_state;
+ new_state.top_index = old_state.top_index ^ 0x07705005;
+ ASSERT_NE(old_state.top_index, new_state.top_index);
uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
int32_t segment_state_offset =
IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
- *reinterpret_cast<uint32_t*>(base + segment_state_offset) = new_state;
+ *reinterpret_cast<IRTSegmentState*>(base + segment_state_offset) = new_state;
// Read and compare.
- EXPECT_EQ(new_state, irt.GetSegmentState());
+ EXPECT_EQ(new_state.top_index, irt.GetSegmentState().top_index);
}
// Test the offset computation of JNIEnvExt offsets. b/26071368.
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index 5f9824c..7a77bda 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -97,6 +97,70 @@
size_t arg_index_;
};
+REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertJValue(Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ const JValue& from_value,
+ JValue* to_value) {
+ const Primitive::Type from_type = from->GetPrimitiveType();
+ const Primitive::Type to_type = to->GetPrimitiveType();
+
+ // This method must be called only when the types don't match.
+ DCHECK(from.Get() != to.Get());
+
+ if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
+ // Throws a ClassCastException if we're unable to convert a primitive value.
+ return ConvertPrimitiveValue(false, from_type, to_type, from_value, to_value);
+ } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
+ // They're both reference types. If "from" is null, we can pass it
+ // through unchanged. If not, we must generate a cast exception if
+ // |to| is not assignable from the dynamic type of |ref|.
+ mirror::Object* const ref = from_value.GetL();
+ if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
+ to_value->SetL(ref);
+ return true;
+ } else {
+ ThrowClassCastException(to.Get(), ref->GetClass());
+ return false;
+ }
+ } else {
+    // Precisely one of the source or the destination is a reference type.
+ // We must box or unbox.
+ if (to_type == Primitive::kPrimNot) {
+ // The target type is a reference, we must box.
+ Primitive::Type type;
+ // TODO(narayan): This is a CHECK for now. There might be a few corner cases
+      // here that we might not have handled yet. For example, if |to| is java/lang/Number;,
+ // we will need to box this "naturally".
+ CHECK(GetPrimitiveType(to.Get(), &type));
+ // First perform a primitive conversion to the unboxed equivalent of the target,
+ // if necessary. This should be for the rarer cases like (int->Long) etc.
+ if (UNLIKELY(from_type != type)) {
+ if (!ConvertPrimitiveValue(false, from_type, type, from_value, to_value)) {
+ return false;
+ }
+ } else {
+ *to_value = from_value;
+ }
+
+ // Then perform the actual boxing, and then set the reference.
+      ObjPtr<mirror::Object> boxed = BoxPrimitive(type, *to_value);
+ to_value->SetL(boxed.Ptr());
+ return true;
+ } else {
+ // The target type is a primitive, we must unbox.
+ ObjPtr<mirror::Object> ref(from_value.GetL());
+
+ // Note that UnboxPrimitiveForResult already performs all of the type
+ // conversions that we want, based on |to|.
+ JValue unboxed_value;
+ return UnboxPrimitiveForResult(ref, to.Get(), to_value);
+ }
+ }
+
+ return true;
+}
+
template <bool is_range>
bool PerformArgumentConversions(Thread* self,
Handle<mirror::MethodType> callsite_type,
@@ -105,8 +169,7 @@
uint32_t first_src_reg,
uint32_t first_dest_reg,
const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result) {
+ ShadowFrame* callee_frame) {
StackHandleScope<4> hs(self);
Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
@@ -114,7 +177,6 @@
const int32_t num_method_params = from_types->GetLength();
if (to_types->GetLength() != num_method_params) {
ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
- result->SetJ(0);
return false;
}
@@ -149,106 +211,33 @@
}
continue;
- } else if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
- // They are both primitive types - we should perform any widening or
- // narrowing conversions as applicable.
+ } else {
JValue from_value;
JValue to_value;
if (Primitive::Is64BitType(from_type)) {
from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
+ } else if (from_type == Primitive::kPrimNot) {
+ from_value.SetL(caller_frame.GetVRegReference(input_args.Next()));
} else {
from_value.SetI(caller_frame.GetVReg(input_args.Next()));
}
- // Throws a ClassCastException if we're unable to convert a primitive value.
- if (!ConvertPrimitiveValue(false, from_type, to_type, from_value, &to_value)) {
+ if (!ConvertJValue(from, to, from_value, &to_value)) {
DCHECK(self->IsExceptionPending());
- result->SetL(0);
return false;
}
if (Primitive::Is64BitType(to_type)) {
callee_frame->SetVRegLong(first_dest_reg + to_arg_index, to_value.GetJ());
to_arg_index += 2;
+ } else if (to_type == Primitive::kPrimNot) {
+ callee_frame->SetVRegReference(first_dest_reg + to_arg_index, to_value.GetL());
+ ++to_arg_index;
} else {
callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
++to_arg_index;
}
- } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
- // They're both reference types. If "from" is null, we can pass it
- // through unchanged. If not, we must generate a cast exception if
- // |to| is not assignable from the dynamic type of |ref|.
- const size_t next_arg_reg = input_args.Next();
- mirror::Object* const ref = caller_frame.GetVRegReference(next_arg_reg);
- if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
- interpreter::AssignRegister(callee_frame,
- caller_frame,
- first_dest_reg + to_arg_index,
- next_arg_reg);
- ++to_arg_index;
- } else {
- ThrowClassCastException(to.Get(), ref->GetClass());
- result->SetL(0);
- return false;
- }
- } else {
- // Precisely one of the source or the destination are reference types.
- // We must box or unbox.
- if (to_type == Primitive::kPrimNot) {
- // The target type is a reference, we must box.
- Primitive::Type type;
- // TODO(narayan): This is a CHECK for now. There might be a few corner cases
- // here that we might not have handled yet. For exmple, if |to| is java/lang/Number;,
- // we will need to box this "naturally".
- CHECK(GetPrimitiveType(to.Get(), &type));
-
- JValue from_value;
- JValue to_value;
-
- if (Primitive::Is64BitType(from_type)) {
- from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
- } else {
- from_value.SetI(caller_frame.GetVReg(input_args.Next()));
- }
-
- // First perform a primitive conversion to the unboxed equivalent of the target,
- // if necessary. This should be for the rarer cases like (int->Long) etc.
- if (UNLIKELY(from_type != type)) {
- if (!ConvertPrimitiveValue(false, from_type, type, from_value, &to_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
- } else {
- to_value = from_value;
- }
-
- // Then perform the actual boxing, and then set the reference.
- ObjPtr<mirror::Object> boxed = BoxPrimitive(type, to_value);
- callee_frame->SetVRegReference(first_dest_reg + to_arg_index, boxed.Ptr());
- ++to_arg_index;
- } else {
- // The target type is a primitive, we must unbox.
- ObjPtr<mirror::Object> ref(caller_frame.GetVRegReference(input_args.Next()));
-
- // Note that UnboxPrimitiveForResult already performs all of the type
- // conversions that we want, based on |to|.
- JValue unboxed_value;
- if (!UnboxPrimitiveForResult(ref, to.Get(), &unboxed_value)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
- }
-
- if (Primitive::Is64BitType(to_type)) {
- callee_frame->SetVRegLong(first_dest_reg + to_arg_index, unboxed_value.GetJ());
- to_arg_index += 2;
- } else {
- callee_frame->SetVReg(first_dest_reg + to_arg_index, unboxed_value.GetI());
- ++to_arg_index;
- }
- }
}
}
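The refactoring funnels all four parameter-conversion shapes through ConvertJValue: primitive-to-primitive widening or narrowing, reference-to-reference checked assignment, boxing, and unboxing. A toy model of that lattice, with illustrative types that are not ART's:

#include <cassert>
#include <optional>

enum class Type { kInt, kLong, kIntegerRef };  // toy universe

struct Value {
  Type type;
  long bits;
};

std::optional<Value> Convert(const Value& from, Type to) {
  if (from.type == to) return from;                // identity
  if (from.type == Type::kInt && to == Type::kLong)
    return Value{Type::kLong, from.bits};          // widening
  if (from.type == Type::kInt && to == Type::kIntegerRef)
    return Value{Type::kIntegerRef, from.bits};    // boxing
  if (from.type == Type::kIntegerRef && to == Type::kInt)
    return Value{Type::kInt, from.bits};           // unboxing
  return std::nullopt;  // the real code throws ClassCastException here
}

int main() {
  assert(Convert({Type::kInt, 7}, Type::kLong)->bits == 7);
  assert(!Convert({Type::kLong, 7}, Type::kIntegerRef));  // no long -> Integer
}

Where the toy returns nullopt, the real code throws; and as in the diff, boxing happens only after any needed primitive conversion to the target's unboxed type.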
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index a36b66d..26a29b3 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -55,6 +55,14 @@
return handle_kind <= kLastInvokeKind;
}
+// Performs a single argument conversion from type |from| to a distinct
+// type |to|. Returns true on success, false otherwise.
+REQUIRES_SHARED(Locks::mutator_lock_)
+bool ConvertJValue(Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ const JValue& from_value,
+ JValue* to_value) ALWAYS_INLINE;
+
// Perform argument conversions between |callsite_type| (the type of the
// incoming arguments) and |callee_type| (the type of the method being
// invoked). These include widening and narrowing conversions as well as
@@ -68,8 +76,7 @@
uint32_t first_src_reg,
uint32_t first_dest_reg,
const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- ShadowFrame* callee_frame,
- JValue* result);
+ ShadowFrame* callee_frame);
} // namespace art
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b99bcb5..63a0e14 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -301,6 +301,10 @@
// error and sets found to false.
static OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found);
+ VdexFile* GetVdexFile() const {
+ return vdex_.get();
+ }
+
protected:
OatFile(const std::string& filename, bool executable);
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 661012c..f88309b 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -911,7 +911,7 @@
// Will need to be fixed if there are cases where it's not.
void UpdateReference(Thread* self, jobject obj, ObjPtr<mirror::Object> result) {
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
if (kind == kLocal) {
self->GetJniEnv()->locals.Update(obj, result);
} else if (kind == kHandleScopeOrInvalid) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e47ccc0..ace5e67 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1860,7 +1860,7 @@
return nullptr;
}
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
ObjPtr<mirror::Object> result;
bool expect_null = false;
// The "kinds" below are sorted by the frequency we expect to encounter them.
@@ -1902,7 +1902,7 @@
bool Thread::IsJWeakCleared(jweak obj) const {
CHECK(obj != nullptr);
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
- IndirectRefKind kind = GetIndirectRefKind(ref);
+ IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
CHECK_EQ(kind, kWeakGlobal);
return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
}
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 5ca7684..c7875b5 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -70,10 +70,7 @@
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the types array.
- // See FixupArtMethodArrayVisitor and ClassLinker::AddImageSpace.
- return std::max(ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements),
- static_cast<size_t>(pointer_size_));
+ return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -85,8 +82,7 @@
}
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- // App image patching relies on having enough room for a forwarding pointer in the methods array.
- return std::max(ArraySize(pointer_size_, num_elements), static_cast<size_t>(pointer_size_));
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::MethodsAlignment() const {
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 9fbf875..b3dab58 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -34,9 +34,12 @@
return (memcmp(version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
}
-VdexFile::Header::Header(uint32_t dex_size, uint32_t verifier_deps_size)
+VdexFile::Header::Header(uint32_t dex_size,
+ uint32_t verifier_deps_size,
+ uint32_t quickening_info_size)
: dex_size_(dex_size),
- verifier_deps_size_(verifier_deps_size) {
+ verifier_deps_size_(verifier_deps_size),
+ quickening_info_size_(quickening_info_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
memcpy(version_, kVdexVersion, sizeof(kVdexVersion));
DCHECK(IsMagicValid());
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 6bea153..28f9bb3 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -42,13 +42,14 @@
public:
struct Header {
public:
- Header(uint32_t dex_size, uint32_t verifier_deps_size);
+ Header(uint32_t dex_size, uint32_t verifier_deps_size, uint32_t quickening_info_size);
bool IsMagicValid() const;
bool IsVersionValid() const;
uint32_t GetDexSize() const { return dex_size_; }
uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
+ uint32_t GetQuickeningInfoSize() const { return quickening_info_size_; }
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
@@ -58,6 +59,7 @@
uint8_t version_[4];
uint32_t dex_size_;
uint32_t verifier_deps_size_;
+ uint32_t quickening_info_size_;
};
static VdexFile* Open(const std::string& vdex_filename,
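Putting the vdex_file.cc and vdex_file.h changes together, the on-disk header now records three section sizes. A sketch of the layout implied by the field order (tight packing and the section ordering are assumptions here; the real writer may align sections):

#include <cstdint>

struct VdexHeaderSketch {
  uint8_t  magic[4];    // 'v', 'd', 'e', 'x'
  uint8_t  version[4];
  uint32_t dex_size_;
  uint32_t verifier_deps_size_;
  uint32_t quickening_info_size_;  // the field added by this change
};
static_assert(sizeof(VdexHeaderSketch) == 20, "five 4-byte fields, no padding");

// Assuming the sections follow the header in declaration order, the
// quickening info would start at:
//   sizeof(VdexHeaderSketch) + dex_size_ + verifier_deps_size_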
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index ff6ccd4..3c053cf 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -284,29 +284,28 @@
/// CHECK-START: java.lang.String Main.$noinline$getNonBootImageString() sharpening (before)
/// CHECK: LoadString load_kind:DexCacheViaMethod
- // FIXME: Disabled because of BSS root visiting issues. Bug: 32124939
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
- // CHECK-DAG: X86ComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (after)
+ /// CHECK-DAG: X86ComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
- // CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-X86_64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
- // CHECK: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:BssEntry
- // CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
- // CHECK-DAG: MipsComputeBaseMethodAddress
- // CHECK-DAG: LoadString load_kind:BssEntry
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
+ /// CHECK-DAG: MipsComputeBaseMethodAddress
+ /// CHECK-DAG: LoadString load_kind:BssEntry
public static String $noinline$getNonBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 5ef66d1..12e0338 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -45,16 +45,6 @@
fi
done
-# Workaround for repo incompatibilities on the Chromium buildbot.
-# TODO: Remove this workaround once https://bugs.chromium.org/p/chromium/issues/detail?id=646329
-# is addressed.
-repo=$(which repo)
-if [[ $repo == *"depot_tools"* ]]; then
- ln -s build/soong/root.bp Android.bp
- ln -s build/soong/bootstrap.bash bootstrap.bash
- echo "include build/core/main.mk" > Makefile
-fi
-
if [[ $mode == "host" ]]; then
make_command="make $j_arg $showcommands build-art-host-tests $common_targets"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "